File: | lib/Target/X86/X86MCInstLower.cpp |
Warning: | line 770, column 3 Value stored to 'Opc' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains code to lower X86 MachineInstrs to their corresponding |
10 | // MCInst records. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "InstPrinter/X86ATTInstPrinter.h" |
15 | #include "InstPrinter/X86InstComments.h" |
16 | #include "MCTargetDesc/X86BaseInfo.h" |
17 | #include "MCTargetDesc/X86TargetStreamer.h" |
18 | #include "Utils/X86ShuffleDecode.h" |
19 | #include "X86AsmPrinter.h" |
20 | #include "X86RegisterInfo.h" |
21 | #include "X86ShuffleDecodeConstantPool.h" |
22 | #include "llvm/ADT/Optional.h" |
23 | #include "llvm/ADT/SmallString.h" |
24 | #include "llvm/ADT/iterator_range.h" |
25 | #include "llvm/CodeGen/MachineConstantPool.h" |
26 | #include "llvm/CodeGen/MachineFunction.h" |
27 | #include "llvm/CodeGen/MachineModuleInfoImpls.h" |
28 | #include "llvm/CodeGen/MachineOperand.h" |
29 | #include "llvm/CodeGen/StackMaps.h" |
30 | #include "llvm/IR/DataLayout.h" |
31 | #include "llvm/IR/GlobalValue.h" |
32 | #include "llvm/IR/Mangler.h" |
33 | #include "llvm/MC/MCAsmInfo.h" |
34 | #include "llvm/MC/MCCodeEmitter.h" |
35 | #include "llvm/MC/MCContext.h" |
36 | #include "llvm/MC/MCExpr.h" |
37 | #include "llvm/MC/MCFixup.h" |
38 | #include "llvm/MC/MCInst.h" |
39 | #include "llvm/MC/MCInstBuilder.h" |
40 | #include "llvm/MC/MCSection.h" |
41 | #include "llvm/MC/MCSectionELF.h" |
42 | #include "llvm/MC/MCStreamer.h" |
43 | #include "llvm/MC/MCSymbol.h" |
44 | #include "llvm/MC/MCSymbolELF.h" |
45 | #include "llvm/Target/TargetLoweringObjectFile.h" |
46 | |
47 | using namespace llvm; |
48 | |
namespace {

/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst.
/// It holds the per-function context (MCContext, MachineFunction, target
/// machine, asm info, and the owning AsmPrinter) needed during lowering.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;

public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  // Lower a single machine operand; returns None for operands that have no
  // MCInst counterpart (implicit registers, register masks).
  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
                                          const MachineOperand &MO) const;
  // Lower MI into OutMI, applying X86-specific simplifications.
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace
74 | |
// Emit a minimal sequence of nops spanning NumBytes bytes.
// (Forward declaration; defined later in this file.)
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);
78 | |
79 | void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst, |
80 | const MCSubtargetInfo &STI, |
81 | MCCodeEmitter *CodeEmitter) { |
82 | if (InShadow) { |
83 | SmallString<256> Code; |
84 | SmallVector<MCFixup, 4> Fixups; |
85 | raw_svector_ostream VecOS(Code); |
86 | CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI); |
87 | CurrentShadowSize += Code.size(); |
88 | if (CurrentShadowSize >= RequiredShadowSize) |
89 | InShadow = false; // The shadow is big enough. Stop counting. |
90 | } |
91 | } |
92 | |
93 | void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding( |
94 | MCStreamer &OutStreamer, const MCSubtargetInfo &STI) { |
95 | if (InShadow && CurrentShadowSize < RequiredShadowSize) { |
96 | InShadow = false; |
97 | EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize, |
98 | MF->getSubtarget<X86Subtarget>().is64Bit(), STI); |
99 | } |
100 | } |
101 | |
102 | void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) { |
103 | OutStreamer->EmitInstruction(Inst, getSubtargetInfo()); |
104 | SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get()); |
105 | } |
106 | |
107 | X86MCInstLower::X86MCInstLower(const MachineFunction &mf, |
108 | X86AsmPrinter &asmprinter) |
109 | : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()), |
110 | AsmPrinter(asmprinter) {} |
111 | |
112 | MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const { |
113 | return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>(); |
114 | } |
115 | |
116 | /// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol |
117 | /// operand to an MCSymbol. |
118 | MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const { |
119 | const DataLayout &DL = MF.getDataLayout(); |
120 | assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&(((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference" ) ? static_cast<void> (0) : __assert_fail ("(MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && \"Isn't a symbol reference\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 121, __PRETTY_FUNCTION__)) |
121 | "Isn't a symbol reference")(((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference" ) ? static_cast<void> (0) : __assert_fail ("(MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && \"Isn't a symbol reference\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 121, __PRETTY_FUNCTION__)); |
122 | |
123 | MCSymbol *Sym = nullptr; |
124 | SmallString<128> Name; |
125 | StringRef Suffix; |
126 | |
127 | switch (MO.getTargetFlags()) { |
128 | case X86II::MO_DLLIMPORT: |
129 | // Handle dllimport linkage. |
130 | Name += "__imp_"; |
131 | break; |
132 | case X86II::MO_COFFSTUB: |
133 | Name += ".refptr."; |
134 | break; |
135 | case X86II::MO_DARWIN_NONLAZY: |
136 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: |
137 | Suffix = "$non_lazy_ptr"; |
138 | break; |
139 | } |
140 | |
141 | if (!Suffix.empty()) |
142 | Name += DL.getPrivateGlobalPrefix(); |
143 | |
144 | if (MO.isGlobal()) { |
145 | const GlobalValue *GV = MO.getGlobal(); |
146 | AsmPrinter.getNameWithPrefix(Name, GV); |
147 | } else if (MO.isSymbol()) { |
148 | Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL); |
149 | } else if (MO.isMBB()) { |
150 | assert(Suffix.empty())((Suffix.empty()) ? static_cast<void> (0) : __assert_fail ("Suffix.empty()", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 150, __PRETTY_FUNCTION__)); |
151 | Sym = MO.getMBB()->getSymbol(); |
152 | } |
153 | |
154 | Name += Suffix; |
155 | if (!Sym) |
156 | Sym = Ctx.getOrCreateSymbol(Name); |
157 | |
158 | // If the target flags on the operand changes the name of the symbol, do that |
159 | // before we return the symbol. |
160 | switch (MO.getTargetFlags()) { |
161 | default: |
162 | break; |
163 | case X86II::MO_COFFSTUB: { |
164 | MachineModuleInfoCOFF &MMICOFF = |
165 | MF.getMMI().getObjFileInfo<MachineModuleInfoCOFF>(); |
166 | MachineModuleInfoImpl::StubValueTy &StubSym = MMICOFF.getGVStubEntry(Sym); |
167 | if (!StubSym.getPointer()) { |
168 | assert(MO.isGlobal() && "Extern symbol not handled yet")((MO.isGlobal() && "Extern symbol not handled yet") ? static_cast<void> (0) : __assert_fail ("MO.isGlobal() && \"Extern symbol not handled yet\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 168, __PRETTY_FUNCTION__)); |
169 | StubSym = MachineModuleInfoImpl::StubValueTy( |
170 | AsmPrinter.getSymbol(MO.getGlobal()), true); |
171 | } |
172 | break; |
173 | } |
174 | case X86II::MO_DARWIN_NONLAZY: |
175 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: { |
176 | MachineModuleInfoImpl::StubValueTy &StubSym = |
177 | getMachOMMI().getGVStubEntry(Sym); |
178 | if (!StubSym.getPointer()) { |
179 | assert(MO.isGlobal() && "Extern symbol not handled yet")((MO.isGlobal() && "Extern symbol not handled yet") ? static_cast<void> (0) : __assert_fail ("MO.isGlobal() && \"Extern symbol not handled yet\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 179, __PRETTY_FUNCTION__)); |
180 | StubSym = MachineModuleInfoImpl::StubValueTy( |
181 | AsmPrinter.getSymbol(MO.getGlobal()), |
182 | !MO.getGlobal()->hasInternalLinkage()); |
183 | } |
184 | break; |
185 | } |
186 | } |
187 | |
188 | return Sym; |
189 | } |
190 | |
191 | MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, |
192 | MCSymbol *Sym) const { |
193 | // FIXME: We would like an efficient form for this, so we don't have to do a |
194 | // lot of extra uniquing. |
195 | const MCExpr *Expr = nullptr; |
196 | MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None; |
197 | |
198 | switch (MO.getTargetFlags()) { |
199 | default: |
200 | llvm_unreachable("Unknown target flag on GV operand")::llvm::llvm_unreachable_internal("Unknown target flag on GV operand" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 200); |
201 | case X86II::MO_NO_FLAG: // No flag. |
202 | // These affect the name of the symbol, not any suffix. |
203 | case X86II::MO_DARWIN_NONLAZY: |
204 | case X86II::MO_DLLIMPORT: |
205 | case X86II::MO_COFFSTUB: |
206 | break; |
207 | |
208 | case X86II::MO_TLVP: |
209 | RefKind = MCSymbolRefExpr::VK_TLVP; |
210 | break; |
211 | case X86II::MO_TLVP_PIC_BASE: |
212 | Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx); |
213 | // Subtract the pic base. |
214 | Expr = MCBinaryExpr::createSub( |
215 | Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx); |
216 | break; |
217 | case X86II::MO_SECREL: |
218 | RefKind = MCSymbolRefExpr::VK_SECREL; |
219 | break; |
220 | case X86II::MO_TLSGD: |
221 | RefKind = MCSymbolRefExpr::VK_TLSGD; |
222 | break; |
223 | case X86II::MO_TLSLD: |
224 | RefKind = MCSymbolRefExpr::VK_TLSLD; |
225 | break; |
226 | case X86II::MO_TLSLDM: |
227 | RefKind = MCSymbolRefExpr::VK_TLSLDM; |
228 | break; |
229 | case X86II::MO_GOTTPOFF: |
230 | RefKind = MCSymbolRefExpr::VK_GOTTPOFF; |
231 | break; |
232 | case X86II::MO_INDNTPOFF: |
233 | RefKind = MCSymbolRefExpr::VK_INDNTPOFF; |
234 | break; |
235 | case X86II::MO_TPOFF: |
236 | RefKind = MCSymbolRefExpr::VK_TPOFF; |
237 | break; |
238 | case X86II::MO_DTPOFF: |
239 | RefKind = MCSymbolRefExpr::VK_DTPOFF; |
240 | break; |
241 | case X86II::MO_NTPOFF: |
242 | RefKind = MCSymbolRefExpr::VK_NTPOFF; |
243 | break; |
244 | case X86II::MO_GOTNTPOFF: |
245 | RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; |
246 | break; |
247 | case X86II::MO_GOTPCREL: |
248 | RefKind = MCSymbolRefExpr::VK_GOTPCREL; |
249 | break; |
250 | case X86II::MO_GOT: |
251 | RefKind = MCSymbolRefExpr::VK_GOT; |
252 | break; |
253 | case X86II::MO_GOTOFF: |
254 | RefKind = MCSymbolRefExpr::VK_GOTOFF; |
255 | break; |
256 | case X86II::MO_PLT: |
257 | RefKind = MCSymbolRefExpr::VK_PLT; |
258 | break; |
259 | case X86II::MO_ABS8: |
260 | RefKind = MCSymbolRefExpr::VK_X86_ABS8; |
261 | break; |
262 | case X86II::MO_PIC_BASE_OFFSET: |
263 | case X86II::MO_DARWIN_NONLAZY_PIC_BASE: |
264 | Expr = MCSymbolRefExpr::create(Sym, Ctx); |
265 | // Subtract the pic base. |
266 | Expr = MCBinaryExpr::createSub( |
267 | Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx); |
268 | if (MO.isJTI()) { |
269 | assert(MAI.doesSetDirectiveSuppressReloc())((MAI.doesSetDirectiveSuppressReloc()) ? static_cast<void> (0) : __assert_fail ("MAI.doesSetDirectiveSuppressReloc()", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 269, __PRETTY_FUNCTION__)); |
270 | // If .set directive is supported, use it to reduce the number of |
271 | // relocations the assembler will generate for differences between |
272 | // local labels. This is only safe when the symbols are in the same |
273 | // section so we are restricting it to jumptable references. |
274 | MCSymbol *Label = Ctx.createTempSymbol(); |
275 | AsmPrinter.OutStreamer->EmitAssignment(Label, Expr); |
276 | Expr = MCSymbolRefExpr::create(Label, Ctx); |
277 | } |
278 | break; |
279 | } |
280 | |
281 | if (!Expr) |
282 | Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx); |
283 | |
284 | if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) |
285 | Expr = MCBinaryExpr::createAdd( |
286 | Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx); |
287 | return MCOperand::createExpr(Expr); |
288 | } |
289 | |
290 | /// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with |
291 | /// a short fixed-register form. |
292 | static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { |
293 | unsigned ImmOp = Inst.getNumOperands() - 1; |
294 | assert(Inst.getOperand(0).isReg() &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 299, __PRETTY_FUNCTION__)) |
295 | (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 299, __PRETTY_FUNCTION__)) |
296 | ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 299, __PRETTY_FUNCTION__)) |
297 | Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 299, __PRETTY_FUNCTION__)) |
298 | Inst.getNumOperands() == 2) &&((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 299, __PRETTY_FUNCTION__)) |
299 | "Unexpected instruction!")((Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp ).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst .getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getOperand(0).isReg() && (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) && ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() && Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) || Inst.getNumOperands() == 2) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 299, __PRETTY_FUNCTION__)); |
300 | |
301 | // Check whether the destination register can be fixed. |
302 | unsigned Reg = Inst.getOperand(0).getReg(); |
303 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) |
304 | return; |
305 | |
306 | // If so, rewrite the instruction. |
307 | MCOperand Saved = Inst.getOperand(ImmOp); |
308 | Inst = MCInst(); |
309 | Inst.setOpcode(Opcode); |
310 | Inst.addOperand(Saved); |
311 | } |
312 | |
313 | /// If a movsx instruction has a shorter encoding for the used register |
314 | /// simplify the instruction to use it instead. |
315 | static void SimplifyMOVSX(MCInst &Inst) { |
316 | unsigned NewOpcode = 0; |
317 | unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg(); |
318 | switch (Inst.getOpcode()) { |
319 | default: |
320 | llvm_unreachable("Unexpected instruction!")::llvm::llvm_unreachable_internal("Unexpected instruction!", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 320); |
321 | case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw |
322 | if (Op0 == X86::AX && Op1 == X86::AL) |
323 | NewOpcode = X86::CBW; |
324 | break; |
325 | case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl |
326 | if (Op0 == X86::EAX && Op1 == X86::AX) |
327 | NewOpcode = X86::CWDE; |
328 | break; |
329 | case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq |
330 | if (Op0 == X86::RAX && Op1 == X86::EAX) |
331 | NewOpcode = X86::CDQE; |
332 | break; |
333 | } |
334 | |
335 | if (NewOpcode != 0) { |
336 | Inst = MCInst(); |
337 | Inst.setOpcode(NewOpcode); |
338 | } |
339 | } |
340 | |
341 | /// Simplify things like MOV32rm to MOV32o32a. |
342 | static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, |
343 | unsigned Opcode) { |
344 | // Don't make these simplifications in 64-bit mode; other assemblers don't |
345 | // perform them because they make the code larger. |
346 | if (Printer.getSubtarget().is64Bit()) |
347 | return; |
348 | |
349 | bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg(); |
350 | unsigned AddrBase = IsStore; |
351 | unsigned RegOp = IsStore ? 0 : 5; |
352 | unsigned AddrOp = AddrBase + 3; |
353 | assert(((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
354 | Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
355 | Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
356 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
357 | Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
358 | Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
359 | (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) &&((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)) |
360 | "Unexpected instruction!")((Inst.getNumOperands() == 6 && Inst.getOperand(RegOp ).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt ).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg ).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg ).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst .getOperand(AddrOp).isImm()) && "Unexpected instruction!" ) ? static_cast<void> (0) : __assert_fail ("Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() && Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() && Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() && Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() && Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() && (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) && \"Unexpected instruction!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 360, __PRETTY_FUNCTION__)); |
361 | |
362 | // Check whether the destination register can be fixed. |
363 | unsigned Reg = Inst.getOperand(RegOp).getReg(); |
364 | if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX) |
365 | return; |
366 | |
367 | // Check whether this is an absolute address. |
368 | // FIXME: We know TLVP symbol refs aren't, but there should be a better way |
369 | // to do this here. |
370 | bool Absolute = true; |
371 | if (Inst.getOperand(AddrOp).isExpr()) { |
372 | const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr(); |
373 | if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE)) |
374 | if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP) |
375 | Absolute = false; |
376 | } |
377 | |
378 | if (Absolute && |
379 | (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 || |
380 | Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 || |
381 | Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0)) |
382 | return; |
383 | |
384 | // If so, rewrite the instruction. |
385 | MCOperand Saved = Inst.getOperand(AddrOp); |
386 | MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg); |
387 | Inst = MCInst(); |
388 | Inst.setOpcode(Opcode); |
389 | Inst.addOperand(Saved); |
390 | Inst.addOperand(Seg); |
391 | } |
392 | |
393 | static unsigned getRetOpcode(const X86Subtarget &Subtarget) { |
394 | return Subtarget.is64Bit() ? X86::RETQ : X86::RETL; |
395 | } |
396 | |
397 | Optional<MCOperand> |
398 | X86MCInstLower::LowerMachineOperand(const MachineInstr *MI, |
399 | const MachineOperand &MO) const { |
400 | switch (MO.getType()) { |
401 | default: |
402 | MI->print(errs()); |
403 | llvm_unreachable("unknown operand type")::llvm::llvm_unreachable_internal("unknown operand type", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 403); |
404 | case MachineOperand::MO_Register: |
405 | // Ignore all implicit register operands. |
406 | if (MO.isImplicit()) |
407 | return None; |
408 | return MCOperand::createReg(MO.getReg()); |
409 | case MachineOperand::MO_Immediate: |
410 | return MCOperand::createImm(MO.getImm()); |
411 | case MachineOperand::MO_MachineBasicBlock: |
412 | case MachineOperand::MO_GlobalAddress: |
413 | case MachineOperand::MO_ExternalSymbol: |
414 | return LowerSymbolOperand(MO, GetSymbolFromOperand(MO)); |
415 | case MachineOperand::MO_MCSymbol: |
416 | return LowerSymbolOperand(MO, MO.getMCSymbol()); |
417 | case MachineOperand::MO_JumpTableIndex: |
418 | return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex())); |
419 | case MachineOperand::MO_ConstantPoolIndex: |
420 | return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex())); |
421 | case MachineOperand::MO_BlockAddress: |
422 | return LowerSymbolOperand( |
423 | MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); |
424 | case MachineOperand::MO_RegisterMask: |
425 | // Ignore call clobbers. |
426 | return None; |
427 | } |
428 | } |
429 | |
// Lower \p MI to \p OutMI: copy the opcode, lower every machine operand to
// an MCOperand (operands that lower to nothing, e.g. register masks, are
// dropped), then rewrite a number of special cases — pseudos, reversible VEX
// moves, and instructions with shorter alternative encodings — into their
// final MC opcode.
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1 + X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of VEX.B
  // if one of the registers is extended, but other isn't.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    // Only commute when dest is a legacy register and source is extended;
    // the _REV form then encodes the extended register via VEX.R.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
      case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    // Same VEX.R-vs-VEX.B shrink as above, but these are three-operand
    // merge forms, so the extended register check is on operand 2.
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Pseudo lowers to a plain subtarget-appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }

  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
  // instruction.
  // NOTE: the case labels live inside this brace scope so they can share
  // the local `Opcode` and the SetTailJmpOpcode label via goto.
  {
    unsigned Opcode;
  case X86::TAILJMPr:
    Opcode = X86::JMP32r;
    goto SetTailJmpOpcode;
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    Opcode = X86::JMP_1;
    goto SetTailJmpOpcode;

  SetTailJmpOpcode:
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC: {
    // Conditional tail jumps become JCC_1 with target + condition code.
    MCOperand Saved = OutMI.getOperand(0);
    MCOperand Saved2 = OutMI.getOperand(1);
    OutMI = MCInst();
    OutMI.setOpcode(X86::JCC_1);
    OutMI.addOperand(Saved);
    OutMI.addOperand(Saved2);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:
  case X86::MOV16mr:
  case X86::MOV16rm:
  case X86::MOV32mr:
  case X86::MOV32rm: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::MOV8mr_NOREX:
    case X86::MOV8mr: NewOpc = X86::MOV8o32a; break;
    case X86::MOV8rm_NOREX:
    case X86::MOV8rm: NewOpc = X86::MOV8ao32; break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
    case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
    case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
    }
    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
    break;
  }

  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
  case X86::OR8ri: case X86::OR16ri: case X86::OR32ri: case X86::OR64ri32:
  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
    // ALU-with-immediate against %al/%ax/%eax/%rax has a dedicated shorter
    // "accumulator" encoding; map to it and let SimplifyShortImmForm decide
    // whether the operands actually allow the substitution.
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::ADC8ri: NewOpc = X86::ADC8i8; break;
    case X86::ADC16ri: NewOpc = X86::ADC16i16; break;
    case X86::ADC32ri: NewOpc = X86::ADC32i32; break;
    case X86::ADC64ri32: NewOpc = X86::ADC64i32; break;
    case X86::ADD8ri: NewOpc = X86::ADD8i8; break;
    case X86::ADD16ri: NewOpc = X86::ADD16i16; break;
    case X86::ADD32ri: NewOpc = X86::ADD32i32; break;
    case X86::ADD64ri32: NewOpc = X86::ADD64i32; break;
    case X86::AND8ri: NewOpc = X86::AND8i8; break;
    case X86::AND16ri: NewOpc = X86::AND16i16; break;
    case X86::AND32ri: NewOpc = X86::AND32i32; break;
    case X86::AND64ri32: NewOpc = X86::AND64i32; break;
    case X86::CMP8ri: NewOpc = X86::CMP8i8; break;
    case X86::CMP16ri: NewOpc = X86::CMP16i16; break;
    case X86::CMP32ri: NewOpc = X86::CMP32i32; break;
    case X86::CMP64ri32: NewOpc = X86::CMP64i32; break;
    case X86::OR8ri: NewOpc = X86::OR8i8; break;
    case X86::OR16ri: NewOpc = X86::OR16i16; break;
    case X86::OR32ri: NewOpc = X86::OR32i32; break;
    case X86::OR64ri32: NewOpc = X86::OR64i32; break;
    case X86::SBB8ri: NewOpc = X86::SBB8i8; break;
    case X86::SBB16ri: NewOpc = X86::SBB16i16; break;
    case X86::SBB32ri: NewOpc = X86::SBB32i32; break;
    case X86::SBB64ri32: NewOpc = X86::SBB64i32; break;
    case X86::SUB8ri: NewOpc = X86::SUB8i8; break;
    case X86::SUB16ri: NewOpc = X86::SUB16i16; break;
    case X86::SUB32ri: NewOpc = X86::SUB32i32; break;
    case X86::SUB64ri32: NewOpc = X86::SUB64i32; break;
    case X86::TEST8ri: NewOpc = X86::TEST8i8; break;
    case X86::TEST16ri: NewOpc = X86::TEST16i16; break;
    case X86::TEST32ri: NewOpc = X86::TEST32i32; break;
    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
    case X86::XOR8ri: NewOpc = X86::XOR8i8; break;
    case X86::XOR16ri: NewOpc = X86::XOR16i16; break;
    case X86::XOR32ri: NewOpc = X86::XOR32i32; break;
    case X86::XOR64ri32: NewOpc = X86::XOR64i32; break;
    }
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}
683 | |
684 | void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering, |
685 | const MachineInstr &MI) { |
686 | |
687 | bool is64Bits = MI.getOpcode() == X86::TLS_addr64 || |
688 | MI.getOpcode() == X86::TLS_base_addr64; |
689 | |
690 | bool needsPadding = MI.getOpcode() == X86::TLS_addr64; |
691 | |
692 | MCContext &context = OutStreamer->getContext(); |
693 | |
694 | if (needsPadding) |
695 | EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX)); |
696 | |
697 | MCSymbolRefExpr::VariantKind SRVK; |
698 | switch (MI.getOpcode()) { |
699 | case X86::TLS_addr32: |
700 | case X86::TLS_addr64: |
701 | SRVK = MCSymbolRefExpr::VK_TLSGD; |
702 | break; |
703 | case X86::TLS_base_addr32: |
704 | SRVK = MCSymbolRefExpr::VK_TLSLDM; |
705 | break; |
706 | case X86::TLS_base_addr64: |
707 | SRVK = MCSymbolRefExpr::VK_TLSLD; |
708 | break; |
709 | default: |
710 | llvm_unreachable("unexpected opcode")::llvm::llvm_unreachable_internal("unexpected opcode", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 710); |
711 | } |
712 | |
713 | MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)); |
714 | const MCSymbolRefExpr *symRef = MCSymbolRefExpr::create(sym, SRVK, context); |
715 | |
716 | MCInst LEA; |
717 | if (is64Bits) { |
718 | LEA.setOpcode(X86::LEA64r); |
719 | LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest |
720 | LEA.addOperand(MCOperand::createReg(X86::RIP)); // base |
721 | LEA.addOperand(MCOperand::createImm(1)); // scale |
722 | LEA.addOperand(MCOperand::createReg(0)); // index |
723 | LEA.addOperand(MCOperand::createExpr(symRef)); // disp |
724 | LEA.addOperand(MCOperand::createReg(0)); // seg |
725 | } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) { |
726 | LEA.setOpcode(X86::LEA32r); |
727 | LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest |
728 | LEA.addOperand(MCOperand::createReg(X86::EBX)); // base |
729 | LEA.addOperand(MCOperand::createImm(1)); // scale |
730 | LEA.addOperand(MCOperand::createReg(0)); // index |
731 | LEA.addOperand(MCOperand::createExpr(symRef)); // disp |
732 | LEA.addOperand(MCOperand::createReg(0)); // seg |
733 | } else { |
734 | LEA.setOpcode(X86::LEA32r); |
735 | LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest |
736 | LEA.addOperand(MCOperand::createReg(0)); // base |
737 | LEA.addOperand(MCOperand::createImm(1)); // scale |
738 | LEA.addOperand(MCOperand::createReg(X86::EBX)); // index |
739 | LEA.addOperand(MCOperand::createExpr(symRef)); // disp |
740 | LEA.addOperand(MCOperand::createReg(0)); // seg |
741 | } |
742 | EmitAndCountInstruction(LEA); |
743 | |
744 | if (needsPadding) { |
745 | EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX)); |
746 | EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX)); |
747 | EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX)); |
748 | } |
749 | |
750 | StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr"; |
751 | MCSymbol *tlsGetAddr = context.getOrCreateSymbol(name); |
752 | const MCSymbolRefExpr *tlsRef = |
753 | MCSymbolRefExpr::create(tlsGetAddr, MCSymbolRefExpr::VK_PLT, context); |
754 | |
755 | EmitAndCountInstruction( |
756 | MCInstBuilder(is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32) |
757 | .addExpr(tlsRef)); |
758 | } |
759 | |
760 | /// Emit the largest nop instruction smaller than or equal to \p NumBytes |
761 | /// bytes. Return the size of nop emitted. |
762 | static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, |
763 | const MCSubtargetInfo &STI) { |
764 | // This works only for 64bit. For 32bit we have to do additional checking if |
765 | // the CPU supports multi-byte nops. |
766 | assert(Is64Bit && "EmitNops only supports X86-64")((Is64Bit && "EmitNops only supports X86-64") ? static_cast <void> (0) : __assert_fail ("Is64Bit && \"EmitNops only supports X86-64\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 766, __PRETTY_FUNCTION__)); |
767 | |
768 | unsigned NopSize; |
769 | unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg; |
770 | Opc = IndexReg = Displacement = SegmentReg = 0; |
Value stored to 'Opc' is never read | |
771 | BaseReg = X86::RAX; |
772 | ScaleVal = 1; |
773 | switch (NumBytes) { |
774 | case 0: |
775 | llvm_unreachable("Zero nops?")::llvm::llvm_unreachable_internal("Zero nops?", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 775); |
776 | break; |
777 | case 1: |
778 | NopSize = 1; |
779 | Opc = X86::NOOP; |
780 | break; |
781 | case 2: |
782 | NopSize = 2; |
783 | Opc = X86::XCHG16ar; |
784 | break; |
785 | case 3: |
786 | NopSize = 3; |
787 | Opc = X86::NOOPL; |
788 | break; |
789 | case 4: |
790 | NopSize = 4; |
791 | Opc = X86::NOOPL; |
792 | Displacement = 8; |
793 | break; |
794 | case 5: |
795 | NopSize = 5; |
796 | Opc = X86::NOOPL; |
797 | Displacement = 8; |
798 | IndexReg = X86::RAX; |
799 | break; |
800 | case 6: |
801 | NopSize = 6; |
802 | Opc = X86::NOOPW; |
803 | Displacement = 8; |
804 | IndexReg = X86::RAX; |
805 | break; |
806 | case 7: |
807 | NopSize = 7; |
808 | Opc = X86::NOOPL; |
809 | Displacement = 512; |
810 | break; |
811 | case 8: |
812 | NopSize = 8; |
813 | Opc = X86::NOOPL; |
814 | Displacement = 512; |
815 | IndexReg = X86::RAX; |
816 | break; |
817 | case 9: |
818 | NopSize = 9; |
819 | Opc = X86::NOOPW; |
820 | Displacement = 512; |
821 | IndexReg = X86::RAX; |
822 | break; |
823 | default: |
824 | NopSize = 10; |
825 | Opc = X86::NOOPW; |
826 | Displacement = 512; |
827 | IndexReg = X86::RAX; |
828 | SegmentReg = X86::CS; |
829 | break; |
830 | } |
831 | |
832 | unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U); |
833 | NopSize += NumPrefixes; |
834 | for (unsigned i = 0; i != NumPrefixes; ++i) |
835 | OS.EmitBytes("\x66"); |
836 | |
837 | switch (Opc) { |
838 | default: llvm_unreachable("Unexpected opcode")::llvm::llvm_unreachable_internal("Unexpected opcode", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 838); |
839 | case X86::NOOP: |
840 | OS.EmitInstruction(MCInstBuilder(Opc), STI); |
841 | break; |
842 | case X86::XCHG16ar: |
843 | OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX).addReg(X86::AX), STI); |
844 | break; |
845 | case X86::NOOPL: |
846 | case X86::NOOPW: |
847 | OS.EmitInstruction(MCInstBuilder(Opc) |
848 | .addReg(BaseReg) |
849 | .addImm(ScaleVal) |
850 | .addReg(IndexReg) |
851 | .addImm(Displacement) |
852 | .addReg(SegmentReg), |
853 | STI); |
854 | break; |
855 | } |
856 | assert(NopSize <= NumBytes && "We overemitted?")((NopSize <= NumBytes && "We overemitted?") ? static_cast <void> (0) : __assert_fail ("NopSize <= NumBytes && \"We overemitted?\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 856, __PRETTY_FUNCTION__)); |
857 | return NopSize; |
858 | } |
859 | |
860 | /// Emit the optimal amount of multi-byte nops on X86. |
861 | static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit, |
862 | const MCSubtargetInfo &STI) { |
863 | unsigned NopsToEmit = NumBytes; |
864 | (void)NopsToEmit; |
865 | while (NumBytes) { |
866 | NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI); |
867 | assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!")((NopsToEmit >= NumBytes && "Emitted more than I asked for!" ) ? static_cast<void> (0) : __assert_fail ("NopsToEmit >= NumBytes && \"Emitted more than I asked for!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 867, __PRETTY_FUNCTION__)); |
868 | } |
869 | } |
870 | |
// Lower a STATEPOINT pseudo: either reserve patchable space as nops, or emit
// the actual call to the statepoint target, then record the statepoint in
// the stackmap section.
void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    // A non-zero patch-byte count means the call site is to be patched in
    // later; emit that many bytes of nops instead of a call.
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address. You'll fail asserts during load & relocation if this
      // symbol is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate. You'll fail asserts during load & relocation if this
      // address is to far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      // FIXME: Add retpoline support and remove this.
      if (Subtarget->useRetpolineIndirectCalls())
        report_fatal_error("Lowering register statepoints with retpoline not "
                           "yet implemented.");
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit call
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT
  SM.recordStatepoint(MI);
}
927 | |
928 | void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI, |
929 | X86MCInstLower &MCIL) { |
930 | // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>, |
931 | // <opcode>, <operands> |
932 | |
933 | unsigned DefRegister = FaultingMI.getOperand(0).getReg(); |
934 | FaultMaps::FaultKind FK = |
935 | static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm()); |
936 | MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol(); |
937 | unsigned Opcode = FaultingMI.getOperand(3).getImm(); |
938 | unsigned OperandsBeginIdx = 4; |
939 | |
940 | assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!")((FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!" ) ? static_cast<void> (0) : __assert_fail ("FK < FaultMaps::FaultKindMax && \"Invalid Faulting Kind!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 940, __PRETTY_FUNCTION__)); |
941 | FM.recordFaultingOp(FK, HandlerLabel); |
942 | |
943 | MCInst MI; |
944 | MI.setOpcode(Opcode); |
945 | |
946 | if (DefRegister != X86::NoRegister) |
947 | MI.addOperand(MCOperand::createReg(DefRegister)); |
948 | |
949 | for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx, |
950 | E = FaultingMI.operands_end(); |
951 | I != E; ++I) |
952 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I)) |
953 | MI.addOperand(MaybeOperand.getValue()); |
954 | |
955 | OutStreamer->AddComment("on-fault: " + HandlerLabel->getName()); |
956 | OutStreamer->EmitInstruction(MI, getSubtargetInfo()); |
957 | } |
958 | |
959 | void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI, |
960 | X86MCInstLower &MCIL) { |
961 | bool Is64Bits = Subtarget->is64Bit(); |
962 | MCContext &Ctx = OutStreamer->getContext(); |
963 | MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__"); |
964 | const MCSymbolRefExpr *Op = |
965 | MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx); |
966 | |
967 | EmitAndCountInstruction( |
968 | MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32) |
969 | .addExpr(Op)); |
970 | } |
971 | |
// Lower a PATCHABLE_OP pseudo: emit the wrapped instruction, guaranteeing
// that at least MinSize bytes are occupied so a runtime patcher can safely
// overwrite the site.
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize, opcode, operands

  unsigned MinSize = MI.getOperand(0).getImm();
  unsigned Opcode = MI.getOperand(1).getImm();

  // Lower the wrapped instruction (operands start at index 2).
  MCInst MCI;
  MCI.setOpcode(Opcode);
  for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      MCI.addOperand(MaybeOperand.getValue());

  // Encode the instruction into a scratch buffer to learn its actual size.
  SmallString<256> Code;
  SmallVector<MCFixup, 4> Fixups;
  raw_svector_ostream VecOS(Code);
  CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());

  // If the natural encoding falls short of MinSize, pad the site.
  if (Code.size() < MinSize) {
    if (MinSize == 2 && Opcode == X86::PUSH64r) {
      // This is an optimization that lets us get away without emitting a nop in
      // many cases.
      //
      // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes two
      // bytes too, so the check on MinSize is important.
      MCI.setOpcode(X86::PUSH64rmr);
    } else {
      // Otherwise prepend a nop of exactly MinSize bytes.
      unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
                                 getSubtargetInfo());
      assert(NopSize == MinSize && "Could not implement MinSize!");
      (void)NopSize;
    }
  }

  OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
}
1008 | |
1009 | // Lower a stackmap of the form: |
1010 | // <id>, <shadowBytes>, ... |
1011 | void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) { |
1012 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); |
1013 | SM.recordStackMap(MI); |
1014 | unsigned NumShadowBytes = MI.getOperand(1).getImm(); |
1015 | SMShadowTracker.reset(NumShadowBytes); |
1016 | } |
1017 | |
// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  // EncodedBytes counts the bytes consumed by the mov+call below so the
  // trailing nop padding fills the site up to exactly getNumPatchBytes().
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
                                           MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;  // extended register needs an extra REX bit
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    // FIXME: Add retpoline support and remove this.
    if (Subtarget->useRetpolineIndirectCalls())
      report_fatal_error(
          "Lowering patchpoint with retpoline not yet implemented.");
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}
1077 | |
1078 | void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, |
1079 | X86MCInstLower &MCIL) { |
1080 | assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64")((Subtarget->is64Bit() && "XRay custom events only supports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"XRay custom events only supports X86-64\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1080, __PRETTY_FUNCTION__)); |
1081 | |
1082 | // We want to emit the following pattern, which follows the x86 calling |
1083 | // convention to prepare for the trampoline call to be patched in. |
1084 | // |
1085 | // .p2align 1, ... |
1086 | // .Lxray_event_sled_N: |
1087 | // jmp +N // jump across the instrumentation sled |
1088 | // ... // set up arguments in register |
1089 | // callq __xray_CustomEvent@plt // force dependency to symbol |
1090 | // ... |
1091 | // <jump here> |
1092 | // |
1093 | // After patching, it would look something like: |
1094 | // |
1095 | // nopw (2-byte nop) |
1096 | // ... |
1097 | // callq __xrayCustomEvent // already lowered |
1098 | // ... |
1099 | // |
1100 | // --- |
1101 | // First we emit the label and the jump. |
1102 | auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true); |
1103 | OutStreamer->AddComment("# XRay Custom Event Log"); |
1104 | OutStreamer->EmitCodeAlignment(2); |
1105 | OutStreamer->EmitLabel(CurSled); |
1106 | |
1107 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as |
1108 | // an operand (computed as an offset from the jmp instruction). |
1109 | // FIXME: Find another less hacky way do force the relative jump. |
1110 | OutStreamer->EmitBinaryData("\xeb\x0f"); |
1111 | |
1112 | // The default C calling convention will place two arguments into %rcx and |
1113 | // %rdx -- so we only work with those. |
1114 | unsigned DestRegs[] = {X86::RDI, X86::RSI}; |
1115 | bool UsedMask[] = {false, false}; |
1116 | // Filled out in loop. |
1117 | unsigned SrcRegs[] = {0, 0}; |
1118 | |
1119 | // Then we put the operands in the %rdi and %rsi registers. We spill the |
1120 | // values in the register before we clobber them, and mark them as used in |
1121 | // UsedMask. In case the arguments are already in the correct register, we use |
1122 | // emit nops appropriately sized to keep the sled the same size in every |
1123 | // situation. |
1124 | for (unsigned I = 0; I < MI.getNumOperands(); ++I) |
1125 | if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) { |
1126 | assert(Op->isReg() && "Only support arguments in registers")((Op->isReg() && "Only support arguments in registers" ) ? static_cast<void> (0) : __assert_fail ("Op->isReg() && \"Only support arguments in registers\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1126, __PRETTY_FUNCTION__)); |
1127 | SrcRegs[I] = Op->getReg(); |
1128 | if (SrcRegs[I] != DestRegs[I]) { |
1129 | UsedMask[I] = true; |
1130 | EmitAndCountInstruction( |
1131 | MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I])); |
1132 | } else { |
1133 | EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo()); |
1134 | } |
1135 | } |
1136 | |
1137 | // Now that the register values are stashed, mov arguments into place. |
1138 | for (unsigned I = 0; I < MI.getNumOperands(); ++I) |
1139 | if (SrcRegs[I] != DestRegs[I]) |
1140 | EmitAndCountInstruction( |
1141 | MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I])); |
1142 | |
1143 | // We emit a hard dependency on the __xray_CustomEvent symbol, which is the |
1144 | // name of the trampoline to be implemented by the XRay runtime. |
1145 | auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent"); |
1146 | MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym); |
1147 | if (isPositionIndependent()) |
1148 | TOp.setTargetFlags(X86II::MO_PLT); |
1149 | |
1150 | // Emit the call instruction. |
1151 | EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32) |
1152 | .addOperand(MCIL.LowerSymbolOperand(TOp, TSym))); |
1153 | |
1154 | // Restore caller-saved and used registers. |
1155 | for (unsigned I = sizeof UsedMask; I-- > 0;) |
1156 | if (UsedMask[I]) |
1157 | EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I])); |
1158 | else |
1159 | EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo()); |
1160 | |
1161 | OutStreamer->AddComment("xray custom event end."); |
1162 | |
1163 | // Record the sled version. Older versions of this sled were spelled |
1164 | // differently, so we let the runtime handle the different offsets we're |
1165 | // using. |
1166 | recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 1); |
1167 | } |
1168 | |
1169 | void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI, |
1170 | X86MCInstLower &MCIL) { |
1171 | assert(Subtarget->is64Bit() && "XRay typed events only supports X86-64")((Subtarget->is64Bit() && "XRay typed events only supports X86-64" ) ? static_cast<void> (0) : __assert_fail ("Subtarget->is64Bit() && \"XRay typed events only supports X86-64\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1171, __PRETTY_FUNCTION__)); |
1172 | |
1173 | // We want to emit the following pattern, which follows the x86 calling |
1174 | // convention to prepare for the trampoline call to be patched in. |
1175 | // |
1176 | // .p2align 1, ... |
1177 | // .Lxray_event_sled_N: |
1178 | // jmp +N // jump across the instrumentation sled |
1179 | // ... // set up arguments in register |
1180 | // callq __xray_TypedEvent@plt // force dependency to symbol |
1181 | // ... |
1182 | // <jump here> |
1183 | // |
1184 | // After patching, it would look something like: |
1185 | // |
1186 | // nopw (2-byte nop) |
1187 | // ... |
1188 | // callq __xrayTypedEvent // already lowered |
1189 | // ... |
1190 | // |
1191 | // --- |
1192 | // First we emit the label and the jump. |
1193 | auto CurSled = OutContext.createTempSymbol("xray_typed_event_sled_", true); |
1194 | OutStreamer->AddComment("# XRay Typed Event Log"); |
1195 | OutStreamer->EmitCodeAlignment(2); |
1196 | OutStreamer->EmitLabel(CurSled); |
1197 | |
1198 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as |
1199 | // an operand (computed as an offset from the jmp instruction). |
1200 | // FIXME: Find another less hacky way do force the relative jump. |
1201 | OutStreamer->EmitBinaryData("\xeb\x14"); |
1202 | |
1203 | // An x86-64 convention may place three arguments into %rcx, %rdx, and R8, |
1204 | // so we'll work with those. Or we may be called via SystemV, in which case |
1205 | // we don't have to do any translation. |
1206 | unsigned DestRegs[] = {X86::RDI, X86::RSI, X86::RDX}; |
1207 | bool UsedMask[] = {false, false, false}; |
1208 | |
1209 | // Will fill out src regs in the loop. |
1210 | unsigned SrcRegs[] = {0, 0, 0}; |
1211 | |
1212 | // Then we put the operands in the SystemV registers. We spill the values in |
1213 | // the registers before we clobber them, and mark them as used in UsedMask. |
1214 | // In case the arguments are already in the correct register, we emit nops |
1215 | // appropriately sized to keep the sled the same size in every situation. |
1216 | for (unsigned I = 0; I < MI.getNumOperands(); ++I) |
1217 | if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) { |
1218 | // TODO: Is register only support adequate? |
1219 | assert(Op->isReg() && "Only supports arguments in registers")((Op->isReg() && "Only supports arguments in registers" ) ? static_cast<void> (0) : __assert_fail ("Op->isReg() && \"Only supports arguments in registers\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1219, __PRETTY_FUNCTION__)); |
1220 | SrcRegs[I] = Op->getReg(); |
1221 | if (SrcRegs[I] != DestRegs[I]) { |
1222 | UsedMask[I] = true; |
1223 | EmitAndCountInstruction( |
1224 | MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I])); |
1225 | } else { |
1226 | EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo()); |
1227 | } |
1228 | } |
1229 | |
1230 | // In the above loop we only stash all of the destination registers or emit |
1231 | // nops if the arguments are already in the right place. Doing the actually |
1232 | // moving is postponed until after all the registers are stashed so nothing |
1233 | // is clobbers. We've already added nops to account for the size of mov and |
1234 | // push if the register is in the right place, so we only have to worry about |
1235 | // emitting movs. |
1236 | for (unsigned I = 0; I < MI.getNumOperands(); ++I) |
1237 | if (UsedMask[I]) |
1238 | EmitAndCountInstruction( |
1239 | MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I])); |
1240 | |
1241 | // We emit a hard dependency on the __xray_TypedEvent symbol, which is the |
1242 | // name of the trampoline to be implemented by the XRay runtime. |
1243 | auto TSym = OutContext.getOrCreateSymbol("__xray_TypedEvent"); |
1244 | MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym); |
1245 | if (isPositionIndependent()) |
1246 | TOp.setTargetFlags(X86II::MO_PLT); |
1247 | |
1248 | // Emit the call instruction. |
1249 | EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32) |
1250 | .addOperand(MCIL.LowerSymbolOperand(TOp, TSym))); |
1251 | |
1252 | // Restore caller-saved and used registers. |
1253 | for (unsigned I = sizeof UsedMask; I-- > 0;) |
1254 | if (UsedMask[I]) |
1255 | EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I])); |
1256 | else |
1257 | EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo()); |
1258 | |
1259 | OutStreamer->AddComment("xray typed event end."); |
1260 | |
1261 | // Record the sled version. |
1262 | recordSled(CurSled, MI, SledKind::TYPED_EVENT, 0); |
1263 | } |
1264 | |
1265 | void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI, |
1266 | X86MCInstLower &MCIL) { |
1267 | // We want to emit the following pattern: |
1268 | // |
1269 | // .p2align 1, ... |
1270 | // .Lxray_sled_N: |
1271 | // jmp .tmpN |
1272 | // # 9 bytes worth of noops |
1273 | // |
1274 | // We need the 9 bytes because at runtime, we'd be patching over the full 11 |
1275 | // bytes with the following pattern: |
1276 | // |
1277 | // mov %r10, <function id, 32-bit> // 6 bytes |
1278 | // call <relative offset, 32-bits> // 5 bytes |
1279 | // |
1280 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); |
1281 | OutStreamer->EmitCodeAlignment(2); |
1282 | OutStreamer->EmitLabel(CurSled); |
1283 | |
1284 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as |
1285 | // an operand (computed as an offset from the jmp instruction). |
1286 | // FIXME: Find another less hacky way do force the relative jump. |
1287 | OutStreamer->EmitBytes("\xeb\x09"); |
1288 | EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo()); |
1289 | recordSled(CurSled, MI, SledKind::FUNCTION_ENTER); |
1290 | } |
1291 | |
1292 | void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI, |
1293 | X86MCInstLower &MCIL) { |
1294 | // Since PATCHABLE_RET takes the opcode of the return statement as an |
1295 | // argument, we use that to emit the correct form of the RET that we want. |
1296 | // i.e. when we see this: |
1297 | // |
1298 | // PATCHABLE_RET X86::RET ... |
1299 | // |
1300 | // We should emit the RET followed by sleds. |
1301 | // |
1302 | // .p2align 1, ... |
1303 | // .Lxray_sled_N: |
1304 | // ret # or equivalent instruction |
1305 | // # 10 bytes worth of noops |
1306 | // |
1307 | // This just makes sure that the alignment for the next instruction is 2. |
1308 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); |
1309 | OutStreamer->EmitCodeAlignment(2); |
1310 | OutStreamer->EmitLabel(CurSled); |
1311 | unsigned OpCode = MI.getOperand(0).getImm(); |
1312 | MCInst Ret; |
1313 | Ret.setOpcode(OpCode); |
1314 | for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end())) |
1315 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) |
1316 | Ret.addOperand(MaybeOperand.getValue()); |
1317 | OutStreamer->EmitInstruction(Ret, getSubtargetInfo()); |
1318 | EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo()); |
1319 | recordSled(CurSled, MI, SledKind::FUNCTION_EXIT); |
1320 | } |
1321 | |
1322 | void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, |
1323 | X86MCInstLower &MCIL) { |
1324 | // Like PATCHABLE_RET, we have the actual instruction in the operands to this |
1325 | // instruction so we lower that particular instruction and its operands. |
1326 | // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how |
1327 | // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to |
1328 | // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual |
1329 | // tail call much like how we have it in PATCHABLE_RET. |
1330 | auto CurSled = OutContext.createTempSymbol("xray_sled_", true); |
1331 | OutStreamer->EmitCodeAlignment(2); |
1332 | OutStreamer->EmitLabel(CurSled); |
1333 | auto Target = OutContext.createTempSymbol(); |
1334 | |
1335 | // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as |
1336 | // an operand (computed as an offset from the jmp instruction). |
1337 | // FIXME: Find another less hacky way do force the relative jump. |
1338 | OutStreamer->EmitBytes("\xeb\x09"); |
1339 | EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo()); |
1340 | OutStreamer->EmitLabel(Target); |
1341 | recordSled(CurSled, MI, SledKind::TAIL_CALL); |
1342 | |
1343 | unsigned OpCode = MI.getOperand(0).getImm(); |
1344 | MCInst TC; |
1345 | TC.setOpcode(OpCode); |
1346 | |
1347 | // Before emitting the instruction, add a comment to indicate that this is |
1348 | // indeed a tail call. |
1349 | OutStreamer->AddComment("TAILCALL"); |
1350 | for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end())) |
1351 | if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO)) |
1352 | TC.addOperand(MaybeOperand.getValue()); |
1353 | OutStreamer->EmitInstruction(TC, getSubtargetInfo()); |
1354 | } |
1355 | |
1356 | // Returns instruction preceding MBBI in MachineFunction. |
1357 | // If MBBI is the first instruction of the first basic block, returns null. |
1358 | static MachineBasicBlock::const_iterator |
1359 | PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) { |
1360 | const MachineBasicBlock *MBB = MBBI->getParent(); |
1361 | while (MBBI == MBB->begin()) { |
1362 | if (MBB == &MBB->getParent()->front()) |
1363 | return MachineBasicBlock::const_iterator(); |
1364 | MBB = MBB->getPrevNode(); |
1365 | MBBI = MBB->end(); |
1366 | } |
1367 | --MBBI; |
1368 | return MBBI; |
1369 | } |
1370 | |
1371 | static const Constant *getConstantFromPool(const MachineInstr &MI, |
1372 | const MachineOperand &Op) { |
1373 | if (!Op.isCPI() || Op.getOffset() != 0) |
1374 | return nullptr; |
1375 | |
1376 | ArrayRef<MachineConstantPoolEntry> Constants = |
1377 | MI.getParent()->getParent()->getConstantPool()->getConstants(); |
1378 | const MachineConstantPoolEntry &ConstantEntry = Constants[Op.getIndex()]; |
1379 | |
1380 | // Bail if this is a machine constant pool entry, we won't be able to dig out |
1381 | // anything useful. |
1382 | if (ConstantEntry.isMachineConstantPoolEntry()) |
1383 | return nullptr; |
1384 | |
1385 | const Constant *C = ConstantEntry.Val.ConstVal; |
1386 | assert((!C || ConstantEntry.getType() == C->getType()) &&(((!C || ConstantEntry.getType() == C->getType()) && "Expected a constant of the same type!") ? static_cast<void > (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1387, __PRETTY_FUNCTION__)) |
1387 | "Expected a constant of the same type!")(((!C || ConstantEntry.getType() == C->getType()) && "Expected a constant of the same type!") ? static_cast<void > (0) : __assert_fail ("(!C || ConstantEntry.getType() == C->getType()) && \"Expected a constant of the same type!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1387, __PRETTY_FUNCTION__)); |
1388 | return C; |
1389 | } |
1390 | |
1391 | static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx, |
1392 | unsigned SrcOp2Idx, ArrayRef<int> Mask) { |
1393 | std::string Comment; |
1394 | |
1395 | // Compute the name for a register. This is really goofy because we have |
1396 | // multiple instruction printers that could (in theory) use different |
1397 | // names. Fortunately most people use the ATT style (outside of Windows) |
1398 | // and they actually agree on register naming here. Ultimately, this is |
1399 | // a comment, and so its OK if it isn't perfect. |
1400 | auto GetRegisterName = [](unsigned RegNum) -> StringRef { |
1401 | return X86ATTInstPrinter::getRegisterName(RegNum); |
1402 | }; |
1403 | |
1404 | const MachineOperand &DstOp = MI->getOperand(0); |
1405 | const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx); |
1406 | const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx); |
1407 | |
1408 | StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem"; |
1409 | StringRef Src1Name = |
1410 | SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem"; |
1411 | StringRef Src2Name = |
1412 | SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem"; |
1413 | |
1414 | // One source operand, fix the mask to print all elements in one span. |
1415 | SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end()); |
1416 | if (Src1Name == Src2Name) |
1417 | for (int i = 0, e = ShuffleMask.size(); i != e; ++i) |
1418 | if (ShuffleMask[i] >= e) |
1419 | ShuffleMask[i] -= e; |
1420 | |
1421 | raw_string_ostream CS(Comment); |
1422 | CS << DstName; |
1423 | |
1424 | // Handle AVX512 MASK/MASXZ write mask comments. |
1425 | // MASK: zmmX {%kY} |
1426 | // MASKZ: zmmX {%kY} {z} |
1427 | if (SrcOp1Idx > 1) { |
1428 | assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask")(((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask" ) ? static_cast<void> (0) : __assert_fail ("(SrcOp1Idx == 2 || SrcOp1Idx == 3) && \"Unexpected writemask\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1428, __PRETTY_FUNCTION__)); |
1429 | |
1430 | const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1); |
1431 | if (WriteMaskOp.isReg()) { |
1432 | CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}"; |
1433 | |
1434 | if (SrcOp1Idx == 2) { |
1435 | CS << " {z}"; |
1436 | } |
1437 | } |
1438 | } |
1439 | |
1440 | CS << " = "; |
1441 | |
1442 | for (int i = 0, e = ShuffleMask.size(); i != e; ++i) { |
1443 | if (i != 0) |
1444 | CS << ","; |
1445 | if (ShuffleMask[i] == SM_SentinelZero) { |
1446 | CS << "zero"; |
1447 | continue; |
1448 | } |
1449 | |
1450 | // Otherwise, it must come from src1 or src2. Print the span of elements |
1451 | // that comes from this src. |
1452 | bool isSrc1 = ShuffleMask[i] < (int)e; |
1453 | CS << (isSrc1 ? Src1Name : Src2Name) << '['; |
1454 | |
1455 | bool IsFirst = true; |
1456 | while (i != e && ShuffleMask[i] != SM_SentinelZero && |
1457 | (ShuffleMask[i] < (int)e) == isSrc1) { |
1458 | if (!IsFirst) |
1459 | CS << ','; |
1460 | else |
1461 | IsFirst = false; |
1462 | if (ShuffleMask[i] == SM_SentinelUndef) |
1463 | CS << "u"; |
1464 | else |
1465 | CS << ShuffleMask[i] % (int)e; |
1466 | ++i; |
1467 | } |
1468 | CS << ']'; |
1469 | --i; // For loop increments element #. |
1470 | } |
1471 | CS.flush(); |
1472 | |
1473 | return Comment; |
1474 | } |
1475 | |
1476 | static void printConstant(const APInt &Val, raw_ostream &CS) { |
1477 | if (Val.getBitWidth() <= 64) { |
1478 | CS << Val.getZExtValue(); |
1479 | } else { |
1480 | // print multi-word constant as (w0,w1) |
1481 | CS << "("; |
1482 | for (int i = 0, N = Val.getNumWords(); i < N; ++i) { |
1483 | if (i > 0) |
1484 | CS << ","; |
1485 | CS << Val.getRawData()[i]; |
1486 | } |
1487 | CS << ")"; |
1488 | } |
1489 | } |
1490 | |
1491 | static void printConstant(const APFloat &Flt, raw_ostream &CS) { |
1492 | SmallString<32> Str; |
1493 | // Force scientific notation to distinquish from integers. |
1494 | Flt.toString(Str, 0, 0); |
1495 | CS << Str; |
1496 | } |
1497 | |
1498 | static void printConstant(const Constant *COp, raw_ostream &CS) { |
1499 | if (isa<UndefValue>(COp)) { |
1500 | CS << "u"; |
1501 | } else if (auto *CI = dyn_cast<ConstantInt>(COp)) { |
1502 | printConstant(CI->getValue(), CS); |
1503 | } else if (auto *CF = dyn_cast<ConstantFP>(COp)) { |
1504 | printConstant(CF->getValueAPF(), CS); |
1505 | } else { |
1506 | CS << "?"; |
1507 | } |
1508 | } |
1509 | |
1510 | void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) { |
1511 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1511, __PRETTY_FUNCTION__)); |
1512 | assert(getSubtarget().isOSWindows() && "SEH_ instruction Windows only")((getSubtarget().isOSWindows() && "SEH_ instruction Windows only" ) ? static_cast<void> (0) : __assert_fail ("getSubtarget().isOSWindows() && \"SEH_ instruction Windows only\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1512, __PRETTY_FUNCTION__)); |
1513 | const X86RegisterInfo *RI = |
1514 | MF->getSubtarget<X86Subtarget>().getRegisterInfo(); |
1515 | |
1516 | // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86. |
1517 | if (EmitFPOData) { |
1518 | X86TargetStreamer *XTS = |
1519 | static_cast<X86TargetStreamer *>(OutStreamer->getTargetStreamer()); |
1520 | switch (MI->getOpcode()) { |
1521 | case X86::SEH_PushReg: |
1522 | XTS->emitFPOPushReg(MI->getOperand(0).getImm()); |
1523 | break; |
1524 | case X86::SEH_StackAlloc: |
1525 | XTS->emitFPOStackAlloc(MI->getOperand(0).getImm()); |
1526 | break; |
1527 | case X86::SEH_StackAlign: |
1528 | XTS->emitFPOStackAlign(MI->getOperand(0).getImm()); |
1529 | break; |
1530 | case X86::SEH_SetFrame: |
1531 | assert(MI->getOperand(1).getImm() == 0 &&((MI->getOperand(1).getImm() == 0 && ".cv_fpo_setframe takes no offset" ) ? static_cast<void> (0) : __assert_fail ("MI->getOperand(1).getImm() == 0 && \".cv_fpo_setframe takes no offset\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1532, __PRETTY_FUNCTION__)) |
1532 | ".cv_fpo_setframe takes no offset")((MI->getOperand(1).getImm() == 0 && ".cv_fpo_setframe takes no offset" ) ? static_cast<void> (0) : __assert_fail ("MI->getOperand(1).getImm() == 0 && \".cv_fpo_setframe takes no offset\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1532, __PRETTY_FUNCTION__)); |
1533 | XTS->emitFPOSetFrame(MI->getOperand(0).getImm()); |
1534 | break; |
1535 | case X86::SEH_EndPrologue: |
1536 | XTS->emitFPOEndPrologue(); |
1537 | break; |
1538 | case X86::SEH_SaveReg: |
1539 | case X86::SEH_SaveXMM: |
1540 | case X86::SEH_PushFrame: |
1541 | llvm_unreachable("SEH_ directive incompatible with FPO")::llvm::llvm_unreachable_internal("SEH_ directive incompatible with FPO" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1541); |
1542 | break; |
1543 | default: |
1544 | llvm_unreachable("expected SEH_ instruction")::llvm::llvm_unreachable_internal("expected SEH_ instruction" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1544); |
1545 | } |
1546 | return; |
1547 | } |
1548 | |
1549 | // Otherwise, use the .seh_ directives for all other Windows platforms. |
1550 | switch (MI->getOpcode()) { |
1551 | case X86::SEH_PushReg: |
1552 | OutStreamer->EmitWinCFIPushReg( |
1553 | RI->getSEHRegNum(MI->getOperand(0).getImm())); |
1554 | break; |
1555 | |
1556 | case X86::SEH_SaveReg: |
1557 | OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()), |
1558 | MI->getOperand(1).getImm()); |
1559 | break; |
1560 | |
1561 | case X86::SEH_SaveXMM: |
1562 | OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()), |
1563 | MI->getOperand(1).getImm()); |
1564 | break; |
1565 | |
1566 | case X86::SEH_StackAlloc: |
1567 | OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm()); |
1568 | break; |
1569 | |
1570 | case X86::SEH_SetFrame: |
1571 | OutStreamer->EmitWinCFISetFrame( |
1572 | RI->getSEHRegNum(MI->getOperand(0).getImm()), |
1573 | MI->getOperand(1).getImm()); |
1574 | break; |
1575 | |
1576 | case X86::SEH_PushFrame: |
1577 | OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm()); |
1578 | break; |
1579 | |
1580 | case X86::SEH_EndPrologue: |
1581 | OutStreamer->EmitWinCFIEndProlog(); |
1582 | break; |
1583 | |
1584 | default: |
1585 | llvm_unreachable("expected SEH_ instruction")::llvm::llvm_unreachable_internal("expected SEH_ instruction" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1585); |
1586 | } |
1587 | } |
1588 | |
1589 | static unsigned getRegisterWidth(const MCOperandInfo &Info) { |
1590 | if (Info.RegClass == X86::VR128RegClassID || |
1591 | Info.RegClass == X86::VR128XRegClassID) |
1592 | return 128; |
1593 | if (Info.RegClass == X86::VR256RegClassID || |
1594 | Info.RegClass == X86::VR256XRegClassID) |
1595 | return 256; |
1596 | if (Info.RegClass == X86::VR512RegClassID) |
1597 | return 512; |
1598 | llvm_unreachable("Unknown register class!")::llvm::llvm_unreachable_internal("Unknown register class!", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1598); |
1599 | } |
1600 | |
1601 | void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { |
1602 | X86MCInstLower MCInstLowering(*MF, *this); |
1603 | const X86RegisterInfo *RI = |
1604 | MF->getSubtarget<X86Subtarget>().getRegisterInfo(); |
1605 | |
1606 | // Add a comment about EVEX-2-VEX compression for AVX-512 instrs that |
1607 | // are compressed from EVEX encoding to VEX encoding. |
1608 | if (TM.Options.MCOptions.ShowMCEncoding) { |
1609 | if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX) |
1610 | OutStreamer->AddComment("EVEX TO VEX Compression ", false); |
1611 | } |
1612 | |
1613 | switch (MI->getOpcode()) { |
1614 | case TargetOpcode::DBG_VALUE: |
1615 | llvm_unreachable("Should be handled target independently")::llvm::llvm_unreachable_internal("Should be handled target independently" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1615); |
1616 | |
1617 | // Emit nothing here but a comment if we can. |
1618 | case X86::Int_MemBarrier: |
1619 | OutStreamer->emitRawComment("MEMBARRIER"); |
1620 | return; |
1621 | |
1622 | case X86::EH_RETURN: |
1623 | case X86::EH_RETURN64: { |
1624 | // Lower these as normal, but add some comments. |
1625 | unsigned Reg = MI->getOperand(0).getReg(); |
1626 | OutStreamer->AddComment(StringRef("eh_return, addr: %") + |
1627 | X86ATTInstPrinter::getRegisterName(Reg)); |
1628 | break; |
1629 | } |
1630 | case X86::CLEANUPRET: { |
1631 | // Lower these as normal, but add some comments. |
1632 | OutStreamer->AddComment("CLEANUPRET"); |
1633 | break; |
1634 | } |
1635 | |
1636 | case X86::CATCHRET: { |
1637 | // Lower these as normal, but add some comments. |
1638 | OutStreamer->AddComment("CATCHRET"); |
1639 | break; |
1640 | } |
1641 | |
1642 | case X86::TAILJMPr: |
1643 | case X86::TAILJMPm: |
1644 | case X86::TAILJMPd: |
1645 | case X86::TAILJMPd_CC: |
1646 | case X86::TAILJMPr64: |
1647 | case X86::TAILJMPm64: |
1648 | case X86::TAILJMPd64: |
1649 | case X86::TAILJMPd64_CC: |
1650 | case X86::TAILJMPr64_REX: |
1651 | case X86::TAILJMPm64_REX: |
1652 | // Lower these as normal, but add some comments. |
1653 | OutStreamer->AddComment("TAILCALL"); |
1654 | break; |
1655 | |
1656 | case X86::TLS_addr32: |
1657 | case X86::TLS_addr64: |
1658 | case X86::TLS_base_addr32: |
1659 | case X86::TLS_base_addr64: |
1660 | return LowerTlsAddr(MCInstLowering, *MI); |
1661 | |
1662 | case X86::MOVPC32r: { |
1663 | // This is a pseudo op for a two instruction sequence with a label, which |
1664 | // looks like: |
1665 | // call "L1$pb" |
1666 | // "L1$pb": |
1667 | // popl %esi |
1668 | |
1669 | // Emit the call. |
1670 | MCSymbol *PICBase = MF->getPICBaseSymbol(); |
1671 | // FIXME: We would like an efficient form for this, so we don't have to do a |
1672 | // lot of extra uniquing. |
1673 | EmitAndCountInstruction( |
1674 | MCInstBuilder(X86::CALLpcrel32) |
1675 | .addExpr(MCSymbolRefExpr::create(PICBase, OutContext))); |
1676 | |
1677 | const X86FrameLowering *FrameLowering = |
1678 | MF->getSubtarget<X86Subtarget>().getFrameLowering(); |
1679 | bool hasFP = FrameLowering->hasFP(*MF); |
1680 | |
1681 | // TODO: This is needed only if we require precise CFA. |
1682 | bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() && |
1683 | !OutStreamer->getDwarfFrameInfos().back().End; |
1684 | |
1685 | int stackGrowth = -RI->getSlotSize(); |
1686 | |
1687 | if (HasActiveDwarfFrame && !hasFP) { |
1688 | OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth); |
1689 | } |
1690 | |
1691 | // Emit the label. |
1692 | OutStreamer->EmitLabel(PICBase); |
1693 | |
1694 | // popl $reg |
1695 | EmitAndCountInstruction( |
1696 | MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg())); |
1697 | |
1698 | if (HasActiveDwarfFrame && !hasFP) { |
1699 | OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth); |
1700 | } |
1701 | return; |
1702 | } |
1703 | |
1704 | case X86::ADD32ri: { |
1705 | // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri. |
1706 | if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS) |
1707 | break; |
1708 | |
1709 | // Okay, we have something like: |
1710 | // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL) |
1711 | |
1712 | // For this, we want to print something like: |
1713 | // MYGLOBAL + (. - PICBASE) |
1714 | // However, we can't generate a ".", so just emit a new label here and refer |
1715 | // to it. |
1716 | MCSymbol *DotSym = OutContext.createTempSymbol(); |
1717 | OutStreamer->EmitLabel(DotSym); |
1718 | |
1719 | // Now that we have emitted the label, lower the complex operand expression. |
1720 | MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2)); |
1721 | |
1722 | const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext); |
1723 | const MCExpr *PICBase = |
1724 | MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext); |
1725 | DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext); |
1726 | |
1727 | DotExpr = MCBinaryExpr::createAdd( |
1728 | MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext); |
1729 | |
1730 | EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri) |
1731 | .addReg(MI->getOperand(0).getReg()) |
1732 | .addReg(MI->getOperand(1).getReg()) |
1733 | .addExpr(DotExpr)); |
1734 | return; |
1735 | } |
1736 | case TargetOpcode::STATEPOINT: |
1737 | return LowerSTATEPOINT(*MI, MCInstLowering); |
1738 | |
1739 | case TargetOpcode::FAULTING_OP: |
1740 | return LowerFAULTING_OP(*MI, MCInstLowering); |
1741 | |
1742 | case TargetOpcode::FENTRY_CALL: |
1743 | return LowerFENTRY_CALL(*MI, MCInstLowering); |
1744 | |
1745 | case TargetOpcode::PATCHABLE_OP: |
1746 | return LowerPATCHABLE_OP(*MI, MCInstLowering); |
1747 | |
1748 | case TargetOpcode::STACKMAP: |
1749 | return LowerSTACKMAP(*MI); |
1750 | |
1751 | case TargetOpcode::PATCHPOINT: |
1752 | return LowerPATCHPOINT(*MI, MCInstLowering); |
1753 | |
1754 | case TargetOpcode::PATCHABLE_FUNCTION_ENTER: |
1755 | return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering); |
1756 | |
1757 | case TargetOpcode::PATCHABLE_RET: |
1758 | return LowerPATCHABLE_RET(*MI, MCInstLowering); |
1759 | |
1760 | case TargetOpcode::PATCHABLE_TAIL_CALL: |
1761 | return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering); |
1762 | |
1763 | case TargetOpcode::PATCHABLE_EVENT_CALL: |
1764 | return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering); |
1765 | |
1766 | case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL: |
1767 | return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering); |
1768 | |
1769 | case X86::MORESTACK_RET: |
1770 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); |
1771 | return; |
1772 | |
1773 | case X86::MORESTACK_RET_RESTORE_R10: |
1774 | // Return, then restore R10. |
1775 | EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget))); |
1776 | EmitAndCountInstruction( |
1777 | MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX)); |
1778 | return; |
1779 | |
1780 | case X86::SEH_PushReg: |
1781 | case X86::SEH_SaveReg: |
1782 | case X86::SEH_SaveXMM: |
1783 | case X86::SEH_StackAlloc: |
1784 | case X86::SEH_StackAlign: |
1785 | case X86::SEH_SetFrame: |
1786 | case X86::SEH_PushFrame: |
1787 | case X86::SEH_EndPrologue: |
1788 | EmitSEHInstruction(MI); |
1789 | return; |
1790 | |
1791 | case X86::SEH_Epilogue: { |
1792 | assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?")((MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?" ) ? static_cast<void> (0) : __assert_fail ("MF->hasWinCFI() && \"SEH_ instruction in function without WinCFI?\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1792, __PRETTY_FUNCTION__)); |
1793 | MachineBasicBlock::const_iterator MBBI(MI); |
1794 | // Check if preceded by a call and emit nop if so. |
1795 | for (MBBI = PrevCrossBBInst(MBBI); |
1796 | MBBI != MachineBasicBlock::const_iterator(); |
1797 | MBBI = PrevCrossBBInst(MBBI)) { |
1798 | // Conservatively assume that pseudo instructions don't emit code and keep |
1799 | // looking for a call. We may emit an unnecessary nop in some cases. |
1800 | if (!MBBI->isPseudo()) { |
1801 | if (MBBI->isCall()) |
1802 | EmitAndCountInstruction(MCInstBuilder(X86::NOOP)); |
1803 | break; |
1804 | } |
1805 | } |
1806 | return; |
1807 | } |
1808 | |
1809 | // Lower PSHUFB and VPERMILP normally but add a comment if we can find |
1810 | // a constant shuffle mask. We won't be able to do this at the MC layer |
1811 | // because the mask isn't an immediate. |
1812 | case X86::PSHUFBrm: |
1813 | case X86::VPSHUFBrm: |
1814 | case X86::VPSHUFBYrm: |
1815 | case X86::VPSHUFBZ128rm: |
1816 | case X86::VPSHUFBZ128rmk: |
1817 | case X86::VPSHUFBZ128rmkz: |
1818 | case X86::VPSHUFBZ256rm: |
1819 | case X86::VPSHUFBZ256rmk: |
1820 | case X86::VPSHUFBZ256rmkz: |
1821 | case X86::VPSHUFBZrm: |
1822 | case X86::VPSHUFBZrmk: |
1823 | case X86::VPSHUFBZrmkz: { |
1824 | if (!OutStreamer->isVerboseAsm()) |
1825 | break; |
1826 | unsigned SrcIdx, MaskIdx; |
1827 | switch (MI->getOpcode()) { |
1828 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1828); |
1829 | case X86::PSHUFBrm: |
1830 | case X86::VPSHUFBrm: |
1831 | case X86::VPSHUFBYrm: |
1832 | case X86::VPSHUFBZ128rm: |
1833 | case X86::VPSHUFBZ256rm: |
1834 | case X86::VPSHUFBZrm: |
1835 | SrcIdx = 1; MaskIdx = 5; break; |
1836 | case X86::VPSHUFBZ128rmkz: |
1837 | case X86::VPSHUFBZ256rmkz: |
1838 | case X86::VPSHUFBZrmkz: |
1839 | SrcIdx = 2; MaskIdx = 6; break; |
1840 | case X86::VPSHUFBZ128rmk: |
1841 | case X86::VPSHUFBZ256rmk: |
1842 | case X86::VPSHUFBZrmk: |
1843 | SrcIdx = 3; MaskIdx = 7; break; |
1844 | } |
1845 | |
1846 | assert(MI->getNumOperands() >= 6 &&((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1847, __PRETTY_FUNCTION__)) |
1847 | "We should always have at least 6 operands!")((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1847, __PRETTY_FUNCTION__)); |
1848 | |
1849 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx); |
1850 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { |
1851 | unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); |
1852 | SmallVector<int, 64> Mask; |
1853 | DecodePSHUFBMask(C, Width, Mask); |
1854 | if (!Mask.empty()) |
1855 | OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask)); |
1856 | } |
1857 | break; |
1858 | } |
1859 | |
1860 | case X86::VPERMILPSrm: |
1861 | case X86::VPERMILPSYrm: |
1862 | case X86::VPERMILPSZ128rm: |
1863 | case X86::VPERMILPSZ128rmk: |
1864 | case X86::VPERMILPSZ128rmkz: |
1865 | case X86::VPERMILPSZ256rm: |
1866 | case X86::VPERMILPSZ256rmk: |
1867 | case X86::VPERMILPSZ256rmkz: |
1868 | case X86::VPERMILPSZrm: |
1869 | case X86::VPERMILPSZrmk: |
1870 | case X86::VPERMILPSZrmkz: |
1871 | case X86::VPERMILPDrm: |
1872 | case X86::VPERMILPDYrm: |
1873 | case X86::VPERMILPDZ128rm: |
1874 | case X86::VPERMILPDZ128rmk: |
1875 | case X86::VPERMILPDZ128rmkz: |
1876 | case X86::VPERMILPDZ256rm: |
1877 | case X86::VPERMILPDZ256rmk: |
1878 | case X86::VPERMILPDZ256rmkz: |
1879 | case X86::VPERMILPDZrm: |
1880 | case X86::VPERMILPDZrmk: |
1881 | case X86::VPERMILPDZrmkz: { |
1882 | if (!OutStreamer->isVerboseAsm()) |
1883 | break; |
1884 | unsigned SrcIdx, MaskIdx; |
1885 | unsigned ElSize; |
1886 | switch (MI->getOpcode()) { |
1887 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1887); |
1888 | case X86::VPERMILPSrm: |
1889 | case X86::VPERMILPSYrm: |
1890 | case X86::VPERMILPSZ128rm: |
1891 | case X86::VPERMILPSZ256rm: |
1892 | case X86::VPERMILPSZrm: |
1893 | SrcIdx = 1; MaskIdx = 5; ElSize = 32; break; |
1894 | case X86::VPERMILPSZ128rmkz: |
1895 | case X86::VPERMILPSZ256rmkz: |
1896 | case X86::VPERMILPSZrmkz: |
1897 | SrcIdx = 2; MaskIdx = 6; ElSize = 32; break; |
1898 | case X86::VPERMILPSZ128rmk: |
1899 | case X86::VPERMILPSZ256rmk: |
1900 | case X86::VPERMILPSZrmk: |
1901 | SrcIdx = 3; MaskIdx = 7; ElSize = 32; break; |
1902 | case X86::VPERMILPDrm: |
1903 | case X86::VPERMILPDYrm: |
1904 | case X86::VPERMILPDZ128rm: |
1905 | case X86::VPERMILPDZ256rm: |
1906 | case X86::VPERMILPDZrm: |
1907 | SrcIdx = 1; MaskIdx = 5; ElSize = 64; break; |
1908 | case X86::VPERMILPDZ128rmkz: |
1909 | case X86::VPERMILPDZ256rmkz: |
1910 | case X86::VPERMILPDZrmkz: |
1911 | SrcIdx = 2; MaskIdx = 6; ElSize = 64; break; |
1912 | case X86::VPERMILPDZ128rmk: |
1913 | case X86::VPERMILPDZ256rmk: |
1914 | case X86::VPERMILPDZrmk: |
1915 | SrcIdx = 3; MaskIdx = 7; ElSize = 64; break; |
1916 | } |
1917 | |
1918 | assert(MI->getNumOperands() >= 6 &&((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1919, __PRETTY_FUNCTION__)) |
1919 | "We should always have at least 6 operands!")((MI->getNumOperands() >= 6 && "We should always have at least 6 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 6 && \"We should always have at least 6 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1919, __PRETTY_FUNCTION__)); |
1920 | |
1921 | const MachineOperand &MaskOp = MI->getOperand(MaskIdx); |
1922 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { |
1923 | unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); |
1924 | SmallVector<int, 16> Mask; |
1925 | DecodeVPERMILPMask(C, ElSize, Width, Mask); |
1926 | if (!Mask.empty()) |
1927 | OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask)); |
1928 | } |
1929 | break; |
1930 | } |
1931 | |
1932 | case X86::VPERMIL2PDrm: |
1933 | case X86::VPERMIL2PSrm: |
1934 | case X86::VPERMIL2PDYrm: |
1935 | case X86::VPERMIL2PSYrm: { |
1936 | if (!OutStreamer->isVerboseAsm()) |
1937 | break; |
1938 | assert(MI->getNumOperands() >= 8 &&((MI->getNumOperands() >= 8 && "We should always have at least 8 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 8 && \"We should always have at least 8 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1939, __PRETTY_FUNCTION__)) |
1939 | "We should always have at least 8 operands!")((MI->getNumOperands() >= 8 && "We should always have at least 8 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 8 && \"We should always have at least 8 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1939, __PRETTY_FUNCTION__)); |
1940 | |
1941 | const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1); |
1942 | if (!CtrlOp.isImm()) |
1943 | break; |
1944 | |
1945 | unsigned ElSize; |
1946 | switch (MI->getOpcode()) { |
1947 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1947); |
1948 | case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break; |
1949 | case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break; |
1950 | } |
1951 | |
1952 | const MachineOperand &MaskOp = MI->getOperand(6); |
1953 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { |
1954 | unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); |
1955 | SmallVector<int, 16> Mask; |
1956 | DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask); |
1957 | if (!Mask.empty()) |
1958 | OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask)); |
1959 | } |
1960 | break; |
1961 | } |
1962 | |
1963 | case X86::VPPERMrrm: { |
1964 | if (!OutStreamer->isVerboseAsm()) |
1965 | break; |
1966 | assert(MI->getNumOperands() >= 7 &&((MI->getNumOperands() >= 7 && "We should always have at least 7 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 7 && \"We should always have at least 7 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1967, __PRETTY_FUNCTION__)) |
1967 | "We should always have at least 7 operands!")((MI->getNumOperands() >= 7 && "We should always have at least 7 operands!" ) ? static_cast<void> (0) : __assert_fail ("MI->getNumOperands() >= 7 && \"We should always have at least 7 operands!\"" , "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 1967, __PRETTY_FUNCTION__)); |
1968 | |
1969 | const MachineOperand &MaskOp = MI->getOperand(6); |
1970 | if (auto *C = getConstantFromPool(*MI, MaskOp)) { |
1971 | unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); |
1972 | SmallVector<int, 16> Mask; |
1973 | DecodeVPPERMMask(C, Width, Mask); |
1974 | if (!Mask.empty()) |
1975 | OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask)); |
1976 | } |
1977 | break; |
1978 | } |
1979 | |
1980 | case X86::MMX_MOVQ64rm: { |
1981 | if (!OutStreamer->isVerboseAsm()) |
1982 | break; |
1983 | if (MI->getNumOperands() <= 4) |
1984 | break; |
1985 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) { |
1986 | std::string Comment; |
1987 | raw_string_ostream CS(Comment); |
1988 | const MachineOperand &DstOp = MI->getOperand(0); |
1989 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = "; |
1990 | if (auto *CF = dyn_cast<ConstantFP>(C)) { |
1991 | CS << "0x" << CF->getValueAPF().bitcastToAPInt().toString(16, false); |
1992 | OutStreamer->AddComment(CS.str()); |
1993 | } |
1994 | } |
1995 | break; |
1996 | } |
1997 | |
1998 | #define MOV_CASE(Prefix, Suffix)case X86::PrefixMOVAPDSuffixrm: case X86::PrefixMOVAPSSuffixrm : case X86::PrefixMOVUPDSuffixrm: case X86::PrefixMOVUPSSuffixrm : case X86::PrefixMOVDQASuffixrm: case X86::PrefixMOVDQUSuffixrm : \ |
1999 | case X86::Prefix##MOVAPD##Suffix##rm: \ |
2000 | case X86::Prefix##MOVAPS##Suffix##rm: \ |
2001 | case X86::Prefix##MOVUPD##Suffix##rm: \ |
2002 | case X86::Prefix##MOVUPS##Suffix##rm: \ |
2003 | case X86::Prefix##MOVDQA##Suffix##rm: \ |
2004 | case X86::Prefix##MOVDQU##Suffix##rm: |
2005 | |
2006 | #define MOV_AVX512_CASE(Suffix)case X86::VMOVDQA64Suffixrm: case X86::VMOVDQA32Suffixrm: case X86::VMOVDQU64Suffixrm: case X86::VMOVDQU32Suffixrm: case X86 ::VMOVDQU16Suffixrm: case X86::VMOVDQU8Suffixrm: case X86::VMOVAPSSuffixrm : case X86::VMOVAPDSuffixrm: case X86::VMOVUPSSuffixrm: case X86 ::VMOVUPDSuffixrm: \ |
2007 | case X86::VMOVDQA64##Suffix##rm: \ |
2008 | case X86::VMOVDQA32##Suffix##rm: \ |
2009 | case X86::VMOVDQU64##Suffix##rm: \ |
2010 | case X86::VMOVDQU32##Suffix##rm: \ |
2011 | case X86::VMOVDQU16##Suffix##rm: \ |
2012 | case X86::VMOVDQU8##Suffix##rm: \ |
2013 | case X86::VMOVAPS##Suffix##rm: \ |
2014 | case X86::VMOVAPD##Suffix##rm: \ |
2015 | case X86::VMOVUPS##Suffix##rm: \ |
2016 | case X86::VMOVUPD##Suffix##rm: |
2017 | |
2018 | #define CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: \ |
2019 | MOV_CASE(, )case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: /* SSE */ \ |
2020 | MOV_CASE(V, )case X86::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm : case X86::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm : /* AVX-128 */ \ |
2021 | MOV_CASE(V, Y)case X86::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm : case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm : /* AVX-256 */ \ |
2022 | MOV_AVX512_CASE(Z)case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: \ |
2023 | MOV_AVX512_CASE(Z256)case X86::VMOVDQA64Z256rm: case X86::VMOVDQA32Z256rm: case X86 ::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm: case X86::VMOVDQU16Z256rm : case X86::VMOVDQU8Z256rm: case X86::VMOVAPSZ256rm: case X86 ::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm: case X86::VMOVUPDZ256rm : \ |
2024 | MOV_AVX512_CASE(Z128)case X86::VMOVDQA64Z128rm: case X86::VMOVDQA32Z128rm: case X86 ::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm: case X86::VMOVDQU16Z128rm : case X86::VMOVDQU8Z128rm: case X86::VMOVAPSZ128rm: case X86 ::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm: case X86::VMOVUPDZ128rm : |
2025 | |
2026 | // For loads from a constant pool to a vector register, print the constant |
2027 | // loaded. |
2028 | CASE_ALL_MOV_RM()case X86::MOVAPDrm: case X86::MOVAPSrm: case X86::MOVUPDrm: case X86::MOVUPSrm: case X86::MOVDQArm: case X86::MOVDQUrm: case X86 ::VMOVAPDrm: case X86::VMOVAPSrm: case X86::VMOVUPDrm: case X86 ::VMOVUPSrm: case X86::VMOVDQArm: case X86::VMOVDQUrm: case X86 ::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm: case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm: case X86::VMOVDQA64Zrm: case X86::VMOVDQA32Zrm: case X86::VMOVDQU64Zrm : case X86::VMOVDQU32Zrm: case X86::VMOVDQU16Zrm: case X86::VMOVDQU8Zrm : case X86::VMOVAPSZrm: case X86::VMOVAPDZrm: case X86::VMOVUPSZrm : case X86::VMOVUPDZrm: case X86::VMOVDQA64Z256rm: case X86:: VMOVDQA32Z256rm: case X86::VMOVDQU64Z256rm: case X86::VMOVDQU32Z256rm : case X86::VMOVDQU16Z256rm: case X86::VMOVDQU8Z256rm: case X86 ::VMOVAPSZ256rm: case X86::VMOVAPDZ256rm: case X86::VMOVUPSZ256rm : case X86::VMOVUPDZ256rm: case X86::VMOVDQA64Z128rm: case X86 ::VMOVDQA32Z128rm: case X86::VMOVDQU64Z128rm: case X86::VMOVDQU32Z128rm : case X86::VMOVDQU16Z128rm: case X86::VMOVDQU8Z128rm: case X86 ::VMOVAPSZ128rm: case X86::VMOVAPDZ128rm: case X86::VMOVUPSZ128rm : case X86::VMOVUPDZ128rm: |
2029 | case X86::VBROADCASTF128: |
2030 | case X86::VBROADCASTI128: |
2031 | case X86::VBROADCASTF32X4Z256rm: |
2032 | case X86::VBROADCASTF32X4rm: |
2033 | case X86::VBROADCASTF32X8rm: |
2034 | case X86::VBROADCASTF64X2Z128rm: |
2035 | case X86::VBROADCASTF64X2rm: |
2036 | case X86::VBROADCASTF64X4rm: |
2037 | case X86::VBROADCASTI32X4Z256rm: |
2038 | case X86::VBROADCASTI32X4rm: |
2039 | case X86::VBROADCASTI32X8rm: |
2040 | case X86::VBROADCASTI64X2Z128rm: |
2041 | case X86::VBROADCASTI64X2rm: |
2042 | case X86::VBROADCASTI64X4rm: |
2043 | if (!OutStreamer->isVerboseAsm()) |
2044 | break; |
2045 | if (MI->getNumOperands() <= 4) |
2046 | break; |
2047 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) { |
2048 | int NumLanes = 1; |
2049 | // Override NumLanes for the broadcast instructions. |
2050 | switch (MI->getOpcode()) { |
2051 | case X86::VBROADCASTF128: NumLanes = 2; break; |
2052 | case X86::VBROADCASTI128: NumLanes = 2; break; |
2053 | case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break; |
2054 | case X86::VBROADCASTF32X4rm: NumLanes = 4; break; |
2055 | case X86::VBROADCASTF32X8rm: NumLanes = 2; break; |
2056 | case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break; |
2057 | case X86::VBROADCASTF64X2rm: NumLanes = 4; break; |
2058 | case X86::VBROADCASTF64X4rm: NumLanes = 2; break; |
2059 | case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break; |
2060 | case X86::VBROADCASTI32X4rm: NumLanes = 4; break; |
2061 | case X86::VBROADCASTI32X8rm: NumLanes = 2; break; |
2062 | case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break; |
2063 | case X86::VBROADCASTI64X2rm: NumLanes = 4; break; |
2064 | case X86::VBROADCASTI64X4rm: NumLanes = 2; break; |
2065 | } |
2066 | |
2067 | std::string Comment; |
2068 | raw_string_ostream CS(Comment); |
2069 | const MachineOperand &DstOp = MI->getOperand(0); |
2070 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = "; |
2071 | if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) { |
2072 | CS << "["; |
2073 | for (int l = 0; l != NumLanes; ++l) { |
2074 | for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; |
2075 | ++i) { |
2076 | if (i != 0 || l != 0) |
2077 | CS << ","; |
2078 | if (CDS->getElementType()->isIntegerTy()) |
2079 | printConstant(CDS->getElementAsAPInt(i), CS); |
2080 | else if (CDS->getElementType()->isHalfTy() || |
2081 | CDS->getElementType()->isFloatTy() || |
2082 | CDS->getElementType()->isDoubleTy()) |
2083 | printConstant(CDS->getElementAsAPFloat(i), CS); |
2084 | else |
2085 | CS << "?"; |
2086 | } |
2087 | } |
2088 | CS << "]"; |
2089 | OutStreamer->AddComment(CS.str()); |
2090 | } else if (auto *CV = dyn_cast<ConstantVector>(C)) { |
2091 | CS << "<"; |
2092 | for (int l = 0; l != NumLanes; ++l) { |
2093 | for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; |
2094 | ++i) { |
2095 | if (i != 0 || l != 0) |
2096 | CS << ","; |
2097 | printConstant(CV->getOperand(i), CS); |
2098 | } |
2099 | } |
2100 | CS << ">"; |
2101 | OutStreamer->AddComment(CS.str()); |
2102 | } |
2103 | } |
2104 | break; |
2105 | case X86::MOVDDUPrm: |
2106 | case X86::VMOVDDUPrm: |
2107 | case X86::VMOVDDUPZ128rm: |
2108 | case X86::VBROADCASTSSrm: |
2109 | case X86::VBROADCASTSSYrm: |
2110 | case X86::VBROADCASTSSZ128m: |
2111 | case X86::VBROADCASTSSZ256m: |
2112 | case X86::VBROADCASTSSZm: |
2113 | case X86::VBROADCASTSDYrm: |
2114 | case X86::VBROADCASTSDZ256m: |
2115 | case X86::VBROADCASTSDZm: |
2116 | case X86::VPBROADCASTBrm: |
2117 | case X86::VPBROADCASTBYrm: |
2118 | case X86::VPBROADCASTBZ128m: |
2119 | case X86::VPBROADCASTBZ256m: |
2120 | case X86::VPBROADCASTBZm: |
2121 | case X86::VPBROADCASTDrm: |
2122 | case X86::VPBROADCASTDYrm: |
2123 | case X86::VPBROADCASTDZ128m: |
2124 | case X86::VPBROADCASTDZ256m: |
2125 | case X86::VPBROADCASTDZm: |
2126 | case X86::VPBROADCASTQrm: |
2127 | case X86::VPBROADCASTQYrm: |
2128 | case X86::VPBROADCASTQZ128m: |
2129 | case X86::VPBROADCASTQZ256m: |
2130 | case X86::VPBROADCASTQZm: |
2131 | case X86::VPBROADCASTWrm: |
2132 | case X86::VPBROADCASTWYrm: |
2133 | case X86::VPBROADCASTWZ128m: |
2134 | case X86::VPBROADCASTWZ256m: |
2135 | case X86::VPBROADCASTWZm: |
2136 | if (!OutStreamer->isVerboseAsm()) |
2137 | break; |
2138 | if (MI->getNumOperands() <= 4) |
2139 | break; |
2140 | if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) { |
2141 | int NumElts; |
2142 | switch (MI->getOpcode()) { |
2143 | default: llvm_unreachable("Invalid opcode")::llvm::llvm_unreachable_internal("Invalid opcode", "/build/llvm-toolchain-snapshot-9~svn359999/lib/Target/X86/X86MCInstLower.cpp" , 2143); |
2144 | case X86::MOVDDUPrm: NumElts = 2; break; |
2145 | case X86::VMOVDDUPrm: NumElts = 2; break; |
2146 | case X86::VMOVDDUPZ128rm: NumElts = 2; break; |
2147 | case X86::VBROADCASTSSrm: NumElts = 4; break; |
2148 | case X86::VBROADCASTSSYrm: NumElts = 8; break; |
2149 | case X86::VBROADCASTSSZ128m: NumElts = 4; break; |
2150 | case X86::VBROADCASTSSZ256m: NumElts = 8; break; |
2151 | case X86::VBROADCASTSSZm: NumElts = 16; break; |
2152 | case X86::VBROADCASTSDYrm: NumElts = 4; break; |
2153 | case X86::VBROADCASTSDZ256m: NumElts = 4; break; |
2154 | case X86::VBROADCASTSDZm: NumElts = 8; break; |
2155 | case X86::VPBROADCASTBrm: NumElts = 16; break; |
2156 | case X86::VPBROADCASTBYrm: NumElts = 32; break; |
2157 | case X86::VPBROADCASTBZ128m: NumElts = 16; break; |
2158 | case X86::VPBROADCASTBZ256m: NumElts = 32; break; |
2159 | case X86::VPBROADCASTBZm: NumElts = 64; break; |
2160 | case X86::VPBROADCASTDrm: NumElts = 4; break; |
2161 | case X86::VPBROADCASTDYrm: NumElts = 8; break; |
2162 | case X86::VPBROADCASTDZ128m: NumElts = 4; break; |
2163 | case X86::VPBROADCASTDZ256m: NumElts = 8; break; |
2164 | case X86::VPBROADCASTDZm: NumElts = 16; break; |
2165 | case X86::VPBROADCASTQrm: NumElts = 2; break; |
2166 | case X86::VPBROADCASTQYrm: NumElts = 4; break; |
2167 | case X86::VPBROADCASTQZ128m: NumElts = 2; break; |
2168 | case X86::VPBROADCASTQZ256m: NumElts = 4; break; |
2169 | case X86::VPBROADCASTQZm: NumElts = 8; break; |
2170 | case X86::VPBROADCASTWrm: NumElts = 8; break; |
2171 | case X86::VPBROADCASTWYrm: NumElts = 16; break; |
2172 | case X86::VPBROADCASTWZ128m: NumElts = 8; break; |
2173 | case X86::VPBROADCASTWZ256m: NumElts = 16; break; |
2174 | case X86::VPBROADCASTWZm: NumElts = 32; break; |
2175 | } |
2176 | |
2177 | std::string Comment; |
2178 | raw_string_ostream CS(Comment); |
2179 | const MachineOperand &DstOp = MI->getOperand(0); |
2180 | CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = "; |
2181 | CS << "["; |
2182 | for (int i = 0; i != NumElts; ++i) { |
2183 | if (i != 0) |
2184 | CS << ","; |
2185 | printConstant(C, CS); |
2186 | } |
2187 | CS << "]"; |
2188 | OutStreamer->AddComment(CS.str()); |
2189 | } |
2190 | } |
2191 | |
2192 | MCInst TmpInst; |
2193 | MCInstLowering.Lower(MI, TmpInst); |
2194 | |
2195 | // Stackmap shadows cannot include branch targets, so we can count the bytes |
2196 | // in a call towards the shadow, but must ensure that no thread returns |
2197 | // into the stackmap shadow. The only way to achieve this is if the call |
2198 | // is at the end of the shadow. |
2199 | if (MI->isCall()) { |
2200 | // Count the size of the call towards the shadow |
2201 | SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get()); |
2202 | // Then flush the shadow so that we fill with nops before the call, not |
2203 | // after it. |
2204 | SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo()); |
2205 | // Then emit the call |
2206 | OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo()); |
2207 | return; |
2208 | } |
2209 | |
2210 | EmitAndCountInstruction(TmpInst); |
2211 | } |