//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "X86AsmPrinter.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include <string>

using namespace llvm;

static cl::opt<bool> EnableBranchHint("enable-branch-hint",
                                      cl::desc("Enable branch hint."),
                                      cl::init(false), cl::Hidden);
static cl::opt<unsigned> BranchHintProbabilityThreshold(
    "branch-hint-probability-threshold",
    cl::desc("The probability threshold of enabling branch hint."),
    cl::init(50), cl::Hidden);

namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;

public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  MCOperand LowerMachineOperand(const MachineInstr *MI,
                                const MachineOperand &MO) const;
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace

/// A RAII helper which defines a region of instructions which can't have
/// padding added between them for correctness.
struct NoAutoPaddingScope {
  MCStreamer &OS;
  const bool OldAllowAutoPadding;
  NoAutoPaddingScope(MCStreamer &OS)
      : OS(OS), OldAllowAutoPadding(OS.getAllowAutoPadding()) {
    changeAndComment(false);
  }
  ~NoAutoPaddingScope() { changeAndComment(OldAllowAutoPadding); }
  void changeAndComment(bool b) {
    if (b == OS.getAllowAutoPadding())
      return;
    OS.setAllowAutoPadding(b);
    if (b)
      OS.emitRawComment("autopadding");
    else
      OS.emitRawComment("noautopadding");
  }
};
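
// Typical usage (illustrative): instantiate on the stack around a
// fixed-layout sequence; padding is suppressed in the constructor and the
// previous setting is restored when the scope ends:
//
//   {
//     NoAutoPaddingScope NoPadScope(*OutStreamer);
//     ... emit instructions whose byte layout must not change ...
//   } // auto-padding restored here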

// Emit a minimal sequence of nops spanning NumBytes bytes.
static void emitX86Nops(MCStreamer &OS, unsigned NumBytes,
                        const X86Subtarget *Subtarget);

void X86AsmPrinter::StackMapShadowTracker::count(const MCInst &Inst,
                                                 const MCSubtargetInfo &STI,
                                                 MCCodeEmitter *CodeEmitter) {
  if (InShadow) {
    SmallString<256> Code;
    SmallVector<MCFixup, 4> Fixups;
    CodeEmitter->encodeInstruction(Inst, Code, Fixups, STI);
    CurrentShadowSize += Code.size();
    if (CurrentShadowSize >= RequiredShadowSize)
      InShadow = false; // The shadow is big enough. Stop counting.
  }
}

void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
    MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
  if (InShadow && CurrentShadowSize < RequiredShadowSize) {
    InShadow = false;
    emitX86Nops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
                &MF->getSubtarget<X86Subtarget>());
  }
}

void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
  OutStreamer->emitInstruction(Inst, getSubtargetInfo());
  SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
}

X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(asmprinter.OutContext), MF(mf), TM(mf.getTarget()),
      MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoMachO>();
}

/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
  const Triple &TT = TM.getTargetTriple();
  if (MO.isGlobal() && TT.isOSBinFormatELF())
    return AsmPrinter.getSymbolPreferLocal(*MO.getGlobal());

  const DataLayout &DL = MF.getDataLayout();
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  MCSymbol *Sym = nullptr;
  SmallString<128> Name;
  StringRef Suffix;

  switch (MO.getTargetFlags()) {
  case X86II::MO_DLLIMPORT:
    // Handle dllimport linkage.
    Name += "__imp_";
    break;
  case X86II::MO_COFFSTUB:
    Name += ".refptr.";
    break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Suffix = "$non_lazy_ptr";
    break;
  }

  if (!Suffix.empty())
    Name += DL.getPrivateGlobalPrefix();

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    AsmPrinter.getNameWithPrefix(Name, GV);
  } else if (MO.isSymbol()) {
    Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
  } else if (MO.isMBB()) {
    assert(Suffix.empty());
    Sym = MO.getMBB()->getSymbol();
  }

  Name += Suffix;
  if (!Sym)
    Sym = Ctx.getOrCreateSymbol(Name);

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default:
    break;
  case X86II::MO_COFFSTUB: {
    MachineModuleInfoCOFF &MMICOFF =
        AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoCOFF>();
    MachineModuleInfoImpl::StubValueTy &StubSym = MMICOFF.getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym = MachineModuleInfoImpl::StubValueTy(
          AsmPrinter.getSymbol(MO.getGlobal()), true);
    }
    break;
  }
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
        getMachOMMI().getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym = MachineModuleInfoImpl::StubValueTy(
          AsmPrinter.getSymbol(MO.getGlobal()),
          !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  }

  return Sym;
}
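
// For example (illustrative): with MO_DLLIMPORT a reference to a global "foo"
// yields the symbol "__imp_foo", while MO_DARWIN_NONLAZY yields the private
// stub "L_foo$non_lazy_ptr" and registers it in the Mach-O stub table above.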

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = nullptr;
  uint16_t Specifier = X86::S_None;

  switch (MO.getTargetFlags()) {
  default:
    llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG: // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_COFFSTUB:
    break;

  case X86II::MO_TLVP:
    Specifier = X86::S_TLVP;
    break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, X86::S_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(
        Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
    break;
  case X86II::MO_SECREL:
    Specifier = uint16_t(X86::S_COFF_SECREL);
    break;
  case X86II::MO_TLSGD:
    Specifier = X86::S_TLSGD;
    break;
  case X86II::MO_TLSLD:
    Specifier = X86::S_TLSLD;
    break;
  case X86II::MO_TLSLDM:
    Specifier = X86::S_TLSLDM;
    break;
  case X86II::MO_GOTTPOFF:
    Specifier = X86::S_GOTTPOFF;
    break;
  case X86II::MO_INDNTPOFF:
    Specifier = X86::S_INDNTPOFF;
    break;
  case X86II::MO_TPOFF:
    Specifier = X86::S_TPOFF;
    break;
  case X86II::MO_DTPOFF:
    Specifier = X86::S_DTPOFF;
    break;
  case X86II::MO_NTPOFF:
    Specifier = X86::S_NTPOFF;
    break;
  case X86II::MO_GOTNTPOFF:
    Specifier = X86::S_GOTNTPOFF;
    break;
  case X86II::MO_GOTPCREL:
    Specifier = X86::S_GOTPCREL;
    break;
  case X86II::MO_GOTPCREL_NORELAX:
    Specifier = X86::S_GOTPCREL_NORELAX;
    break;
  case X86II::MO_GOT:
    Specifier = X86::S_GOT;
    break;
  case X86II::MO_GOTOFF:
    Specifier = X86::S_GOTOFF;
    break;
  case X86II::MO_PLT:
    Specifier = X86::S_PLT;
    break;
  case X86II::MO_ABS8:
    Specifier = X86::S_ABS8;
    break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(
        Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
    if (MO.isJTI()) {
      assert(MAI.doesSetDirectiveSuppressReloc());
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.createTempSymbol();
      AsmPrinter.OutStreamer->emitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::create(Label, Ctx);
    }
    break;
  }

  if (!Expr)
    Expr = MCSymbolRefExpr::create(Sym, Specifier, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(
        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
  return MCOperand::createExpr(Expr);
}

static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
  return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
}

MCOperand X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
                                              const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    MI->print(errs());
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return MCOperand();
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
  case MachineOperand::MO_MCSymbol:
    return LowerSymbolOperand(MO, MO.getMCSymbol());
  case MachineOperand::MO_JumpTableIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
  case MachineOperand::MO_BlockAddress:
    return LowerSymbolOperand(
        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return MCOperand();
  }
}

// Replace TAILJMP opcodes with their equivalent opcodes that have encoding
// information.
static unsigned convertTailJumpOpcode(unsigned Opcode) {
  switch (Opcode) {
  case X86::TAILJMPr:
    Opcode = X86::JMP32r;
    break;
  case X86::TAILJMPm:
    Opcode = X86::JMP32m;
    break;
  case X86::TAILJMPr64:
    Opcode = X86::JMP64r;
    break;
  case X86::TAILJMPm64:
    Opcode = X86::JMP64m;
    break;
  case X86::TAILJMPr64_REX:
    Opcode = X86::JMP64r_REX;
    break;
  case X86::TAILJMPm64_REX:
    Opcode = X86::JMP64m_REX;
    break;
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    Opcode = X86::JMP_1;
    break;
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    Opcode = X86::JCC_1;
    break;
  }

  return Opcode;
}
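
// E.g. the direct tail-call pseudos TAILJMPd/TAILJMPd64 both become the plain
// JMP_1, while the conditional TAILJMPd_CC/TAILJMPd64_CC forms become JCC_1
// and keep their condition-code operand.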

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands())
    if (auto Op = LowerMachineOperand(MI, MO); Op.isValid())
      OutMI.addOperand(Op);

  bool In64BitMode = AsmPrinter.getSubtarget().is64Bit();
  if (X86::optimizeInstFromVEX3ToVEX2(OutMI, MI->getDesc()) ||
      X86::optimizeShiftRotateWithImmediateOne(OutMI) ||
      X86::optimizeVPCMPWithImmediateOneOrSix(OutMI) ||
      X86::optimizeMOVSX(OutMI) || X86::optimizeINCDEC(OutMI, In64BitMode) ||
      X86::optimizeMOV(OutMI, In64BitMode) ||
      X86::optimizeToFixedRegisterOrShortImmediateForm(OutMI))
    return;

  // Handle a few special cases to eliminate operand modifiers.
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1 + X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;
  case X86::MULX32Hrr:
  case X86::MULX32Hrm:
  case X86::MULX64Hrr:
  case X86::MULX64Hrm: {
    // Turn into regular MULX by duplicating the destination.
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::MULX32Hrr: NewOpc = X86::MULX32rr; break;
    case X86::MULX32Hrm: NewOpc = X86::MULX32rm; break;
    case X86::MULX64Hrr: NewOpc = X86::MULX64rr; break;
    case X86::MULX64Hrm: NewOpc = X86::MULX64rm; break;
    }
    OutMI.setOpcode(NewOpc);
    // Duplicate the destination.
    MCRegister DestReg = OutMI.getOperand(0).getReg();
    OutMI.insert(OutMI.begin(), MCOperand::createReg(DestReg));
    break;
  }
  // CALL64r, CALL64pcrel32 - These instructions used to have
  // register inputs modeled as normal uses instead of implicit uses. As such,
  // we used to truncate off all but the first operand (the callee). This
  // issue seems to have been fixed at some point. This assert verifies that.
  case X86::CALL64r:
  case X86::CALL64pcrel32:
    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
    break;
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }
  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }
  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = In64BitMode ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }
  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
  // instruction.
  case X86::TAILJMPr:
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
    break;
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    assert(OutMI.getNumOperands() == 2 && "Unexpected number of operands!");
    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
    break;
  case X86::TAILJMPm:
  case X86::TAILJMPm64:
  case X86::TAILJMPm64_REX:
    assert(OutMI.getNumOperands() == X86::AddrNumOperands &&
           "Unexpected number of operands!");
    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
    break;
  case X86::MASKMOVDQU:
  case X86::VMASKMOVDQU:
    if (In64BitMode)
      OutMI.setFlags(X86::IP_HAS_AD_SIZE);
    break;
  case X86::BSF16rm:
  case X86::BSF16rr:
  case X86::BSF32rm:
  case X86::BSF32rr:
  case X86::BSF64rm:
  case X86::BSF64rr: {
    // Add a REP prefix to BSF instructions so that new processors can
    // recognize them as TZCNT, which has better performance than BSF.
    // BSF and TZCNT have different interpretations of the ZF bit, so make
    // sure it won't be used later.
    const MachineOperand *FlagDef =
        MI->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
    if (!MF.getFunction().hasOptSize() && FlagDef && FlagDef->isDead())
      OutMI.setFlags(X86::IP_HAS_REPEAT);
    break;
  }
  default:
    break;
  }
}

void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {
  NoAutoPaddingScope NoPadScope(*OutStreamer);
  bool Is64Bits = getSubtarget().is64Bit();
  bool Is64BitsLP64 = getSubtarget().isTarget64BitLP64();
  MCContext &Ctx = OutStreamer->getContext();

  uint16_t Specifier;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_addrX32:
    Specifier = X86::S_TLSGD;
    break;
  case X86::TLS_base_addr32:
    Specifier = X86::S_TLSLDM;
    break;
  case X86::TLS_base_addr64:
  case X86::TLS_base_addrX32:
    Specifier = X86::S_TLSLD;
    break;
  case X86::TLS_desc32:
  case X86::TLS_desc64:
    Specifier = X86::S_TLSDESC;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  const MCSymbolRefExpr *Sym = MCSymbolRefExpr::create(
      MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), Specifier, Ctx);

  // Before binutils 2.41, ld has a bogus TLS relaxation error when the GD/LD
  // code sequence using R_X86_64_GOTPCREL (instead of R_X86_64_GOTPCRELX) is
  // attempted to be relaxed to IE/LE (binutils PR24784). Work around the bug by
  // only using GOT when GOTPCRELX is enabled.
  // TODO: Delete the workaround when rustc no longer relies on the hack.
  bool UseGot = MMI->getModule()->getRtLibUseGOT() &&
                Ctx.getTargetOptions()->X86RelaxRelocations;

  if (Specifier == X86::S_TLSDESC) {
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(
        MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), X86::S_TLSCALL,
        Ctx);
    EmitAndCountInstruction(
        MCInstBuilder(Is64BitsLP64 ? X86::LEA64r : X86::LEA32r)
            .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
            .addReg(Is64Bits ? X86::RIP : X86::EBX)
            .addImm(1)
            .addReg(0)
            .addExpr(Sym)
            .addReg(0));
    EmitAndCountInstruction(
        MCInstBuilder(Is64Bits ? X86::CALL64m : X86::CALL32m)
            .addReg(Is64BitsLP64 ? X86::RAX : X86::EAX)
            .addImm(1)
            .addReg(0)
            .addExpr(Expr)
            .addReg(0));
  } else if (Is64Bits) {
    bool NeedsPadding = Specifier == X86::S_TLSGD;
    if (NeedsPadding && Is64BitsLP64)
      EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
                                .addReg(X86::RDI)
                                .addReg(X86::RIP)
                                .addImm(1)
                                .addReg(0)
                                .addExpr(Sym)
                                .addReg(0));
    const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("__tls_get_addr");
    if (NeedsPadding) {
      if (!UseGot)
        EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
      EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
      EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
    }
    if (UseGot) {
      const MCExpr *Expr =
          MCSymbolRefExpr::create(TlsGetAddr, X86::S_GOTPCREL, Ctx);
      EmitAndCountInstruction(MCInstBuilder(X86::CALL64m)
                                  .addReg(X86::RIP)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Expr)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(
          MCInstBuilder(X86::CALL64pcrel32)
              .addExpr(MCSymbolRefExpr::create(TlsGetAddr, X86::S_PLT, Ctx)));
    }
  } else {
    if (Specifier == X86::S_TLSGD && !UseGot) {
      EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
                                  .addReg(X86::EAX)
                                  .addReg(0)
                                  .addImm(1)
                                  .addReg(X86::EBX)
                                  .addExpr(Sym)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
                                  .addReg(X86::EAX)
                                  .addReg(X86::EBX)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Sym)
                                  .addReg(0));
    }

    const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("___tls_get_addr");
    if (UseGot) {
      const MCExpr *Expr = MCSymbolRefExpr::create(TlsGetAddr, X86::S_GOT, Ctx);
      EmitAndCountInstruction(MCInstBuilder(X86::CALL32m)
                                  .addReg(X86::EBX)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Expr)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(
          MCInstBuilder(X86::CALLpcrel32)
              .addExpr(MCSymbolRefExpr::create(TlsGetAddr, X86::S_PLT, Ctx)));
    }
  }
}
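
// Illustrative output for the 64-bit LP64 general-dynamic case: the emitted
// bytes follow the canonical sequence linkers pattern-match when relaxing TLS
// models, with the data16/rex64 prefixes padding it to the expected size:
//
//   .byte 0x66                       # data16
//   leaq  x@tlsgd(%rip), %rdi
//   .byte 0x66, 0x66                 # two more data16 prefixes
//   rex64 callq __tls_get_addr@PLT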

/// Emit the largest nop instruction smaller than or equal to \p NumBytes
/// bytes. Return the size of nop emitted.
static unsigned emitNop(MCStreamer &OS, unsigned NumBytes,
                        const X86Subtarget *Subtarget) {
  // Determine the longest nop which can be efficiently decoded for the given
  // target cpu. 15 bytes is the longest single NOP instruction, but some
  // platforms can't decode the longest forms efficiently.
  unsigned MaxNopLength = 1;
  if (Subtarget->is64Bit()) {
    // FIXME: We can use NOOPL on 32-bit targets with FeatureNOPL, but the
    // IndexReg/BaseReg below need to be updated.
    if (Subtarget->hasFeature(X86::TuningFast7ByteNOP))
      MaxNopLength = 7;
    else if (Subtarget->hasFeature(X86::TuningFast15ByteNOP))
      MaxNopLength = 15;
    else if (Subtarget->hasFeature(X86::TuningFast11ByteNOP))
      MaxNopLength = 11;
    else
      MaxNopLength = 10;
  } else if (Subtarget->is32Bit())
    MaxNopLength = 2;

  // Cap a single nop emission at the profitable value for the target
  NumBytes = std::min(NumBytes, MaxNopLength);

  unsigned NopSize;
  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  IndexReg = Displacement = SegmentReg = 0;
  BaseReg = X86::RAX;
  ScaleVal = 1;
  switch (NumBytes) {
  case 0:
    llvm_unreachable("Zero nops?");
    break;
  case 1:
    NopSize = 1;
    Opc = X86::NOOP;
    break;
  case 2:
    NopSize = 2;
    Opc = X86::XCHG16ar;
    break;
  case 3:
    NopSize = 3;
    Opc = X86::NOOPL;
    break;
  case 4:
    NopSize = 4;
    Opc = X86::NOOPL;
    Displacement = 8;
    break;
  case 5:
    NopSize = 5;
    Opc = X86::NOOPL;
    Displacement = 8;
    IndexReg = X86::RAX;
    break;
  case 6:
    NopSize = 6;
    Opc = X86::NOOPW;
    Displacement = 8;
    IndexReg = X86::RAX;
    break;
  case 7:
    NopSize = 7;
    Opc = X86::NOOPL;
    Displacement = 512;
    break;
  case 8:
    NopSize = 8;
    Opc = X86::NOOPL;
    Displacement = 512;
    IndexReg = X86::RAX;
    break;
  case 9:
    NopSize = 9;
    Opc = X86::NOOPW;
    Displacement = 512;
    IndexReg = X86::RAX;
    break;
  default:
    NopSize = 10;
    Opc = X86::NOOPW;
    Displacement = 512;
    IndexReg = X86::RAX;
    SegmentReg = X86::CS;
    break;
  }

  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.emitBytes("\x66");

  switch (Opc) {
  default: llvm_unreachable("Unexpected opcode");
  case X86::NOOP:
    OS.emitInstruction(MCInstBuilder(Opc), *Subtarget);
    break;
  case X86::XCHG16ar:
    OS.emitInstruction(MCInstBuilder(Opc).addReg(X86::AX).addReg(X86::AX),
                       *Subtarget);
    break;
  case X86::NOOPL:
  case X86::NOOPW:
    OS.emitInstruction(MCInstBuilder(Opc)
                           .addReg(BaseReg)
                           .addImm(ScaleVal)
                           .addReg(IndexReg)
                           .addImm(Displacement)
                           .addReg(SegmentReg),
                       *Subtarget);
    break;
  }
  assert(NopSize <= NumBytes && "We overemitted?");
  return NopSize;
}
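
// Illustrative encodings for a few sizes: NumBytes == 4 selects
// "nopl 8(%rax)" (0f 1f 40 08); NumBytes == 9 selects
// "nopw 512(%rax,%rax)" (66 0f 1f 84 00 00 02 00 00); anything past the
// 10-byte form is reached by prepending up to five 0x66 prefixes.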

/// Emit the optimal amount of multi-byte nops on X86.
static void emitX86Nops(MCStreamer &OS, unsigned NumBytes,
                        const X86Subtarget *Subtarget) {
  unsigned NopsToEmit = NumBytes;
  (void)NopsToEmit;
  while (NumBytes) {
    NumBytes -= emitNop(OS, NumBytes, Subtarget);
    assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
  }
}
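
// E.g. (illustrative) a 25-byte request on a TuningFast15ByteNOP target comes
// out as one 15-byte nop (the 10-byte form plus five 0x66 prefixes) followed
// by one 10-byte nop.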

void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    emitX86Nops(*OutStreamer, PatchBytes, Subtarget);
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address. You'll fail asserts during load & relocation if this
      // symbol is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate. You'll fail asserts during load & relocation if this
      // address is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      // FIXME: Add retpoline support and remove this.
      if (Subtarget->useIndirectThunkCalls())
        report_fatal_error("Lowering register statepoints with thunks not "
                           "yet implemented.");
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit call
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->emitInstruction(CallInst, getSubtargetInfo());
    maybeEmitNopAfterCallForWindowsEH(&MI);
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT
  auto &Ctx = OutStreamer->getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(MILabel);
  SM.recordStatepoint(*MILabel, MI);
}

void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
                                     X86MCInstLower &MCIL) {
  // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
  // <opcode>, <operands>

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  Register DefRegister = FaultingMI.getOperand(0).getReg();
  FaultMaps::FaultKind FK =
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *FaultingLabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(FaultingLabel);

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);

  MCInst MI;
  MI.setOpcode(Opcode);

  if (DefRegister != X86::NoRegister)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (const MachineOperand &MO :
       llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx))
    if (auto Op = MCIL.LowerMachineOperand(&FaultingMI, MO); Op.isValid())
      MI.addOperand(Op);

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  OutStreamer->emitInstruction(MI, getSubtargetInfo());
}

void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
                                     X86MCInstLower &MCIL) {
  bool Is64Bits = Subtarget->is64Bit();
  MCContext &Ctx = OutStreamer->getContext();
  MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__");
  const MCSymbolRefExpr *Op = MCSymbolRefExpr::create(fentry, Ctx);

  EmitAndCountInstruction(
      MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)
          .addExpr(Op));
}

void X86AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
  assert(std::next(MI.getIterator())->isCall() &&
         "KCFI_CHECK not followed by a call instruction");

  // Adjust the offset for patchable-function-prefix. X86InstrInfo::getNop()
  // returns a 1-byte X86::NOOP, which means the offset is the same in
  // bytes. This assumes that patchable-function-prefix is the same for all
  // functions.
  const MachineFunction &MF = *MI.getMF();
  int64_t PrefixNops = 0;
  (void)MF.getFunction()
      .getFnAttribute("patchable-function-prefix")
      .getValueAsString()
      .getAsInteger(10, PrefixNops);

  // KCFI allows indirect calls to any location that's preceded by a valid
  // type identifier. To avoid encoding the full constant into an instruction,
  // and thus emitting potential call target gadgets at each indirect call
  // site, load a negated constant to a register and compare that to the
  // expected value at the call target.
  const Register AddrReg = MI.getOperand(0).getReg();
  const uint32_t Type = MI.getOperand(1).getImm();
  // The check is immediately before the call. If the call target is in R10,
  // we can clobber R11 for the check instead.
  unsigned TempReg = AddrReg == X86::R10 ? X86::R11D : X86::R10D;
  EmitAndCountInstruction(
      MCInstBuilder(X86::MOV32ri).addReg(TempReg).addImm(-MaskKCFIType(Type)));
  EmitAndCountInstruction(MCInstBuilder(X86::ADD32rm)
                              .addReg(X86::NoRegister)
                              .addReg(TempReg)
                              .addReg(AddrReg)
                              .addImm(1)
                              .addReg(X86::NoRegister)
                              .addImm(-(PrefixNops + 4))
                              .addReg(X86::NoRegister));

  MCSymbol *Pass = OutContext.createTempSymbol();
  EmitAndCountInstruction(
      MCInstBuilder(X86::JCC_1)
          .addExpr(MCSymbolRefExpr::create(Pass, OutContext))
          .addImm(X86::COND_E));

  MCSymbol *Trap = OutContext.createTempSymbol();
  OutStreamer->emitLabel(Trap);
  EmitAndCountInstruction(MCInstBuilder(X86::TRAP));
  emitKCFITrapEntry(MF, Trap);
  OutStreamer->emitLabel(Pass);
}
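
// The emitted check looks roughly like this (illustrative, call target in
// %r11, so %r10d is the scratch register):
//
//   movl $<-type>, %r10d              # negated KCFI type id
//   addl -(PrefixNops+4)(%r11), %r10d # zero iff the type ids match
//   je   .Lpass
//   ud2                               # trap, recorded via emitKCFITrapEntry
// .Lpass: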

void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
  // FIXME: Make this work on non-ELF.
  if (!TM.getTargetTriple().isOSBinFormatELF()) {
    report_fatal_error("llvm.asan.check.memaccess only supported on ELF");
    return;
  }

  const auto &Reg = MI.getOperand(0).getReg();
  ASanAccessInfo AccessInfo(MI.getOperand(1).getImm());

  uint64_t ShadowBase;
  int MappingScale;
  bool OrShadowOffset;
  getAddressSanitizerParams(TM.getTargetTriple(), 64, AccessInfo.CompileKernel,
                            &ShadowBase, &MappingScale, &OrShadowOffset);

  StringRef Name = AccessInfo.IsWrite ? "store" : "load";
  StringRef Op = OrShadowOffset ? "or" : "add";
  std::string SymName = ("__asan_check_" + Name + "_" + Op + "_" +
                         Twine(1ULL << AccessInfo.AccessSizeIndex) + "_" +
                         TM.getMCRegisterInfo()->getName(Reg.asMCReg()))
                            .str();
  if (OrShadowOffset)
    report_fatal_error(
        "OrShadowOffset is not supported with optimized callbacks");

  EmitAndCountInstruction(
      MCInstBuilder(X86::CALL64pcrel32)
          .addExpr(MCSymbolRefExpr::create(
              OutContext.getOrCreateSymbol(SymName), OutContext)));
}

void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  auto NextMI = std::find_if(std::next(MI.getIterator()),
                             MI.getParent()->end().getInstrIterator(),
                             [](auto &II) { return !II.isMetaInstruction(); });

  SmallString<256> Code;
  unsigned MinSize = MI.getOperand(0).getImm();

  if (NextMI != MI.getParent()->end() && !NextMI->isInlineAsm()) {
    // Lower the next MachineInstr to find its byte size.
    // If the next instruction is inline assembly, we skip lowering it for now,
    // and assume we should always generate NOPs.
    MCInst MCI;
    MCIL.Lower(&*NextMI, MCI);

    SmallVector<MCFixup, 4> Fixups;
    CodeEmitter->encodeInstruction(MCI, Code, Fixups, getSubtargetInfo());
  }

  if (Code.size() < MinSize) {
    if (MinSize == 2 && Subtarget->is32Bit() &&
        Subtarget->isTargetWindowsMSVC() &&
        (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) {
      // For compatibility reasons, when targeting MSVC, it is important to
      // generate a 'legacy' NOP in the form of an 8B FF MOV EDI, EDI. Some
      // tools rely specifically on this pattern to be able to patch a
      // function. This is only for 32-bit targets, when using /arch:IA32 or
      // /arch:SSE.
      OutStreamer->emitInstruction(
          MCInstBuilder(X86::MOV32rr_REV).addReg(X86::EDI).addReg(X86::EDI),
          *Subtarget);
    } else {
      unsigned NopSize = emitNop(*OutStreamer, MinSize, Subtarget);
      assert(NopSize == MinSize && "Could not implement MinSize!");
      (void)NopSize;
    }
  }
}

// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(MILabel);

  SM.recordStackMap(*MILabel, MI);
  unsigned NumShadowBytes = MI.getOperand(1).getImm();
  SMShadowTracker.reset(NumShadowBytes);
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(MILabel);
  SM.recordPatchPoint(*MILabel, MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
                                           MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    Register ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    // FIXME: Add retpoline support and remove this.
    if (Subtarget->useIndirectThunkCalls())
      report_fatal_error(
          "Lowering patchpoint with thunks not yet implemented.");
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  emitX86Nops(*OutStreamer, NumBytes - EncodedBytes, Subtarget);
}
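
// The 12/13-byte EncodedBytes split reflects the encodings (illustrative):
//
//   movabsq $target, %rax ; callq *%rax   # 10 + 2 = 12 bytes
//   movabsq $target, %r11 ; callq *%r11   # 10 + 3 = 13 bytes
//
// The REX.B prefix needed to name an extended register in the indirect call
// accounts for the extra byte.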

void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                              X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64");

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  // We want to emit the following pattern, which follows the x86 calling
  // convention to prepare for the trampoline call to be patched in.
  //
  //   .p2align 1, ...
  // .Lxray_event_sled_N:
  //   jmp +N                        // jump across the instrumentation sled
  //   ...                           // set up arguments in register
  //   callq __xray_CustomEvent@plt  // force dependency to symbol
  //   ...
  //   <jump here>
  //
  // After patching, it would look something like:
  //
  //   nopw (2-byte nop)
  //   ...
  //   callq __xrayCustomEvent  // already lowered
  //   ...
  //
  // ---
  // First we emit the label and the jump.
  auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
  OutStreamer->AddComment("# XRay Custom Event Log");
  OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
  OutStreamer->emitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
  // an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->emitBinaryData("\xeb\x0f");

  // The trampoline expects its two arguments in the SystemV registers %rdi
  // and %rsi -- so those are the only ones we work with.
  const Register DestRegs[] = {X86::RDI, X86::RSI};
  bool UsedMask[] = {false, false};
  // Filled out in loop.
  Register SrcRegs[] = {0, 0};

  // Then we put the operands in the %rdi and %rsi registers. We spill the
  // values in the register before we clobber them, and mark them as used in
  // UsedMask. In case the arguments are already in the correct register, we
  // emit nops appropriately sized to keep the sled the same size in every
  // situation.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
        Op.isValid()) {
      assert(Op.isReg() && "Only support arguments in registers");
      SrcRegs[I] = getX86SubSuperRegister(Op.getReg(), 64);
      assert(SrcRegs[I].isValid() && "Invalid operand");
      if (SrcRegs[I] != DestRegs[I]) {
        UsedMask[I] = true;
        EmitAndCountInstruction(
            MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
      } else {
        emitX86Nops(*OutStreamer, 4, Subtarget);
      }
    }

  // Now that the register values are stashed, mov arguments into place.
  // FIXME: This doesn't work if one of the later SrcRegs is equal to an
  // earlier DestReg. We will have already overwritten the register before we
  // can copy from it.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (SrcRegs[I] != DestRegs[I])
      EmitAndCountInstruction(
          MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));

  // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
  // name of the trampoline to be implemented by the XRay runtime.
  auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
  MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
  if (isPositionIndependent())
    TOp.setTargetFlags(X86II::MO_PLT);

  // Emit the call instruction.
  EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
                              .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));

  // Restore caller-saved and used registers.
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    if (UsedMask[I])
      EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
    else
      emitX86Nops(*OutStreamer, 1, Subtarget);

  OutStreamer->AddComment("xray custom event end.");

  // Record the sled version. Version 0 of this sled was spelled differently, so
  // we let the runtime handle the different offsets we're using. Version 2
  // changed the absolute address to a PC-relative address.
  recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
}

void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI,
                                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay typed events only supports X86-64");

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  // We want to emit the following pattern, which follows the x86 calling
  // convention to prepare for the trampoline call to be patched in.
  //
  //   .p2align 1, ...
  // .Lxray_event_sled_N:
  //   jmp +N                       // jump across the instrumentation sled
  //   ...                          // set up arguments in register
  //   callq __xray_TypedEvent@plt  // force dependency to symbol
  //   ...
  //   <jump here>
  //
  // After patching, it would look something like:
  //
  //   nopw (2-byte nop)
  //   ...
  //   callq __xrayTypedEvent  // already lowered
  //   ...
  //
  // ---
  // First we emit the label and the jump.
  auto CurSled = OutContext.createTempSymbol("xray_typed_event_sled_", true);
  OutStreamer->AddComment("# XRay Typed Event Log");
  OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
  OutStreamer->emitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
  // an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->emitBinaryData("\xeb\x14");

  // The three arguments may arrive in the Win64 registers (%rcx, %rdx, and
  // %r8), in which case we translate them, or already in the SystemV
  // registers, in which case we don't have to do any translation.
  const Register DestRegs[] = {X86::RDI, X86::RSI, X86::RDX};
  bool UsedMask[] = {false, false, false};

  // Will fill out src regs in the loop.
  Register SrcRegs[] = {0, 0, 0};

  // Then we put the operands in the SystemV registers. We spill the values in
  // the registers before we clobber them, and mark them as used in UsedMask.
  // In case the arguments are already in the correct register, we emit nops
  // appropriately sized to keep the sled the same size in every situation.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I));
        Op.isValid()) {
      // TODO: Is register only support adequate?
      assert(Op.isReg() && "Only supports arguments in registers");
      SrcRegs[I] = getX86SubSuperRegister(Op.getReg(), 64);
      assert(SrcRegs[I].isValid() && "Invalid operand");
      if (SrcRegs[I] != DestRegs[I]) {
        UsedMask[I] = true;
        EmitAndCountInstruction(
            MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
      } else {
        emitX86Nops(*OutStreamer, 4, Subtarget);
      }
    }

  // In the above loop we only stash all of the destination registers or emit
  // nops if the arguments are already in the right place. Doing the actual
  // moving is postponed until after all the registers are stashed so nothing
  // is clobbered. We've already added nops to account for the size of mov and
  // push if the register is in the right place, so we only have to worry about
  // emitting movs.
  // FIXME: This doesn't work if one of the later SrcRegs is equal to an
  // earlier DestReg. We will have already overwritten the register before we
  // can copy from it.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (UsedMask[I])
      EmitAndCountInstruction(
          MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));

  // We emit a hard dependency on the __xray_TypedEvent symbol, which is the
  // name of the trampoline to be implemented by the XRay runtime.
  auto TSym = OutContext.getOrCreateSymbol("__xray_TypedEvent");
  MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
  if (isPositionIndependent())
    TOp.setTargetFlags(X86II::MO_PLT);

  // Emit the call instruction.
  EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
                              .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));

  // Restore caller-saved and used registers.
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    if (UsedMask[I])
      EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
    else
      emitX86Nops(*OutStreamer, 1, Subtarget);

  OutStreamer->AddComment("xray typed event end.");

  // Record the sled version.
  recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
}

void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
                                                  X86MCInstLower &MCIL) {

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  const Function &F = MF->getFunction();
  if (F.hasFnAttribute("patchable-function-entry")) {
    unsigned Num;
    if (F.getFnAttribute("patchable-function-entry")
            .getValueAsString()
            .getAsInteger(10, Num))
      return;
    emitX86Nops(*OutStreamer, Num, Subtarget);
    return;
  }
  // We want to emit the following pattern:
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   jmp .tmpN
  //   # 9 bytes worth of noops
  //
  // We need the 9 bytes because at runtime, we'd be patching over the full 11
  // bytes with the following pattern:
  //
  //   mov %r10, <function id, 32-bit>   // 6 bytes
  //   call <relative offset, 32-bits>   // 5 bytes
  //
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
  OutStreamer->emitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
  // an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->emitBytes("\xeb\x09");
  emitX86Nops(*OutStreamer, 9, Subtarget);
  recordSled(CurSled, MI, SledKind::FUNCTION_ENTER, 2);
}

void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
                                       X86MCInstLower &MCIL) {
  NoAutoPaddingScope NoPadScope(*OutStreamer);

  // Since PATCHABLE_RET takes the opcode of the return statement as an
  // argument, we use that to emit the correct form of the RET that we want.
  // i.e. when we see this:
  //
  //   PATCHABLE_RET X86::RET ...
  //
  // We should emit the RET followed by sleds.
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   ret  # or equivalent instruction
  //   # 10 bytes worth of noops
  //
  // This just makes sure that the alignment for the next instruction is 2.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
  OutStreamer->emitLabel(CurSled);
  unsigned OpCode = MI.getOperand(0).getImm();
  MCInst Ret;
  Ret.setOpcode(OpCode);
  for (auto &MO : drop_begin(MI.operands()))
    if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
      Ret.addOperand(Op);
  OutStreamer->emitInstruction(Ret, getSubtargetInfo());
  emitX86Nops(*OutStreamer, 10, Subtarget);
  recordSled(CurSled, MI, SledKind::FUNCTION_EXIT, 2);
}

void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
                                             X86MCInstLower &MCIL) {
  MCInst TC;
  TC.setOpcode(convertTailJumpOpcode(MI.getOperand(0).getImm()));
  // Drop the tail jump opcode.
  auto TCOperands = drop_begin(MI.operands());
  bool IsConditional = TC.getOpcode() == X86::JCC_1;
  MCSymbol *FallthroughLabel;
  if (IsConditional) {
    // Rewrite:
    //   je target
    //
    // To:
    //   jne .fallthrough
    //   .p2align 1, ...
    // .Lxray_sled_N:
    //   SLED_CODE
    //   jmp target
    // .fallthrough:
    FallthroughLabel = OutContext.createTempSymbol();
    EmitToStreamer(
        *OutStreamer,
        MCInstBuilder(X86::JCC_1)
            .addExpr(MCSymbolRefExpr::create(FallthroughLabel, OutContext))
            .addImm(X86::GetOppositeBranchCondition(
                static_cast<X86::CondCode>(MI.getOperand(2).getImm()))));
    TC.setOpcode(X86::JMP_1);
    // Drop the condition code.
    TCOperands = drop_end(TCOperands);
  }

  NoAutoPaddingScope NoPadScope(*OutStreamer);

  // Like PATCHABLE_RET, we have the actual instruction in the operands to this
  // instruction so we lower that particular instruction and its operands.
  // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how
  // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to
  // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual
  // tail call much like how we have it in PATCHABLE_RET.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->emitCodeAlignment(Align(2), &getSubtargetInfo());
  OutStreamer->emitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
  // an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->emitBytes("\xeb\x09");
  emitX86Nops(*OutStreamer, 9, Subtarget);
  OutStreamer->emitLabel(Target);
  recordSled(CurSled, MI, SledKind::TAIL_CALL, 2);

  // Before emitting the instruction, add a comment to indicate that this is
  // indeed a tail call.
  OutStreamer->AddComment("TAILCALL");
  for (auto &MO : TCOperands)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MO); Op.isValid())
      TC.addOperand(Op);
  OutStreamer->emitInstruction(TC, getSubtargetInfo());

  if (IsConditional)
    OutStreamer->emitLabel(FallthroughLabel);
}

static unsigned getSrcIdx(const MachineInstr *MI, unsigned SrcIdx) {
  if (X86II::isKMasked(MI->getDesc().TSFlags)) {
    // Skip mask operand.
    ++SrcIdx;
    if (X86II::isKMergeMasked(MI->getDesc().TSFlags)) {
      // Skip passthru operand.
      ++SrcIdx;
    }
  }
  return SrcIdx;
}
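
// E.g. (illustrative) a merge-masked AVX512 instruction has the operand order
// dst, passthru, mask, src1, ..., so a nominal SrcIdx of 1 becomes 3, and the
// write mask sits at SrcIdx - 1.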

static void printDstRegisterName(raw_ostream &CS, const MachineInstr *MI,
                                 unsigned SrcOpIdx) {
  const MachineOperand &DstOp = MI->getOperand(0);
  CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg());

  // Handle AVX512 MASK/MASKZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  if (X86II::isKMasked(MI->getDesc().TSFlags)) {
    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOpIdx - 1);
    StringRef Mask = X86ATTInstPrinter::getRegisterName(WriteMaskOp.getReg());
    CS << " {%" << Mask << "}";
    if (!X86II::isKMergeMasked(MI->getDesc().TSFlags)) {
      CS << " {z}";
    }
  }
}

static void printShuffleMask(raw_ostream &CS, StringRef Src1Name,
                             StringRef Src2Name, ArrayRef<int> Mask) {
  // One source operand, fix the mask to print all elements in one span.
  SmallVector<int, 8> ShuffleMask(Mask);
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
}
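
// Example mask rendering (illustrative): "xmm1[0,1],zero,zero" for a single
// source with zeroed high elements, or "mem[3],xmm2[1,2,3]" when elements are
// drawn from two sources; undef elements print as "u".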

static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx, ArrayRef<int> Mask) {
  std::string Comment;

  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);
  StringRef Src1Name = SrcOp1.isReg()
                           ? X86ATTInstPrinter::getRegisterName(SrcOp1.getReg())
                           : "mem";
  StringRef Src2Name = SrcOp2.isReg()
                           ? X86ATTInstPrinter::getRegisterName(SrcOp2.getReg())
                           : "mem";

  raw_string_ostream CS(Comment);
  printDstRegisterName(CS, MI, SrcOp1Idx);
  CS << " = ";
  printShuffleMask(CS, Src1Name, Src2Name, Mask);

  return Comment;
}

static void printConstant(const APInt &Val, raw_ostream &CS,
                          bool PrintZero = false) {
  if (Val.getBitWidth() <= 64) {
    CS << (PrintZero ? 0ULL : Val.getZExtValue());
  } else {
    // print multi-word constant as (w0,w1)
    CS << "(";
    for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
      if (i > 0)
        CS << ",";
      CS << (PrintZero ? 0ULL : Val.getRawData()[i]);
    }
    CS << ")";
  }
}

static void printConstant(const APFloat &Flt, raw_ostream &CS,
                          bool PrintZero = false) {
  SmallString<32> Str;
  // Force scientific notation to distinguish from integers.
  if (PrintZero)
    APFloat::getZero(Flt.getSemantics()).toString(Str, 0, 0);
  else
    Flt.toString(Str, 0, 0);
  CS << Str;
}

static void printConstant(const Constant *COp, unsigned BitWidth,
                          raw_ostream &CS, bool PrintZero = false) {
  if (isa<UndefValue>(COp)) {
    CS << "u";
  } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
    if (auto VTy = dyn_cast<FixedVectorType>(CI->getType())) {
      for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
        if (I != 0)
          CS << ',';
        printConstant(CI->getValue(), CS, PrintZero);
      }
    } else
      printConstant(CI->getValue(), CS, PrintZero);
  } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
    if (auto VTy = dyn_cast<FixedVectorType>(CF->getType())) {
      unsigned EltBits = VTy->getScalarSizeInBits();
      unsigned E = std::min(BitWidth / EltBits, VTy->getNumElements());
      if ((BitWidth % EltBits) == 0) {
        for (unsigned I = 0; I != E; ++I) {
          if (I != 0)
            CS << ",";
          printConstant(CF->getValueAPF(), CS, PrintZero);
        }
      } else {
        CS << "?";
      }
    } else
      printConstant(CF->getValueAPF(), CS, PrintZero);
  } else if (auto *CDS = dyn_cast<ConstantDataSequential>(COp)) {
    Type *EltTy = CDS->getElementType();
    bool IsInteger = EltTy->isIntegerTy();
    bool IsFP = EltTy->isHalfTy() || EltTy->isFloatTy() || EltTy->isDoubleTy();
    unsigned EltBits = EltTy->getPrimitiveSizeInBits();
    unsigned E = std::min(BitWidth / EltBits, (unsigned)CDS->getNumElements());
    if ((BitWidth % EltBits) == 0) {
      for (unsigned I = 0; I != E; ++I) {
        if (I != 0)
          CS << ",";
        if (IsInteger)
          printConstant(CDS->getElementAsAPInt(I), CS, PrintZero);
        else if (IsFP)
          printConstant(CDS->getElementAsAPFloat(I), CS, PrintZero);
        else
          CS << "?";
      }
    } else {
      CS << "?";
    }
  } else if (auto *CV = dyn_cast<ConstantVector>(COp)) {
    unsigned EltBits = CV->getType()->getScalarSizeInBits();
    unsigned E = std::min(BitWidth / EltBits, CV->getNumOperands());
    if ((BitWidth % EltBits) == 0) {
      for (unsigned I = 0; I != E; ++I) {
        if (I != 0)
          CS << ",";
        printConstant(CV->getOperand(I), EltBits, CS, PrintZero);
      }
    } else {
      CS << "?";
    }
  } else {
    CS << "?";
  }
}

static void printZeroUpperMove(const MachineInstr *MI, MCStreamer &OutStreamer,
                               int SclWidth, int VecWidth,
                               const char *ShuffleComment) {
  unsigned SrcIdx = getSrcIdx(MI, 1);

  std::string Comment;
  raw_string_ostream CS(Comment);
  printDstRegisterName(CS, MI, SrcIdx);
  CS << " = ";

  if (auto *C = X86::getConstantFromPool(*MI, SrcIdx)) {
    CS << "[";
    printConstant(C, SclWidth, CS);
    for (int I = 1, E = VecWidth / SclWidth; I < E; ++I) {
      CS << ",";
      printConstant(C, SclWidth, CS, true);
    }
    CS << "]";
    OutStreamer.AddComment(CS.str());
    return; // early-out
  }

  // We didn't find a constant load; fall back to a shuffle mask decode.
  CS << ShuffleComment;
  OutStreamer.AddComment(CS.str());
}

static void printBroadcast(const MachineInstr *MI, MCStreamer &OutStreamer,
                           int Repeats, int BitWidth) {
  unsigned SrcIdx = getSrcIdx(MI, 1);
  if (auto *C = X86::getConstantFromPool(*MI, SrcIdx)) {
    std::string Comment;
    raw_string_ostream CS(Comment);
    printDstRegisterName(CS, MI, SrcIdx);
    CS << " = [";
    for (int l = 0; l != Repeats; ++l) {
      if (l != 0)
        CS << ",";
      printConstant(C, BitWidth, CS);
    }
    CS << "]";
    OutStreamer.AddComment(CS.str());
  }
}

static bool printExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
                        int SrcEltBits, int DstEltBits, bool IsSext) {
  unsigned SrcIdx = getSrcIdx(MI, 1);
  auto *C = X86::getConstantFromPool(*MI, SrcIdx);
  if (C && C->getType()->getScalarSizeInBits() == unsigned(SrcEltBits)) {
    if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
      int NumElts = CDS->getNumElements();
      std::string Comment;
      raw_string_ostream CS(Comment);
      printDstRegisterName(CS, MI, SrcIdx);
      CS << " = [";
      for (int i = 0; i != NumElts; ++i) {
        if (i != 0)
          CS << ",";
        if (CDS->getElementType()->isIntegerTy()) {
          APInt Elt = CDS->getElementAsAPInt(i);
          Elt = IsSext ? Elt.sext(DstEltBits) : Elt.zext(DstEltBits);
          printConstant(Elt, CS);
        } else
          CS << "?";
      }
      CS << "]";
      OutStreamer.AddComment(CS.str());
      return true;
    }
  }

  return false;
}
static void printSignExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
                            int SrcEltBits, int DstEltBits) {
  printExtend(MI, OutStreamer, SrcEltBits, DstEltBits, true);
}
static void printZeroExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
                            int SrcEltBits, int DstEltBits) {
  if (printExtend(MI, OutStreamer, SrcEltBits, DstEltBits, false))
    return;

  // We didn't find a constant load; fall back to a shuffle mask decode.
  std::string Comment;
  raw_string_ostream CS(Comment);
  printDstRegisterName(CS, MI, getSrcIdx(MI, 1));
  CS << " = ";

  SmallVector<int> Mask;
  unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
  assert((Width % DstEltBits) == 0 && (DstEltBits % SrcEltBits) == 0 &&
         "Illegal extension ratio");
  DecodeZeroExtendMask(SrcEltBits, DstEltBits, Width / DstEltBits, false, Mask);
  printShuffleMask(CS, "mem", "", Mask);

  OutStreamer.AddComment(CS.str());
}
1716
1717void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
1718 assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
1719 assert((getSubtarget().isOSWindows() || getSubtarget().isUEFI()) &&
1720 "SEH_ instruction Windows and UEFI only");
1721
1722 // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86.
1723 if (EmitFPOData) {
1724 X86TargetStreamer *XTS =
1725 static_cast<X86TargetStreamer *>(OutStreamer->getTargetStreamer());
1726 switch (MI->getOpcode()) {
1727 case X86::SEH_PushReg:
1728 XTS->emitFPOPushReg(MI->getOperand(0).getImm());
1729 break;
1730 case X86::SEH_StackAlloc:
1731 XTS->emitFPOStackAlloc(MI->getOperand(0).getImm());
1732 break;
1733 case X86::SEH_StackAlign:
1734 XTS->emitFPOStackAlign(MI->getOperand(0).getImm());
1735 break;
1736 case X86::SEH_SetFrame:
1737 assert(MI->getOperand(1).getImm() == 0 &&
1738 ".cv_fpo_setframe takes no offset");
1739 XTS->emitFPOSetFrame(MI->getOperand(0).getImm());
1740 break;
1741 case X86::SEH_EndPrologue:
1742 XTS->emitFPOEndPrologue();
1743 break;
1744 case X86::SEH_SaveReg:
1745 case X86::SEH_SaveXMM:
1746 case X86::SEH_PushFrame:
1747 llvm_unreachable("SEH_ directive incompatible with FPO");
1748 break;
1749 default:
1750 llvm_unreachable("expected SEH_ instruction");
1751 }
1752 return;
1753 }
1754
1755 // Otherwise, use the .seh_ directives for all other Windows platforms.
1756 switch (MI->getOpcode()) {
1757 case X86::SEH_PushReg:
1758 OutStreamer->emitWinCFIPushReg(MI->getOperand(0).getImm());
1759 break;
1760
1761 case X86::SEH_SaveReg:
1762 OutStreamer->emitWinCFISaveReg(MI->getOperand(0).getImm(),
1763 MI->getOperand(1).getImm());
1764 break;
1765
1766 case X86::SEH_SaveXMM:
1767 OutStreamer->emitWinCFISaveXMM(MI->getOperand(0).getImm(),
1768 MI->getOperand(1).getImm());
1769 break;
1770
1771 case X86::SEH_StackAlloc:
1772 OutStreamer->emitWinCFIAllocStack(MI->getOperand(0).getImm());
1773 break;
1774
1775 case X86::SEH_SetFrame:
1776 OutStreamer->emitWinCFISetFrame(MI->getOperand(0).getImm(),
1777 MI->getOperand(1).getImm());
1778 break;
1779
1780 case X86::SEH_PushFrame:
1781 OutStreamer->emitWinCFIPushFrame(MI->getOperand(0).getImm());
1782 break;
1783
1784 case X86::SEH_EndPrologue:
1785 OutStreamer->emitWinCFIEndProlog();
1786 break;
1787
1788 case X86::SEH_BeginEpilogue:
1789 OutStreamer->emitWinCFIBeginEpilogue();
1790 break;
1791
1792 case X86::SEH_EndEpilogue:
1793 OutStreamer->emitWinCFIEndEpilogue();
1794 break;
1795
1796 case X86::SEH_UnwindV2Start:
1797 OutStreamer->emitWinCFIUnwindV2Start();
1798 break;
1799
1800 case X86::SEH_UnwindVersion:
1801 OutStreamer->emitWinCFIUnwindVersion(MI->getOperand(0).getImm());
1802 break;
1803
1804 default:
1805 llvm_unreachable("expected SEH_ instruction");
1806 }
1807}
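// Illustrative example (an assumed Win64 prologue, not from this file): the
// SEH_ pseudos above lower to .seh_ directives interleaved with the real
// prologue instructions, e.g.
//   pushq %rbp          ; SEH_PushReg     -> .seh_pushreg %rbp
//   subq  $32, %rsp     ; SEH_StackAlloc  -> .seh_stackalloc 32
//   movq  %rsp, %rbp    ; SEH_SetFrame    -> .seh_setframe %rbp, 0
//                       ; SEH_EndPrologue -> .seh_endprologue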
1808
1809static void addConstantComments(const MachineInstr *MI,
1810 MCStreamer &OutStreamer) {
1811 switch (MI->getOpcode()) {
1812 // Lower PSHUFB and VPERMILP normally but add a comment if we can find
1813 // a constant shuffle mask. We won't be able to do this at the MC layer
1814 // because the mask isn't an immediate.
1815 case X86::PSHUFBrm:
1816 case X86::VPSHUFBrm:
1817 case X86::VPSHUFBYrm:
1818 case X86::VPSHUFBZ128rm:
1819 case X86::VPSHUFBZ128rmk:
1820 case X86::VPSHUFBZ128rmkz:
1821 case X86::VPSHUFBZ256rm:
1822 case X86::VPSHUFBZ256rmk:
1823 case X86::VPSHUFBZ256rmkz:
1824 case X86::VPSHUFBZrm:
1825 case X86::VPSHUFBZrmk:
1826 case X86::VPSHUFBZrmkz: {
1827 unsigned SrcIdx = getSrcIdx(MI, 1);
1828 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1829 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1830 SmallVector<int, 64> Mask;
1831 DecodePSHUFBMask(C, Width, Mask);
1832 if (!Mask.empty())
1833 OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
1834 }
1835 break;
1836 }
1837
1838 case X86::VPERMILPSrm:
1839 case X86::VPERMILPSYrm:
1840 case X86::VPERMILPSZ128rm:
1841 case X86::VPERMILPSZ128rmk:
1842 case X86::VPERMILPSZ128rmkz:
1843 case X86::VPERMILPSZ256rm:
1844 case X86::VPERMILPSZ256rmk:
1845 case X86::VPERMILPSZ256rmkz:
1846 case X86::VPERMILPSZrm:
1847 case X86::VPERMILPSZrmk:
1848 case X86::VPERMILPSZrmkz: {
1849 unsigned SrcIdx = getSrcIdx(MI, 1);
1850 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1851 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1852 SmallVector<int, 16> Mask;
1853 DecodeVPERMILPMask(C, 32, Width, Mask);
1854 if (!Mask.empty())
1855 OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
1856 }
1857 break;
1858 }
1859 case X86::VPERMILPDrm:
1860 case X86::VPERMILPDYrm:
1861 case X86::VPERMILPDZ128rm:
1862 case X86::VPERMILPDZ128rmk:
1863 case X86::VPERMILPDZ128rmkz:
1864 case X86::VPERMILPDZ256rm:
1865 case X86::VPERMILPDZ256rmk:
1866 case X86::VPERMILPDZ256rmkz:
1867 case X86::VPERMILPDZrm:
1868 case X86::VPERMILPDZrmk:
1869 case X86::VPERMILPDZrmkz: {
1870 unsigned SrcIdx = getSrcIdx(MI, 1);
1871 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1872 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1873 SmallVector<int, 16> Mask;
1874 DecodeVPERMILPMask(C, 64, Width, Mask);
1875 if (!Mask.empty())
1876 OutStreamer.AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
1877 }
1878 break;
1879 }
1880
1881 case X86::VPERMIL2PDrm:
1882 case X86::VPERMIL2PSrm:
1883 case X86::VPERMIL2PDYrm:
1884 case X86::VPERMIL2PSYrm: {
1885 assert(MI->getNumOperands() >= (3 + X86::AddrNumOperands + 1) &&
1886 "Unexpected number of operands!");
1887
1888 const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
1889 if (!CtrlOp.isImm())
1890 break;
1891
1892 unsigned ElSize;
1893 switch (MI->getOpcode()) {
1894 default: llvm_unreachable("Invalid opcode");
1895 case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
1896 case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
1897 }
1898
1899 if (auto *C = X86::getConstantFromPool(*MI, 3)) {
1900 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1901 SmallVector<int, 16> Mask;
1902 DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
1903 if (!Mask.empty())
1904 OutStreamer.AddComment(getShuffleComment(MI, 1, 2, Mask));
1905 }
1906 break;
1907 }
1908
1909 case X86::VPPERMrrm: {
1910 if (auto *C = X86::getConstantFromPool(*MI, 3)) {
1911 unsigned Width = X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1912 SmallVector<int, 16> Mask;
1913 DecodeVPPERMMask(C, Width, Mask);
1914 if (!Mask.empty())
1915 OutStreamer.AddComment(getShuffleComment(MI, 1, 2, Mask));
1916 }
1917 break;
1918 }
1919
1920 case X86::MMX_MOVQ64rm: {
1921 if (auto *C = X86::getConstantFromPool(*MI, 1)) {
1922 std::string Comment;
1923 raw_string_ostream CS(Comment);
1924 const MachineOperand &DstOp = MI->getOperand(0);
1925 CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
1926 if (auto *CF = dyn_cast<ConstantFP>(C)) {
1927 CS << "0x" << toString(CF->getValueAPF().bitcastToAPInt(), 16, false);
1928 OutStreamer.AddComment(CS.str());
1929 }
1930 }
1931 break;
1932 }
1933
1934#define INSTR_CASE(Prefix, Instr, Suffix, Postfix) \
1935 case X86::Prefix##Instr##Suffix##rm##Postfix:
1936
1937#define CASE_AVX512_ARITH_RM(Instr) \
1938 INSTR_CASE(V, Instr, Z128, ) \
1939 INSTR_CASE(V, Instr, Z128, k) \
1940 INSTR_CASE(V, Instr, Z128, kz) \
1941 INSTR_CASE(V, Instr, Z256, ) \
1942 INSTR_CASE(V, Instr, Z256, k) \
1943 INSTR_CASE(V, Instr, Z256, kz) \
1944 INSTR_CASE(V, Instr, Z, ) \
1945 INSTR_CASE(V, Instr, Z, k) \
1946 INSTR_CASE(V, Instr, Z, kz)
1947
1948#define CASE_ARITH_RM(Instr) \
1949 INSTR_CASE(, Instr, , ) /* SSE */ \
1950 INSTR_CASE(V, Instr, , ) /* AVX-128 */ \
1951 INSTR_CASE(V, Instr, Y, ) /* AVX-256 */ \
1952 INSTR_CASE(V, Instr, Z128, ) \
1953 INSTR_CASE(V, Instr, Z128, k) \
1954 INSTR_CASE(V, Instr, Z128, kz) \
1955 INSTR_CASE(V, Instr, Z256, ) \
1956 INSTR_CASE(V, Instr, Z256, k) \
1957 INSTR_CASE(V, Instr, Z256, kz) \
1958 INSTR_CASE(V, Instr, Z, ) \
1959 INSTR_CASE(V, Instr, Z, k) \
1960 INSTR_CASE(V, Instr, Z, kz)
1961
1962 // TODO: Add additional instructions when useful.
1963 CASE_ARITH_RM(PMADDUBSW)
1964 CASE_ARITH_RM(PMADDWD)
1965 CASE_ARITH_RM(PMULDQ)
1966 CASE_ARITH_RM(PMULUDQ)
1967 CASE_ARITH_RM(PMULLD)
1968 CASE_AVX512_ARITH_RM(PMULLQ)
1969 CASE_ARITH_RM(PMULLW)
1970 CASE_ARITH_RM(PMULHW)
1971 CASE_ARITH_RM(PMULHUW)
1972 CASE_ARITH_RM(PMULHRSW) {
1973 unsigned SrcIdx = getSrcIdx(MI, 1);
1974 if (auto *C = X86::getConstantFromPool(*MI, SrcIdx + 1)) {
1975 std::string Comment;
1976 raw_string_ostream CS(Comment);
1977 unsigned VectorWidth =
1978 X86::getVectorRegisterWidth(MI->getDesc().operands()[0]);
1979 CS << "[";
1980 printConstant(C, VectorWidth, CS);
1981 CS << "]";
1982 OutStreamer.AddComment(CS.str());
1983 }
1984 break;
1985 }
1986
1987#define MASK_AVX512_CASE(Instr) \
1988 case Instr: \
1989 case Instr##k: \
1990 case Instr##kz:
1991
1992 case X86::MOVSDrm:
1993 case X86::VMOVSDrm:
1994 MASK_AVX512_CASE(X86::VMOVSDZrm)
1995 case X86::MOVSDrm_alt:
1996 case X86::VMOVSDrm_alt:
1997 case X86::VMOVSDZrm_alt:
1998 case X86::MOVQI2PQIrm:
1999 case X86::VMOVQI2PQIrm:
2000 case X86::VMOVQI2PQIZrm:
2001 printZeroUpperMove(MI, OutStreamer, 64, 128, "mem[0],zero");
2002 break;
2003
2004 MASK_AVX512_CASE(X86::VMOVSHZrm)
2005 case X86::VMOVSHZrm_alt:
2006 printZeroUpperMove(MI, OutStreamer, 16, 128,
2007 "mem[0],zero,zero,zero,zero,zero,zero,zero");
2008 break;
2009
2010 case X86::MOVSSrm:
2011 case X86::VMOVSSrm:
2012 MASK_AVX512_CASE(X86::VMOVSSZrm)
2013 case X86::MOVSSrm_alt:
2014 case X86::VMOVSSrm_alt:
2015 case X86::VMOVSSZrm_alt:
2016 case X86::MOVDI2PDIrm:
2017 case X86::VMOVDI2PDIrm:
2018 case X86::VMOVDI2PDIZrm:
2019 printZeroUpperMove(MI, OutStreamer, 32, 128, "mem[0],zero,zero,zero");
2020 break;
2021
2022#define MOV_CASE(Prefix, Suffix) \
2023 case X86::Prefix##MOVAPD##Suffix##rm: \
2024 case X86::Prefix##MOVAPS##Suffix##rm: \
2025 case X86::Prefix##MOVUPD##Suffix##rm: \
2026 case X86::Prefix##MOVUPS##Suffix##rm: \
2027 case X86::Prefix##MOVDQA##Suffix##rm: \
2028 case X86::Prefix##MOVDQU##Suffix##rm:
2029
2030#define MOV_AVX512_CASE(Suffix, Postfix) \
2031 case X86::VMOVDQA64##Suffix##rm##Postfix: \
2032 case X86::VMOVDQA32##Suffix##rm##Postfix: \
2033 case X86::VMOVDQU64##Suffix##rm##Postfix: \
2034 case X86::VMOVDQU32##Suffix##rm##Postfix: \
2035 case X86::VMOVDQU16##Suffix##rm##Postfix: \
2036 case X86::VMOVDQU8##Suffix##rm##Postfix: \
2037 case X86::VMOVAPS##Suffix##rm##Postfix: \
2038 case X86::VMOVAPD##Suffix##rm##Postfix: \
2039 case X86::VMOVUPS##Suffix##rm##Postfix: \
2040 case X86::VMOVUPD##Suffix##rm##Postfix:
2041
2042#define CASE_128_MOV_RM() \
2043 MOV_CASE(, ) /* SSE */ \
2044 MOV_CASE(V, ) /* AVX-128 */ \
2045 MOV_AVX512_CASE(Z128, ) \
2046 MOV_AVX512_CASE(Z128, k) \
2047 MOV_AVX512_CASE(Z128, kz)
2048
2049#define CASE_256_MOV_RM() \
2050 MOV_CASE(V, Y) /* AVX-256 */ \
2051 MOV_AVX512_CASE(Z256, ) \
2052 MOV_AVX512_CASE(Z256, k) \
2053 MOV_AVX512_CASE(Z256, kz) \
2054
2055#define CASE_512_MOV_RM() \
2056 MOV_AVX512_CASE(Z, ) \
2057 MOV_AVX512_CASE(Z, k) \
2058 MOV_AVX512_CASE(Z, kz) \
2059
2060 // For loads from a constant pool to a vector register, print the constant
2061 // loaded.
2062 CASE_128_MOV_RM()
2063 printBroadcast(MI, OutStreamer, 1, 128);
2064 break;
2065 CASE_256_MOV_RM()
2066 printBroadcast(MI, OutStreamer, 1, 256);
2067 break;
2068 CASE_512_MOV_RM()
2069 printBroadcast(MI, OutStreamer, 1, 512);
2070 break;
2071 case X86::VBROADCASTF128rm:
2072 case X86::VBROADCASTI128rm:
2073 MASK_AVX512_CASE(X86::VBROADCASTF32X4Z256rm)
2074 MASK_AVX512_CASE(X86::VBROADCASTF64X2Z256rm)
2075 MASK_AVX512_CASE(X86::VBROADCASTI32X4Z256rm)
2076 MASK_AVX512_CASE(X86::VBROADCASTI64X2Z256rm)
2077 printBroadcast(MI, OutStreamer, 2, 128);
2078 break;
2079 MASK_AVX512_CASE(X86::VBROADCASTF32X4Zrm)
2080 MASK_AVX512_CASE(X86::VBROADCASTF64X2Zrm)
2081 MASK_AVX512_CASE(X86::VBROADCASTI32X4Zrm)
2082 MASK_AVX512_CASE(X86::VBROADCASTI64X2Zrm)
2083 printBroadcast(MI, OutStreamer, 4, 128);
2084 break;
2085 MASK_AVX512_CASE(X86::VBROADCASTF32X8Zrm)
2086 MASK_AVX512_CASE(X86::VBROADCASTF64X4Zrm)
2087 MASK_AVX512_CASE(X86::VBROADCASTI32X8Zrm)
2088 MASK_AVX512_CASE(X86::VBROADCASTI64X4Zrm)
2089 printBroadcast(MI, OutStreamer, 2, 256);
2090 break;
2091
2092 // For broadcast loads from a constant pool to a vector register, repeatedly
2093 // print the constant loaded.
2094 case X86::MOVDDUPrm:
2095 case X86::VMOVDDUPrm:
2096 MASK_AVX512_CASE(X86::VMOVDDUPZ128rm)
2097 case X86::VPBROADCASTQrm:
2098 MASK_AVX512_CASE(X86::VPBROADCASTQZ128rm)
2099 printBroadcast(MI, OutStreamer, 2, 64);
2100 break;
2101 case X86::VBROADCASTSDYrm:
2102 MASK_AVX512_CASE(X86::VBROADCASTSDZ256rm)
2103 case X86::VPBROADCASTQYrm:
2104 MASK_AVX512_CASE(X86::VPBROADCASTQZ256rm)
2105 printBroadcast(MI, OutStreamer, 4, 64);
2106 break;
2107 MASK_AVX512_CASE(X86::VBROADCASTSDZrm)
2108 MASK_AVX512_CASE(X86::VPBROADCASTQZrm)
2109 printBroadcast(MI, OutStreamer, 8, 64);
2110 break;
2111 case X86::VBROADCASTSSrm:
2112 MASK_AVX512_CASE(X86::VBROADCASTSSZ128rm)
2113 case X86::VPBROADCASTDrm:
2114 MASK_AVX512_CASE(X86::VPBROADCASTDZ128rm)
2115 printBroadcast(MI, OutStreamer, 4, 32);
2116 break;
2117 case X86::VBROADCASTSSYrm:
2118 MASK_AVX512_CASE(X86::VBROADCASTSSZ256rm)
2119 case X86::VPBROADCASTDYrm:
2120 MASK_AVX512_CASE(X86::VPBROADCASTDZ256rm)
2121 printBroadcast(MI, OutStreamer, 8, 32);
2122 break;
2123 MASK_AVX512_CASE(X86::VBROADCASTSSZrm)
2124 MASK_AVX512_CASE(X86::VPBROADCASTDZrm)
2125 printBroadcast(MI, OutStreamer, 16, 32);
2126 break;
2127 case X86::VPBROADCASTWrm:
2128 MASK_AVX512_CASE(X86::VPBROADCASTWZ128rm)
2129 printBroadcast(MI, OutStreamer, 8, 16);
2130 break;
2131 case X86::VPBROADCASTWYrm:
2132 MASK_AVX512_CASE(X86::VPBROADCASTWZ256rm)
2133 printBroadcast(MI, OutStreamer, 16, 16);
2134 break;
2135 MASK_AVX512_CASE(X86::VPBROADCASTWZrm)
2136 printBroadcast(MI, OutStreamer, 32, 16);
2137 break;
2138 case X86::VPBROADCASTBrm:
2139 MASK_AVX512_CASE(X86::VPBROADCASTBZ128rm)
2140 printBroadcast(MI, OutStreamer, 16, 8);
2141 break;
2142 case X86::VPBROADCASTBYrm:
2143 MASK_AVX512_CASE(X86::VPBROADCASTBZ256rm)
2144 printBroadcast(MI, OutStreamer, 32, 8);
2145 break;
2146 MASK_AVX512_CASE(X86::VPBROADCASTBZrm)
2147 printBroadcast(MI, OutStreamer, 64, 8);
2148 break;
2149
2150#define MOVX_CASE(Prefix, Ext, Type, Suffix, Postfix) \
2151 case X86::Prefix##PMOV##Ext##Type##Suffix##rm##Postfix:
2152
2153#define CASE_MOVX_RM(Ext, Type) \
2154 MOVX_CASE(, Ext, Type, , ) \
2155 MOVX_CASE(V, Ext, Type, , ) \
2156 MOVX_CASE(V, Ext, Type, Y, ) \
2157 MOVX_CASE(V, Ext, Type, Z128, ) \
2158 MOVX_CASE(V, Ext, Type, Z128, k ) \
2159 MOVX_CASE(V, Ext, Type, Z128, kz ) \
2160 MOVX_CASE(V, Ext, Type, Z256, ) \
2161 MOVX_CASE(V, Ext, Type, Z256, k ) \
2162 MOVX_CASE(V, Ext, Type, Z256, kz ) \
2163 MOVX_CASE(V, Ext, Type, Z, ) \
2164 MOVX_CASE(V, Ext, Type, Z, k ) \
2165 MOVX_CASE(V, Ext, Type, Z, kz )
2166
2167 CASE_MOVX_RM(SX, BD)
2168 printSignExtend(MI, OutStreamer, 8, 32);
2169 break;
2170 CASE_MOVX_RM(SX, BQ)
2171 printSignExtend(MI, OutStreamer, 8, 64);
2172 break;
2173 CASE_MOVX_RM(SX, BW)
2174 printSignExtend(MI, OutStreamer, 8, 16);
2175 break;
2176 CASE_MOVX_RM(SX, DQ)
2177 printSignExtend(MI, OutStreamer, 32, 64);
2178 break;
2179 CASE_MOVX_RM(SX, WD)
2180 printSignExtend(MI, OutStreamer, 16, 32);
2181 break;
2182 CASE_MOVX_RM(SX, WQ)
2183 printSignExtend(MI, OutStreamer, 16, 64);
2184 break;
2185
2186 CASE_MOVX_RM(ZX, BD)
2187 printZeroExtend(MI, OutStreamer, 8, 32);
2188 break;
2189 CASE_MOVX_RM(ZX, BQ)
2190 printZeroExtend(MI, OutStreamer, 8, 64);
2191 break;
2192 CASE_MOVX_RM(ZX, BW)
2193 printZeroExtend(MI, OutStreamer, 8, 16);
2194 break;
2195 CASE_MOVX_RM(ZX, DQ)
2196 printZeroExtend(MI, OutStreamer, 32, 64);
2197 break;
2198 CASE_MOVX_RM(ZX, WD)
2199 printZeroExtend(MI, OutStreamer, 16, 32);
2200 break;
2201 CASE_MOVX_RM(ZX, WQ)
2202 printZeroExtend(MI, OutStreamer, 16, 64);
2203 break;
2204 }
2205}
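// Illustrative output (assumed constant-pool contents): under verbose asm, a
// broadcast load such as
//   vbroadcastss .LCPI0_0(%rip), %xmm0
// is annotated by the printBroadcast(MI, OutStreamer, 4, 32) path above with
// a comment along the lines of
//   # xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]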
2206
2207// Does the given operand refer to a DLLIMPORT function?
2208bool isImportedFunction(const MachineOperand &MO) {
2209 return MO.isGlobal() && (MO.getTargetFlags() == X86II::MO_DLLIMPORT);
2210}
2211
2212// Is the given instruction a call to a CFGuard function?
2213bool isCallToCFGuardFunction(const MachineInstr *MI) {
2214 assert(MI->getOpcode() == X86::TAILJMPm64_REX ||
2215 MI->getOpcode() == X86::CALL64m);
2216 const MachineOperand &MO = MI->getOperand(3);
2217 return MO.isGlobal() && (MO.getTargetFlags() == X86II::MO_NO_FLAG) &&
2218 isCFGuardFunction(MO.getGlobal());
2219}
2220
2221// Does the containing block for the given instruction contain any jump table
2222// info (indicating that the block is a dispatch for a jump table)?
2223bool hasJumpTableInfoInBlock(const llvm::MachineInstr *MI) {
2224 const MachineBasicBlock &MBB = *MI->getParent();
2225 for (auto I = MBB.instr_rbegin(), E = MBB.instr_rend(); I != E; ++I)
2226 if (I->isJumpTableDebugInfo())
2227 return true;
2228
2229 return false;
2230}
2231
2232void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
2233 // FIXME: Enable feature predicate checks once all the tests pass.
2234 // X86_MC::verifyInstructionPredicates(MI->getOpcode(),
2235 // Subtarget->getFeatureBits());
2236
2237 X86MCInstLower MCInstLowering(*MF, *this);
2238 const X86RegisterInfo *RI =
2239 MF->getSubtarget<X86Subtarget>().getRegisterInfo();
2240
2241 if (MI->getOpcode() == X86::OR64rm) {
2242 for (auto &Opd : MI->operands()) {
2243 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2244 "swift_async_extendedFramePointerFlags") {
2245 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2246 }
2247 }
2248 }
2249
2250 // Add comments for values loaded from constant pool.
2251 if (OutStreamer->isVerboseAsm())
2252 addConstantComments(MI, *OutStreamer);
2253
2254 // Add a comment about EVEX compression
2255 if (TM.Options.MCOptions.ShowMCEncoding) {
2256 if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_LEGACY)
2257 OutStreamer->AddComment("EVEX TO LEGACY Compression ", false);
2258 else if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
2259 OutStreamer->AddComment("EVEX TO VEX Compression ", false);
2260 else if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_EVEX)
2261 OutStreamer->AddComment("EVEX TO EVEX Compression ", false);
2262 }
2263
2264 // We use this to suppress NOP padding for Windows EH.
2265 bool IsTailJump = false;
2266
2267 switch (MI->getOpcode()) {
2268 case TargetOpcode::DBG_VALUE:
2269 llvm_unreachable("Should be handled target independently");
2270
2271 case X86::EH_RETURN:
2272 case X86::EH_RETURN64: {
2273 // Lower these as normal, but add some comments.
2274 Register Reg = MI->getOperand(0).getReg();
2275 OutStreamer->AddComment(StringRef("eh_return, addr: %") +
2276 X86ATTInstPrinter::getRegisterName(Reg));
2277 break;
2278 }
2279 case X86::CLEANUPRET: {
2280 // Lower these as normal, but add some comments.
2281 OutStreamer->AddComment("CLEANUPRET");
2282 break;
2283 }
2284
2285 case X86::CATCHRET: {
2286 // Lower these as normal, but add some comments.
2287 OutStreamer->AddComment("CATCHRET");
2288 break;
2289 }
2290
2291 case X86::ENDBR32:
2292 case X86::ENDBR64: {
2293 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
2294 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
2295 // non-empty. If MI is the initial ENDBR, place the
2296 // __patchable_function_entries label after ENDBR.
2297 if (CurrentPatchableFunctionEntrySym &&
2298 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
2299 MI == &MF->front().front()) {
2300 MCInst Inst;
2301 MCInstLowering.Lower(MI, Inst);
2302 EmitAndCountInstruction(Inst);
2303 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
2304 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
2305 return;
2306 }
2307 break;
2308 }
2309
2310 case X86::TAILJMPd64:
2311 if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
2312 EmitAndCountInstruction(MCInstBuilder(X86::CS_PREFIX));
2313
2314 if (EnableImportCallOptimization && isImportedFunction(MI->getOperand(0))) {
2315 emitLabelAndRecordForImportCallOptimization(
2316 IMAGE_RETPOLINE_AMD64_IMPORT_BR);
2317 }
2318
2319 // Lower this as normal, but add a comment.
2320 OutStreamer->AddComment("TAILCALL");
2321 IsTailJump = true;
2322 break;
2323
2324 case X86::TAILJMPr:
2325 case X86::TAILJMPm:
2326 case X86::TAILJMPd:
2327 case X86::TAILJMPd_CC:
2328 case X86::TAILJMPr64:
2329 case X86::TAILJMPm64:
2330 case X86::TAILJMPd64_CC:
2331 if (EnableImportCallOptimization)
2332 report_fatal_error("Unexpected TAILJMP instruction was emitted when "
2333 "import call optimization was enabled");
2334
2335 // Lower these as normal, but add some comments.
2336 OutStreamer->AddComment("TAILCALL");
2337 IsTailJump = true;
2338 break;
2339
2340 case X86::TAILJMPm64_REX:
2341 if (EnableImportCallOptimization && isCallToCFGuardFunction(MI)) {
2342 emitLabelAndRecordForImportCallOptimization(
2343 IMAGE_RETPOLINE_AMD64_CFG_BR_REX);
2344 }
2345
2346 OutStreamer->AddComment("TAILCALL");
2347 IsTailJump = true;
2348 break;
2349
2350 case X86::TAILJMPr64_REX: {
2351 if (EnableImportCallOptimization) {
2352 assert(MI->getOperand(0).getReg() == X86::RAX &&
2353 "Indirect tail calls with impcall enabled must go through RAX (as "
2354 "enforced by TCRETURNImpCallri64)");
2355 emitLabelAndRecordForImportCallOptimization(
2356 IMAGE_RETPOLINE_AMD64_INDIR_BR);
2357 }
2358
2359 OutStreamer->AddComment("TAILCALL");
2360 IsTailJump = true;
2361 break;
2362 }
2363
2364 case X86::JMP64r:
2365 if (EnableImportCallOptimization && hasJumpTableInfoInBlock(MI)) {
2366 uint16_t EncodedReg =
2367 this->getSubtarget().getRegisterInfo()->getEncodingValue(
2368 MI->getOperand(0).getReg().asMCReg());
2369 emitLabelAndRecordForImportCallOptimization(
2370 (ImportCallKind)(IMAGE_RETPOLINE_AMD64_SWITCHTABLE_FIRST +
2371 EncodedReg));
2372 }
2373 break;
2374
2375 case X86::JMP16r:
2376 case X86::JMP16m:
2377 case X86::JMP32r:
2378 case X86::JMP32m:
2379 case X86::JMP64m:
2380 if (EnableImportCallOptimization && hasJumpTableInfoInBlock(MI))
2381 report_fatal_error(
2382 "Unexpected JMP instruction was emitted for a jump-table when import "
2383 "call optimization was enabled");
2384 break;
2385
2386 case X86::TLS_addr32:
2387 case X86::TLS_addr64:
2388 case X86::TLS_addrX32:
2389 case X86::TLS_base_addr32:
2390 case X86::TLS_base_addr64:
2391 case X86::TLS_base_addrX32:
2392 case X86::TLS_desc32:
2393 case X86::TLS_desc64:
2394 return LowerTlsAddr(MCInstLowering, *MI);
2395
2396 case X86::MOVPC32r: {
2397 // This is a pseudo op for a two instruction sequence with a label, which
2398 // looks like:
2399 // call "L1$pb"
2400 // "L1$pb":
2401 // popl %esi
2402
2403 // Emit the call.
2404 MCSymbol *PICBase = MF->getPICBaseSymbol();
2405 // FIXME: We would like an efficient form for this, so we don't have to do a
2406 // lot of extra uniquing.
2407 EmitAndCountInstruction(
2408 MCInstBuilder(X86::CALLpcrel32)
2409 .addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));
2410
2411 const X86FrameLowering *FrameLowering =
2412 MF->getSubtarget<X86Subtarget>().getFrameLowering();
2413 bool hasFP = FrameLowering->hasFP(*MF);
2414
2415 // TODO: This is needed only if we require precise CFA.
2416 bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
2417 !OutStreamer->getDwarfFrameInfos().back().End;
2418
2419 int stackGrowth = -RI->getSlotSize();
2420
2421 if (HasActiveDwarfFrame && !hasFP) {
2422 OutStreamer->emitCFIAdjustCfaOffset(-stackGrowth);
2423 MF->getInfo<X86MachineFunctionInfo>()->setHasCFIAdjustCfa(true);
2424 }
2425
2426 // Emit the label.
2427 OutStreamer->emitLabel(PICBase);
2428
2429 // popl $reg
2430 EmitAndCountInstruction(
2431 MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));
2432
2433 if (HasActiveDwarfFrame && !hasFP) {
2434 OutStreamer->emitCFIAdjustCfaOffset(stackGrowth);
2435 }
2436 return;
2437 }
2438
2439 case X86::ADD32ri: {
2440 // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
2441 if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
2442 break;
2443
2444 // Okay, we have something like:
2445 // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
2446
2447 // For this, we want to print something like:
2448 // MYGLOBAL + (. - PICBASE)
2449 // However, we can't generate a ".", so just emit a new label here and refer
2450 // to it.
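// (Illustratively, assuming the temp label emitted below is named .Ltmp0, the
// final instruction resembles: addl $MYGLOBAL+(.Ltmp0-L0$pb), %eax.)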
2451 MCSymbol *DotSym = OutContext.createTempSymbol();
2452 OutStreamer->emitLabel(DotSym);
2453
2454 // Now that we have emitted the label, lower the complex operand expression.
2455 MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
2456
2457 const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
2458 const MCExpr *PICBase =
2459 MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
2460 DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);
2461
2462 DotExpr = MCBinaryExpr::createAdd(
2463 MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);
2464
2465 EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
2466 .addReg(MI->getOperand(0).getReg())
2467 .addReg(MI->getOperand(1).getReg())
2468 .addExpr(DotExpr));
2469 return;
2470 }
2471 case TargetOpcode::STATEPOINT:
2472 return LowerSTATEPOINT(*MI, MCInstLowering);
2473
2474 case TargetOpcode::FAULTING_OP:
2475 return LowerFAULTING_OP(*MI, MCInstLowering);
2476
2477 case TargetOpcode::FENTRY_CALL:
2478 return LowerFENTRY_CALL(*MI, MCInstLowering);
2479
2480 case TargetOpcode::PATCHABLE_OP:
2481 return LowerPATCHABLE_OP(*MI, MCInstLowering);
2482
2483 case TargetOpcode::STACKMAP:
2484 return LowerSTACKMAP(*MI);
2485
2486 case TargetOpcode::PATCHPOINT:
2487 return LowerPATCHPOINT(*MI, MCInstLowering);
2488
2489 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2490 return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);
2491
2492 case TargetOpcode::PATCHABLE_RET:
2493 return LowerPATCHABLE_RET(*MI, MCInstLowering);
2494
2495 case TargetOpcode::PATCHABLE_TAIL_CALL:
2496 return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
2497
2498 case TargetOpcode::PATCHABLE_EVENT_CALL:
2499 return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
2500
2501 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
2502 return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);
2503
2504 case X86::MORESTACK_RET:
2505 EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
2506 return;
2507
2508 case X86::KCFI_CHECK:
2509 return LowerKCFI_CHECK(*MI);
2510
2511 case X86::ASAN_CHECK_MEMACCESS:
2512 return LowerASAN_CHECK_MEMACCESS(*MI);
2513
2514 case X86::MORESTACK_RET_RESTORE_R10:
2515 // Return, then restore R10.
2516 EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
2517 EmitAndCountInstruction(
2518 MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
2519 return;
2520
2521 case X86::SEH_PushReg:
2522 case X86::SEH_SaveReg:
2523 case X86::SEH_SaveXMM:
2524 case X86::SEH_StackAlloc:
2525 case X86::SEH_StackAlign:
2526 case X86::SEH_SetFrame:
2527 case X86::SEH_PushFrame:
2528 case X86::SEH_EndPrologue:
2529 case X86::SEH_EndEpilogue:
2530 case X86::SEH_UnwindV2Start:
2531 case X86::SEH_UnwindVersion:
2532 EmitSEHInstruction(MI);
2533 return;
2534
2535 case X86::SEH_BeginEpilogue: {
2536 assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
2537 EmitSEHInstruction(MI);
2538 return;
2539 }
2540 case X86::UBSAN_UD1:
2541 EmitAndCountInstruction(MCInstBuilder(X86::UD1Lm)
2542 .addReg(X86::EAX)
2543 .addReg(X86::EAX)
2544 .addImm(1)
2545 .addReg(X86::NoRegister)
2546 .addImm(MI->getOperand(0).getImm())
2547 .addReg(X86::NoRegister));
2548 return;
2549 case X86::CALL64pcrel32:
2550 if (IndCSPrefix && MI->hasRegisterImplicitUseOperand(X86::R11))
2551 EmitAndCountInstruction(MCInstBuilder(X86::CS_PREFIX));
2552
2553 if (EnableImportCallOptimization && isImportedFunction(MI->getOperand(0))) {
2554 emitLabelAndRecordForImportCallOptimization(
2555 IMAGE_RETPOLINE_AMD64_IMPORT_CALL);
2556
2557 MCInst TmpInst;
2558 MCInstLowering.Lower(MI, TmpInst);
2559
2560 // For Import Call Optimization to work, the call instruction needs a rex
2561 // prefix, and a 5-byte nop must follow the call instruction.
2562 EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
2563 emitCallInstruction(TmpInst);
2564 emitNop(*OutStreamer, 5, Subtarget);
2565 maybeEmitNopAfterCallForWindowsEH(MI);
2566 return;
2567 }
2568
2569 break;
2570
2571 case X86::CALL64r:
2572 if (EnableImportCallOptimization) {
2573 assert(MI->getOperand(0).getReg() == X86::RAX &&
2574 "Indirect calls with impcall enabled must go through RAX (as "
2575 "enforced by CALL64r_ImpCall)");
2576
2577 emitLabelAndRecordForImportCallOptimization(
2578 IMAGE_RETPOLINE_AMD64_INDIR_CALL);
2579 MCInst TmpInst;
2580 MCInstLowering.Lower(MI, TmpInst);
2581 emitCallInstruction(TmpInst);
2582
2583 // For Import Call Optimization to work, we need a 3-byte nop after the
2584 // call instruction.
2585 emitNop(*OutStreamer, 3, Subtarget);
2586 maybeEmitNopAfterCallForWindowsEH(MI);
2587 return;
2588 }
2589 break;
2590
2591 case X86::CALL64m:
2592 if (EnableImportCallOptimization && isCallToCFGuardFunction(MI)) {
2593 emitLabelAndRecordForImportCallOptimization(
2594 IMAGE_RETPOLINE_AMD64_CFG_CALL);
2595 }
2596 break;
2597
2598 case X86::JCC_1:
2599 // Two instruction prefixes (2EH for branch not-taken and 3EH for branch
2600 // taken) are used as branch hints. Here we add the branch-taken prefix to
2601 // a jump instruction whose taken probability exceeds the threshold.
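// For example (illustrative encodings; only the taken hint is emitted here,
// via X86::DS_PREFIX):
//   3e 75 xx    ds jne <target>   ; 3EH prefix = branch-taken hint
//   2e 75 xx    cs jne <target>   ; 2EH prefix = branch-not-taken hint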
2602 if (getSubtarget().hasBranchHint() && EnableBranchHint) {
2603 const MachineBranchProbabilityInfo *MBPI =
2604 &getAnalysis<MachineBranchProbabilityInfoWrapperPass>().getMBPI();
2605 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
2606 BranchProbability EdgeProb =
2607 MBPI->getEdgeProbability(MI->getParent(), DestBB);
2608 BranchProbability Threshold(BranchHintProbabilityThreshold, 100);
2609 if (EdgeProb > Threshold)
2610 EmitAndCountInstruction(MCInstBuilder(X86::DS_PREFIX));
2611 }
2612 break;
2613 }
2614
2615 MCInst TmpInst;
2616 MCInstLowering.Lower(MI, TmpInst);
2617
2618 if (MI->isCall()) {
2619 emitCallInstruction(TmpInst);
2620 // Since tail calls transfer control without leaving a stack frame, there is
2621 // never a need for NOP padding after tail calls.
2622 if (!IsTailJump)
2623 maybeEmitNopAfterCallForWindowsEH(MI);
2624 return;
2625 }
2626
2627 EmitAndCountInstruction(TmpInst);
2628}
2629
2630void X86AsmPrinter::emitCallInstruction(const llvm::MCInst &MCI) {
2631 // Stackmap shadows cannot include branch targets, so we can count the bytes
2632 // in a call towards the shadow, but must ensure that no thread returns
2633 // into the stackmap shadow. The only way to achieve this is if the call
2634 // is at the end of the shadow.
2635
2636 // Count the size of the call towards the shadow.
2637 SMShadowTracker.count(MCI, getSubtargetInfo(), CodeEmitter.get());
2638 // Then flush the shadow so that we fill with nops before the call, not
2639 // after it.
2640 SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
2641 // Then emit the call
2642 OutStreamer->emitInstruction(MCI, getSubtargetInfo());
2643}
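// Sketch of the resulting layout (illustrative): when a call is reached while
// the stackmap shadow still needs padding, the nops land *before* the call so
// that the call sits at the end of the shadow:
//   <nop padding, if required>
//   call fn              ; counted toward the shadow, placed at its end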
2644
2645// Determines whether a NOP is required after a CALL, so that Windows EH
2646// IP2State tables have the correct information.
2647//
2648// On most Windows platforms (AMD64, ARM64, ARM32, IA64, but *not* x86-32),
2649// exception handling works by looking up instruction pointers in lookup
2650// tables. These lookup tables are stored in .xdata sections in executables.
2651// One element of the lookup tables are the "IP2State" tables (Instruction
2652// Pointer to State).
2653//
2654// If a function has any instructions that require cleanup during exception
2655// unwinding, then it will have an IP2State table. Each entry in the IP2State
2656// table describes a range of bytes in the function's instruction stream, and
2657// associates an "EH state number" with that range of instructions. A value of
2658// -1 means "the null state", which does not require any code to execute.
2659// A value other than -1 is an index into the State table.
2660//
2661// The entries in the IP2State table contain byte offsets within the instruction
2662// stream of the function. The Windows ABI requires that these offsets are
2663// aligned to instruction boundaries; they are not permitted to point to a byte
2664// that is not the first byte of an instruction.
2665//
2666// Unfortunately, CALL instructions present a problem during unwinding. CALL
2667// instructions push the address of the instruction after the CALL instruction,
2668// so that execution can resume after the CALL. If the CALL is the last
2669// instruction within an IP2State region, then the return address (on the stack)
2670// points to the *next* IP2State region. This means that the unwinder will
2671// use the wrong cleanup funclet during unwinding.
2672//
2673// To fix this problem, the Windows AMD64 ABI requires that CALL instructions
2674// are never placed at the end of an IP2State region. Stated equivalently, the
2675// end of a CALL instruction cannot be aligned to an IP2State boundary. If a
2676// CALL instruction would occur at the end of an IP2State region, then the
2677// compiler must insert a NOP instruction after the CALL. The NOP instruction
2678// is placed in the same EH region as the CALL instruction, so that the return
2679// address points to the NOP and the unwinder will locate the correct region.
2680//
2681// NOP padding is only necessary on Windows AMD64 targets. On ARM64 and ARM32,
2682// instructions have a fixed size so the unwinder knows how to "back up" by
2683// one instruction.
2684//
2685// Interaction with Import Call Optimization (ICO):
2686//
2687// Import Call Optimization (ICO) is a compiler + OS feature on Windows which
2688// improves the performance and security of DLL imports. ICO relies on using a
2689// specific CALL idiom that can be replaced by the OS DLL loader. This removes
2690// a load and indirect CALL and replaces it with a single direct CALL.
2691//
2692// To achieve this, ICO also inserts NOPs after the CALL instruction. If the
2693// end of the CALL is aligned with an EH state transition, we *also* insert
2694// a single-byte NOP. **Both forms of NOPs must be preserved.** They cannot
2695// be combined into a single larger NOP; nor can the second NOP be removed.
2696//
2697// This is necessary because, if ICO is active and the call site is modified
2698// by the loader, the loader will end up overwriting the NOPs that were inserted
2699// for ICO. That means that those NOPs cannot be used for the correct
2700// termination of the exception handling region (the IP2State transition),
2701// so we still need an additional NOP instruction. The NOPs cannot be combined
2702// into a longer NOP (which is ordinarily desirable) because then ICO would
2703// split one instruction, producing a malformed instruction after the ICO call.
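// A sketch of the layout this function ensures (illustrative):
//
//   call fn           ; last real instruction in the current EH state's region
//   nop               ; inserted: the return address now resolves to this state
// .Ltmp_eh:           ; EH_LABEL - the next IP2State region starts here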
2704void X86AsmPrinter::maybeEmitNopAfterCallForWindowsEH(const MachineInstr *MI) {
2705 // We only need to insert NOPs after CALLs when targeting Windows on AMD64.
2706 // (Don't let the name fool you: Itanium refers to table-based exception
2707 // handling, not the Itanium architecture.)
2708 if (MAI->getExceptionHandlingType() != ExceptionHandling::WinEH ||
2709 MAI->getWinEHEncodingType() != WinEH::EncodingType::Itanium) {
2710 return;
2711 }
2712
2713 bool HasEHPersonality = MF->getWinEHFuncInfo() != nullptr;
2714
2715 // Set up MBB iterator, initially positioned on the same MBB as MI.
2716 MachineFunction::const_iterator MFI(MI->getParent());
2717 MachineFunction::const_iterator MFE = MF->end();
2718
2719 // Set up instruction iterator, positioned immediately *after* MI.
2720 MachineBasicBlock::const_iterator MBBI(MI);
2721 MachineBasicBlock::const_iterator MBBE = MI->getParent()->end();
2722 ++MBBI; // Step over MI
2723
2724 // This loop iterates MBBs
2725 for (;;) {
2726 // This loop iterates instructions
2727 for (; MBBI != MBBE; ++MBBI) {
2728 // Check the instruction that follows this CALL.
2729 const MachineInstr &NextMI = *MBBI;
2730
2731 // If there is an EH_LABEL after this CALL, then there is an EH state
2732 // transition after this CALL. This is exactly the situation which
2733 // requires NOP padding.
2734 if (NextMI.isEHLabel()) {
2735 if (HasEHPersonality) {
2736 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2737 return;
2738 }
2739 // We actually want to continue, in case there is an SEH_BeginEpilogue
2740 // instruction after the EH_LABEL. In some situations, IR is produced
2741 // that contains EH_LABEL pseudo-instructions, even when we are not
2742 // generating IP2State tables. We still need to insert a NOP before
2743 // SEH_BeginEpilogue in that case.
2744 continue;
2745 }
2746
2747 // Somewhat similarly, if the CALL is the last instruction before the
2748 // SEH prologue, then we also need a NOP. This is necessary because the
2749 // Windows stack unwinder will not invoke a function's exception handler
2750 // if the instruction pointer is in the function prologue or epilogue.
2751 //
2752 // We always emit a NOP before SEH_BeginEpilogue, even if there is no
2753 // personality function (unwind info) for this frame. This is the same
2754 // behavior as MSVC.
2755 if (NextMI.getOpcode() == X86::SEH_BeginEpilogue) {
2756 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2757 return;
2758 }
2759
2760 if (!NextMI.isPseudo() && !NextMI.isMetaInstruction()) {
2761 // We found a real instruction. During the CALL, the return IP will
2762 // point to this instruction. Since this instruction has the same EH
2763 // state as the call itself (because there is no intervening EH_LABEL),
2764 // the IP2State table will be accurate; there is no need to insert a
2765 // NOP.
2766 return;
2767 }
2768
2769 // The next instruction is a pseudo-op. Ignore it and keep searching.
2770 // Because these instructions do not generate any machine code, they
2771 // cannot prevent the IP2State table from pointing at the wrong
2772 // instruction during a CALL.
2773 }
2774
2775 // We've reached the end of this MBB. Find the next MBB in program order.
2776 // MBB order should be finalized by this point, so falling across MBBs is
2777 // expected.
2778 ++MFI;
2779 if (MFI == MFE) {
2780 // No more blocks; we've reached the end of the function. This should
2781 // only happen with no-return functions, but double-check to be sure.
2782 if (HasEHPersonality) {
2783 // If the CALL has no successors, then it is a noreturn function.
2784 // Insert an INT3 instead of a NOP. This accomplishes the same purpose,
2785 // but is more clear to read. Also, analysis tools will understand
2786 // that they should not continue disassembling after the CALL (unless
2787 // there are other branches to that label).
2788 if (MI->getParent()->succ_empty())
2789 EmitAndCountInstruction(MCInstBuilder(X86::INT3));
2790 else
2791 EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
2792 }
2793 return;
2794 }
2795
2796 // Set up iterator to scan the next basic block.
2797 const MachineBasicBlock *NextMBB = &*MFI;
2798 MBBI = NextMBB->instr_begin();
2799 MBBE = NextMBB->instr_end();
2800 }
2801}
2802
2803void X86AsmPrinter::emitLabelAndRecordForImportCallOptimization(
2804 ImportCallKind Kind) {
2805 assert(EnableImportCallOptimization);
2806
2807 MCSymbol *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
2808 OutStreamer->emitLabel(CallSiteSymbol);
2809
2810 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
2811 .push_back({CallSiteSymbol, Kind});
2812}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static void printShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef< int > Mask)
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
uint64_t IntrinsicInst * II
static cl::opt< bool > EnableBranchHint("ppc-use-branch-hint", cl::init(true), cl::desc("Enable static hinting of branches on ppc"), cl::Hidden)
static MCSymbol * GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallString class.
This file contains some functions that are useful when dealing with strings.
static MCOperand LowerSymbolOperand(const MachineInstr *MI, const MachineOperand &MO, const MCSymbol *Symbol, AsmPrinter &AP)
static void emitX86Nops(MCStreamer &OS, unsigned NumBytes, const X86Subtarget *Subtarget)
Emit the optimal amount of multi-byte nops on X86.
static unsigned getRetOpcode(const X86Subtarget &Subtarget)
static void printSignExtend(const MachineInstr *MI, MCStreamer &OutStreamer, int SrcEltBits, int DstEltBits)
static unsigned convertTailJumpOpcode(unsigned Opcode)
static unsigned getSrcIdx(const MachineInstr *MI, unsigned SrcIdx)
static void printBroadcast(const MachineInstr *MI, MCStreamer &OutStreamer, int Repeats, int BitWidth)
static bool printExtend(const MachineInstr *MI, MCStreamer &OutStreamer, int SrcEltBits, int DstEltBits, bool IsSext)
static void printZeroUpperMove(const MachineInstr *MI, MCStreamer &OutStreamer, int SclWidth, int VecWidth, const char *ShuffleComment)
#define MASK_AVX512_CASE(Instr)
#define CASE_ARITH_RM(Instr)
static void addConstantComments(const MachineInstr *MI, MCStreamer &OutStreamer)
#define CASE_256_MOV_RM()
#define CASE_AVX512_ARITH_RM(Instr)
bool hasJumpTableInfoInBlock(const llvm::MachineInstr *MI)
static unsigned emitNop(MCStreamer &OS, unsigned NumBytes, const X86Subtarget *Subtarget)
Emit the largest nop instruction smaller than or equal to NumBytes bytes.
static void printDstRegisterName(raw_ostream &CS, const MachineInstr *MI, unsigned SrcOpIdx)
#define CASE_MOVX_RM(Ext, Type)
bool isImportedFunction(const MachineOperand &MO)
static cl::opt< bool > EnableBranchHint("enable-branch-hint", cl::desc("Enable branch hint."), cl::init(false), cl::Hidden)
static void printConstant(const APInt &Val, raw_ostream &CS, bool PrintZero=false)
static void printZeroExtend(const MachineInstr *MI, MCStreamer &OutStreamer, int SrcEltBits, int DstEltBits)
static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx, unsigned SrcOp2Idx, ArrayRef< int > Mask)
bool isCallToCFGuardFunction(const MachineInstr *MI)
#define CASE_512_MOV_RM()
static cl::opt< unsigned > BranchHintProbabilityThreshold("branch-hint-probability-threshold", cl::desc("The probability threshold of enabling branch hint."), cl::init(50), cl::Hidden)
#define CASE_128_MOV_RM()
void toString(SmallVectorImpl< char > &Str, unsigned FormatPrecision=0, unsigned FormatMaxPadding=3, bool TruncateZero=true) const
Definition APFloat.h:1460
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition APFloat.h:1061
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1012
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
unsigned getNumWords() const
Get the number of words.
Definition APInt.h:1496
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
Definition APInt.cpp:985
const uint64_t * getRawData() const
This function returns a pointer to the internal storage of the APInt.
Definition APInt.h:570
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
MCSymbol * getSymbol(const GlobalValue *GV) const
MCSymbol * CurrentFnBegin
Definition AsmPrinter.h:220
TargetMachine & TM
Target machine description.
Definition AsmPrinter.h:94
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
const MCAsmInfo * MAI
Target Asm Printer information.
Definition AsmPrinter.h:97
MachineFunction * MF
The current machine function.
Definition AsmPrinter.h:109
MCSymbol * GetJTISymbol(unsigned JTID, bool isLinkerPrivate=false) const
Return the symbol for the specified jump table entry.
AsmPrinter(TargetMachine &TM, std::unique_ptr< MCStreamer > Streamer, char &ID=AsmPrinter::ID)
MCSymbol * getSymbolPreferLocal(const GlobalValue &GV) const
Similar to getSymbol() but preferred for references.
MachineModuleInfo * MMI
This is a pointer to the current MachineModuleInfo.
Definition AsmPrinter.h:112
MCContext & OutContext
This is the context for the output file that we are streaming.
Definition AsmPrinter.h:101
MCSymbol * createTempSymbol(const Twine &Name) const
MCSymbol * CurrentPatchableFunctionEntrySym
The symbol for the entry in __patchable_function_entires.
Definition AsmPrinter.h:124
std::unique_ptr< MCStreamer > OutStreamer
This is the MCStreamer object for the file we are generating.
Definition AsmPrinter.h:106
void getNameWithPrefix(SmallVectorImpl< char > &Name, const GlobalValue *GV) const
MCSymbol * GetBlockAddressSymbol(const BlockAddress *BA) const
Return the MCSymbol used to satisfy BlockAddress uses of the specified basic block.
const MCSubtargetInfo & getSubtargetInfo() const
Return information about subtarget.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
This is an important base class in LLVM.
Definition Constant.h:43
Register getReg() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition Function.h:706
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Definition Function.cpp:765
bool hasInternalLinkage() const
This class is intended to be used as a base class for asm properties and features specific to the tar...
Definition MCAsmInfo.h:64
bool doesSetDirectiveSuppressReloc() const
Definition MCAsmInfo.h:593
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
MCCodeEmitter - Generic instruction encoding interface.
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
Context object for machine code objects.
Definition MCContext.h:83
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
const MCTargetOptions * getTargetOptions() const
Definition MCContext.h:420
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addExpr(const MCExpr *Val)
Add a new MCExpr operand.
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
unsigned getNumOperands() const
Definition MCInst.h:212
unsigned getOpcode() const
Definition MCInst.h:202
iterator insert(iterator I, const MCOperand &Op)
Definition MCInst.h:232
void setFlags(unsigned F)
Definition MCInst.h:204
void addOperand(const MCOperand Op)
Definition MCInst.h:215
iterator begin()
Definition MCInst.h:227
void setOpcode(unsigned Op)
Definition MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition MCInst.h:210
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
MCRegister getReg() const
Returns the register number.
Definition MCInst.h:73
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitWinCFIUnwindVersion(uint8_t Version, SMLoc Loc=SMLoc())
virtual void emitWinCFIPushReg(MCRegister Register, SMLoc Loc=SMLoc())
virtual void emitBinaryData(StringRef Data)
Functionally identical to EmitBytes.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitWinCFIUnwindV2Start(SMLoc Loc=SMLoc())
virtual void emitWinCFIEndEpilogue(SMLoc Loc=SMLoc())
virtual void emitWinCFIPushFrame(bool Code, SMLoc Loc=SMLoc())
virtual void emitWinCFISaveXMM(MCRegister Register, unsigned Offset, SMLoc Loc=SMLoc())
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitWinCFIBeginEpilogue(SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
virtual void emitWinCFISaveReg(MCRegister Register, unsigned Offset, SMLoc Loc=SMLoc())
virtual void emitWinCFIEndProlog(SMLoc Loc=SMLoc())
virtual void emitCodeAlignment(Align Alignment, const MCSubtargetInfo *STI, unsigned MaxBytesToEmit=0)
Emit nops until the byte alignment ByteAlignment is reached.
virtual void emitWinCFISetFrame(MCRegister Register, unsigned Offset, SMLoc Loc=SMLoc())
virtual void emitWinCFIAllocStack(unsigned Size, SMLoc Loc=SMLoc())
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
virtual void emitBytes(StringRef Data)
Emit the bytes in Data into the output.
Generic base class for all target subtargets.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
BranchProbability getEdgeProbability(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::const_iterator const_iterator
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
mop_range operands()
bool isPseudo(QueryType Type=IgnoreBundle) const
Return true if this is a pseudo instruction that doesn't correspond to a real machine instruction.
const MachineOperand & getOperand(unsigned i) const
bool isEHLabel() const
bool isMetaInstruction(QueryType Type=IgnoreBundle) const
Return true if this instruction doesn't produce any output in the form of executable instructions.
StubValueTy & getGVStubEntry(MCSymbol *Sym)
PointerIntPair< MCSymbol *, 1, bool > StubValueTy
MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation for MachO targets.
Ty & getObjFileInfo()
Keep track of various per-module pieces of information for backends that would like to do so.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
const BlockAddress * getBlockAddress() const
unsigned getTargetFlags() const
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
const char * getSymbolName() const
Register getReg() const
getReg - Returns the register number.
void setTargetFlags(unsigned F)
MCSymbol * getMCSymbol() const
@ MO_Immediate
Immediate operand.
@ MO_ConstantPoolIndex
Address of indexed Constant in Constant Pool.
@ MO_MCSymbol
MCSymbol reference (for debug/eh info)
@ MO_GlobalAddress
Address of a global value.
@ MO_RegisterMask
Mask of preserved registers.
@ MO_BlockAddress
Address of a basic block.
@ MO_MachineBasicBlock
MachineBasicBlock reference.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
@ MO_JumpTableIndex
Address of indexed Jump Table for switch.
int64_t getOffset() const
Return the offset from the symbol in this operand.
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
LLVM_ABI void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const
Print the appropriate prefix and the specified global variable's name.
Definition Mangler.cpp:121
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Definition Pass.cpp:140
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerTy getPointer() const
Wrapper class representing virtual and physical registers.
Definition Register.h:20
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
Definition Register.h:107
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Primary interface to the complete machine description for the target machine.
const Triple & getTargetTriple() const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition Type.h:153
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition Type.h:142
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition Type.h:156
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
static const char * getRegisterName(MCRegister Reg)
void emitInstruction(const MachineInstr *MI) override
Targets should implement this to emit instructions.
const X86Subtarget & getSubtarget() const
X86AsmPrinter(TargetMachine &TM, std::unique_ptr< MCStreamer > Streamer)
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
unsigned getSlotSize() const
bool isTargetWindowsMSVC() const
bool useIndirectThunkCalls() const
virtual bool emitFPOPushReg(MCRegister Reg, SMLoc L={})
virtual bool emitFPOEndPrologue(SMLoc L={})
virtual bool emitFPOStackAlign(unsigned Align, SMLoc L={})
virtual bool emitFPOSetFrame(MCRegister Reg, SMLoc L={})
virtual bool emitFPOStackAlloc(unsigned StackAlloc, SMLoc L={})
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
std::string & str()
Returns the string's reference.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ Itanium
Windows CE ARM, PowerPC, SH3, SH4.
Definition MCAsmInfo.h:49
bool isKMergeMasked(uint64_t TSFlags)
@ MO_TLSLD
MO_TLSLD - On a symbol operand this indicates that the immediate is the offset of the GOT entry with ...
@ MO_GOTPCREL_NORELAX
MO_GOTPCREL_NORELAX - Same as MO_GOTPCREL except that R_X86_64_GOTPCREL relocations are guaranteed to...
@ MO_GOTOFF
MO_GOTOFF - On a symbol operand this indicates that the immediate is the offset to the location of th...
@ MO_DARWIN_NONLAZY_PIC_BASE
MO_DARWIN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this indicates that the reference is actually...
@ MO_GOT_ABSOLUTE_ADDRESS
MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a relocation of: SYMBOL_LABEL + [.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
@ MO_NTPOFF
MO_NTPOFF - On a symbol operand this indicates that the immediate is the negative thread-pointer offs...
@ MO_DARWIN_NONLAZY
MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the reference is actually to the "...
@ MO_INDNTPOFF
MO_INDNTPOFF - On a symbol operand this indicates that the immediate is the absolute address of the G...
@ MO_GOTNTPOFF
MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry w...
@ MO_TPOFF
MO_TPOFF - On a symbol operand this indicates that the immediate is the thread-pointer offset for the...
@ MO_TLVP_PIC_BASE
MO_TLVP_PIC_BASE - On a symbol operand this indicates that the immediate is some TLS offset from the ...
@ MO_GOT
MO_GOT - On a symbol operand this indicates that the immediate is the offset to the GOT entry for the...
@ MO_ABS8
MO_ABS8 - On a symbol operand this indicates that the symbol is known to be an absolute symbol in ran...
@ MO_PLT
MO_PLT - On a symbol operand this indicates that the immediate is offset to the PLT entry of symbol n...
@ MO_TLSGD
MO_TLSGD - On a symbol operand this indicates that the immediate is the offset of the GOT entry with ...
@ MO_NO_FLAG
MO_NO_FLAG - No flag for the operand.
@ MO_TLVP
MO_TLVP - On a symbol operand this indicates that the immediate is some TLS offset.
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand "FOO", this indicates that the reference is actually to the "__imp...
@ MO_GOTTPOFF
MO_GOTTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry wi...
@ MO_SECREL
MO_SECREL - On a symbol operand this indicates that the immediate is the offset from beginning of sec...
@ MO_DTPOFF
MO_DTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry with...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of the symbol minus the PIC base label.
@ MO_TLSLDM
MO_TLSLDM - On a symbol operand this indicates that the immediate is the offset of the GOT entry with...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for the symbol name from the current code location.
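These MO_* target flags select the relocation specifier applied when a symbol operand is lowered to an MCExpr. A simplified sketch of the mapping with a few representative cases, where MO, Sym and Ctx are as in the lowering routine; the X86::S_* specifier names are assumptions patterned on the S_GOTPCREL_NORELAX entry below, and the real switch is exhaustive:
uint16_t Spec = 0; // no specifier
switch (MO.getTargetFlags()) {
case X86II::MO_GOTPCREL: Spec = X86::S_GOTPCREL; break; // assumed name
case X86II::MO_PLT:      Spec = X86::S_PLT;      break; // assumed name
case X86II::MO_TPOFF:    Spec = X86::S_TPOFF;    break; // assumed name
}
// The create overload taking a raw specifier value is assumed here.
const MCExpr *Expr = MCSymbolRefExpr::create(Sym, Spec, Ctx);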
bool isKMasked(uint64_t TSFlags)
bool isX86_64ExtendedReg(MCRegister Reg)
bool optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI)
@ AddrNumOperands
Definition X86BaseInfo.h:36
bool optimizeMOV(MCInst &MI, bool In64BitMode)
Simplify things like MOV32rm to MOV32o32a.
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified condition, e.g. turning COND_E to COND_NE.
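A sketch of the typical use, inverting a condition when a branch is rewritten:
X86::CondCode CC = X86::COND_E;
X86::CondCode Inv = X86::GetOppositeBranchCondition(CC); // X86::COND_NE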
bool optimizeMOVSX(MCInst &MI)
bool optimizeVPCMPWithImmediateOneOrSix(MCInst &MI)
bool optimizeShiftRotateWithImmediateOne(MCInst &MI)
bool optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc)
uint16_t Specifier
const Constant * getConstantFromPool(const MachineInstr &MI, unsigned OpNo)
Find any constant pool entry associated with a specific instruction operand.
bool optimizeINCDEC(MCInst &MI, bool In64BitMode)
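The lowering code runs these X86::optimize* helpers over the freshly built MCInst; each returns true and rewrites the instruction in place when a shorter encoding exists. A condensed sketch, with OutMI, MI and In64BitMode as in the lowering routine and the order illustrative:
if (X86::optimizeInstFromVEX3ToVEX2(OutMI, MI->getDesc())) return;
if (X86::optimizeShiftRotateWithImmediateOne(OutMI)) return;
if (X86::optimizeVPCMPWithImmediateOneOrSix(OutMI)) return;
if (X86::optimizeMOVSX(OutMI)) return;
if (X86::optimizeINCDEC(OutMI, In64BitMode)) return;
if (X86::optimizeMOV(OutMI, In64BitMode)) return;
X86::optimizeToFixedRegisterOrShortImmediateForm(OutMI);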
unsigned getVectorRegisterWidth(const MCOperandInfo &Info)
Get the width of the vector register operand.
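Used when sizing the constant-pool data decoded for shuffle comments. A sketch, assuming Desc is the instruction's MCInstrDesc:
unsigned Width = getVectorRegisterWidth(Desc.operands()[0]);
// e.g. 128 for an XMM register class, 256 for YMM, 512 for ZMM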
@ S_GOTPCREL_NORELAX
initializer< Ty > init(const Ty &Val)
NodeAddr< CodeNode * > Code
Definition RDFGraph.h:388
void DecodeZeroExtendMask(unsigned SrcScalarBits, unsigned DstScalarBits, unsigned NumDstElts, bool IsAnyExtend, SmallVectorImpl< int > &ShuffleMask)
Decode a zero extension instruction as a shuffle mask.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
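A sketch of drop_begin, which this kind of code uses to skip an instruction's leading operands:
llvm::SmallVector<int, 4> Ops = {9, 1, 2, 3};
for (int V : llvm::drop_begin(Ops)) // visits 1, 2, 3
  llvm::errs() << V << " ";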
void DecodeVPERMILPMask(unsigned NumElts, unsigned ScalarBits, ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a VPERMILPD/VPERMILPS variable mask from a raw array of constants.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
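The checked-cast idiom used throughout the file; a sketch assuming C is a const Constant *:
if (const auto *GV = llvm::dyn_cast<llvm::GlobalValue>(C)) {
  // Only reached when C's dynamic type is GlobalValue; otherwise GV is null.
  (void)GV;
}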
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
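A sketch of the width conversions, with expected results in comments:
MCRegister AX  = llvm::getX86SubSuperRegister(X86::EAX, 16);               // X86::AX
MCRegister AH  = llvm::getX86SubSuperRegister(X86::EAX, 8, /*High=*/true); // X86::AH
MCRegister RAX = llvm::getX86SubSuperRegister(X86::EAX, 64);               // X86::RAX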
bool isCFGuardFunction(const GlobalValue *GV)
Definition CFGuard.cpp:316
@ WinEH
Windows Exception Handling.
Definition CodeGen.h:58
void DecodeVPERMIL2PMask(unsigned NumElts, unsigned ScalarBits, unsigned M2Z, ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a VPERMIL2PD/VPERMIL2PS variable mask from a raw array of constants.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Definition STLExtras.h:323
void DecodeVPPERMMask(ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a VPPERM mask from a raw array of constants such as from BUILD_VECTOR.
DWARFExpression::Operation Op
std::string toString(const APInt &I, unsigned Radix, bool Signed, bool formatAsCLiteral=false, bool UpperCase=true, bool InsertSeparators=false)
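A sketch of rendering an immediate for a comment, in radix 16 and unsigned; upper-case digits are the default:
llvm::APInt Imm(32, 255);
std::string Hex = llvm::toString(Imm, 16, /*Signed=*/false); // "FF"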
constexpr unsigned BitWidth
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
@ SM_SentinelUndef
@ SM_SentinelZero
void DecodePSHUFBMask(ArrayRef< uint64_t > RawMask, const APInt &UndefElts, SmallVectorImpl< int > &ShuffleMask)
Decode a PSHUFB mask from a raw array of constants such as from BUILD_VECTOR.
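A sketch of the decode semantics with hypothetical control bytes: a byte with bit 7 set zeroes its destination lane, otherwise its low bits select a source byte within the 16-byte lane.
uint64_t Raw[] = {0x80, 0x02, 0x01, 0x00}; // hypothetical control bytes
llvm::APInt Undef(4, 0);                   // no undef elements
llvm::SmallVector<int, 4> Mask;
llvm::DecodePSHUFBMask(Raw, Undef, Mask);
// Mask now holds { SM_SentinelZero, 2, 1, 0 }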
#define N
void changeAndComment(bool b)
NoAutoPaddingScope(MCStreamer &OS)
const bool OldAllowAutoPadding
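Typical use of the RAII scope when emitting a sequence whose byte size must not change; a sketch where CalleeExpr is an assumed MCExpr for the call target:
{
  NoAutoPaddingScope NoPad(*OutStreamer);
  EmitAndCountInstruction(
      MCInstBuilder(X86::CALL64pcrel32).addExpr(CalleeExpr));
} // previous auto-padding setting restored here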