class X86AlignBranchKind {
private:
  uint8_t AlignBranchKind = 0;

public:
  void operator=(const std::string &Val) {
    // ... splits Val on '+' and adds each recognized kind; an unrecognized
    // element is reported to errs():
    //   " to -x86-align-branch=; each element must be one of: fused, "
    //   "jcc, jmp, call, ret, indirect.(plus separated)\n"
  }

  operator uint8_t() const { return AlignBranchKind; }
};
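For illustration only (not part of the listing): a minimal stand-alone sketch of the plus-separated parsing that operator= performs. parseAlignBranchKinds and the AlignBranchBit values are hypothetical stand-ins for the real X86::AlignBranch* mask bits.

#include <cstdint>
#include <sstream>
#include <string>

// Hypothetical stand-ins for the X86::AlignBranch* mask bits.
enum AlignBranchBit : uint8_t {
  kFused = 1 << 0,
  kJcc = 1 << 1,
  kJmp = 1 << 2,
  kCall = 1 << 3,
  kRet = 1 << 4,
  kIndirect = 1 << 5,
};

// Parse "fused+jcc+jmp" into a bitmask; unknown tokens are simply skipped
// here, whereas the real operator= reports them to errs().
uint8_t parseAlignBranchKinds(const std::string &Val) {
  uint8_t Mask = 0;
  std::istringstream Stream(Val);
  std::string Tok;
  while (std::getline(Stream, Tok, '+')) {
    if (Tok == "fused")         Mask |= kFused;
    else if (Tok == "jcc")      Mask |= kJcc;
    else if (Tok == "jmp")      Mask |= kJmp;
    else if (Tok == "call")     Mask |= kCall;
    else if (Tok == "ret")      Mask |= kRet;
    else if (Tok == "indirect") Mask |= kIndirect;
  }
  return Mask;
}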
X86AlignBranchKind X86AlignBranchKindLoc;

cl::opt<unsigned> X86AlignBranchBoundary(
    "x86-align-branch-boundary", cl::init(0),
    cl::desc(
        "Control how the assembler should align branches with NOP. If the "
        "boundary's size is not 0, it should be a power of 2 and no less "
        "than 32. Branches will be aligned to prevent from being across or "
        "against the boundary of specified size. The default value 0 does not "
        "align branches."));
90 "Specify types of branches to align (plus separated list of types):"
91 "\njcc indicates conditional jumps"
92 "\nfused indicates fused conditional jumps"
93 "\njmp indicates direct unconditional jumps"
94 "\ncall indicates direct and indirect calls"
95 "\nret indicates rets"
96 "\nindirect indicates indirect unconditional jumps"),
100 "x86-branches-within-32B-boundaries",
cl::init(
false),
102 "Align selected instructions to mitigate negative performance impact "
103 "of Intel's micro code update for errata skx102. May break "
104 "assumptions about labels corresponding to particular instructions, "
105 "and should be used with caution."));
108 "x86-pad-max-prefix-size",
cl::init(0),
109 cl::desc(
"Maximum number of prefixes to use for padding"));
cl::opt<bool> X86PadForAlign(
    "x86-pad-for-align", cl::init(false), cl::Hidden,
    cl::desc("Pad previous instructions to implement align directives"));
cl::opt<bool> X86PadForBranchAlign(
    "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
    cl::desc("Pad previous instructions to implement branch alignment"));
class X86AsmBackend : public MCAsmBackend {
  std::unique_ptr<const MCInstrInfo> MCII;
  X86AlignBranchKind AlignBranchType;
  unsigned TargetPrefixMax = 0;
135 std::pair<MCFragment *, size_t> PrevInstPosition;
138 uint8_t determinePaddingPrefix(
const MCInst &Inst)
const;
140 bool needAlign(
const MCInst &Inst)
const;
    if (X86AlignBranchWithin32BBoundaries) {
      // Defaults: align fused branches, unconditional jumps, and (unfused)
      // conditional jumps to 32-byte boundaries.
      // ...
    }
    // Allow the finer-grained options to override the master flag.
    if (X86AlignBranchBoundary.getNumOccurrences())
      AlignBoundary = assumeAligned(X86AlignBranchBoundary);
    if (X86AlignBranch.getNumOccurrences())
      AlignBranchType = X86AlignBranchKindLoc;
    if (X86PadMaxPrefixSize.getNumOccurrences())
      TargetPrefixMax = X86PadMaxPrefixSize;
  bool allowAutoPadding() const override;
  bool allowEnhancedRelaxation() const override;

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }
  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const override;

  bool padInstructionViaRelaxation(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                                   unsigned &RemainingSize) const;

  bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                               unsigned &RemainingSize) const;

  bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                              unsigned &RemainingSize) const;

  unsigned getMaximumNopSize() const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
  case X86::JCC_1:
    return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
  case X86::JMP_1:
    return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;

  // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

  // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

  // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

  // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

  // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

  // ADC
  case X86::ADC16ri8: return X86::ADC16ri;
  case X86::ADC16mi8: return X86::ADC16mi;
  case X86::ADC32ri8: return X86::ADC32ri;
  case X86::ADC32mi8: return X86::ADC32mi;
  case X86::ADC64ri8: return X86::ADC64ri32;
  case X86::ADC64mi8: return X86::ADC64mi32;

  // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

  // SBB
  case X86::SBB16ri8: return X86::SBB16ri;
  case X86::SBB16mi8: return X86::SBB16mi;
  case X86::SBB32ri8: return X86::SBB32ri;
  case X86::SBB32mi8: return X86::SBB32mi;
  case X86::SBB64ri8: return X86::SBB64ri32;
  case X86::SBB64mi8: return X86::SBB64mi32;

  // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

  // PUSH
  case X86::PUSH32i8: return X86::PUSHi32;
  case X86::PUSH16i8: return X86::PUSHi16;
  case X86::PUSH64i8: return X86::PUSH64i32;
  }
}
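For illustration (not from the file): what one of these relaxations means at the byte level. Relaxing the sign-extended imm8 ALU form to the imm32 form grows the encoding, and the backend exploits that growth when it needs extra bytes for alignment padding.

// add rax, 0x7f as ADD64ri8  : 48 83 C0 7F              (4 bytes)
// add rax, 0x7f as ADD64ri32 : 48 81 C0 7F 00 00 00     (7 bytes)
// Relaxation therefore buys 3 bytes in 32/64-bit mode (1 byte in 16-bit
// mode, where the wide immediate is 16 bits).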
  unsigned Opcode = MI.getOpcode();
  // ...

/// Check if the instruction uses RIP relative addressing.
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand < 0)
    return false;
  // ...
  unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
  return (BaseReg == X86::RIP);
}
uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
  assert((STI.hasFeature(X86::Mode32Bit) || STI.hasFeature(X86::Mode64Bit)) &&
         "Prefixes can be added only in 32-bit or 64-bit mode.");
  // ...
  uint64_t TSFlags = Desc.TSFlags;
  // ...
  if (MemoryOperand != -1)
    MemoryOperand += X86II::getOperandBias(Desc);

  unsigned SegmentReg = 0;
  if (MemoryOperand >= 0) {
    // Check for an explicit segment override on the memory operand.
    // ...
  }
  // ...
  if (MemoryOperand >= 0) {
    // Otherwise derive the default segment from the base register.
    // ...
    if (BaseReg == X86::ESP || BaseReg == X86::EBP)
      // ...
  }
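For reference (not from the file): the prefix this function returns is always a segment-override byte that the instruction already implies, so repeating it only adds bytes without changing semantics. The byte values are x86 architectural constants; the enum name below is hypothetical.

enum SegmentOverridePrefix : unsigned char {
  kCS = 0x2E, kSS = 0x36, kDS = 0x3E, kES = 0x26, kFS = 0x64, kGS = 0x65,
};
// e.g. a memory access whose base register is ESP or EBP already addresses
// through SS, so 0x36 is a harmless padding byte for that instruction.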
  for (auto &Operand : MI) {
    if (!Operand.isExpr())
      continue;
    const MCExpr &Expr = *Operand.getExpr();
bool X86AsmBackend::allowAutoPadding() const {
  return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
}

bool X86AsmBackend::allowEnhancedRelaxation() const {
  return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
}
492 const std::pair<MCFragment *, size_t> &PrevInstPosition) {
497 for (; isa_and_nonnull<MCDataFragment>(
F);
F =
F->getPrevNode())
498 if (cast<MCDataFragment>(
F)->getContents().size() != 0)
511 if (
auto *
DF = dyn_cast_or_null<MCDataFragment>(
F))
512 return DF != PrevInstPosition.first ||
513 DF->getContents().size() != PrevInstPosition.second;
static size_t getSizeForInstFragment(const MCFragment *F) {
  if (!F || !F->hasInstructions())
    return 0;
  switch (F->getKind()) {
  // ...
    return cast<MCDataFragment>(*F).getContents().size();
  // ...
    return cast<MCRelaxableFragment>(*F).getContents().size();
  // ...
    return cast<MCCompactEncodedInstFragment>(*F).getContents().size();
  }
}
569 assert(allowAutoPadding() &&
"incorrect initialization!");
587 bool X86AsmBackend::needAlign(
const MCInst &Inst)
const {
  CanPadInst = canPadInst(Inst, OS);

  if (!canPadBranches(OS))
    return;
  // ...

  // (in emitInstructionEnd)
  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
    F->setAllowAutoPadding(CanPadInst);

  if (!canPadBranches(OS))
    return;

  if (!needAlign(Inst) || !PendingBA)
    return;
  // ...
  if (isa_and_nonnull<MCDataFragment>(CF))
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
#undef ELF_RELOC
                 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
                 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
                 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
                 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
                 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
                 .Default(-1u);
    // ...
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/i386.def"
#undef ELF_RELOC
                 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
                 .Case("BFD_RELOC_8", ELF::R_386_8)
                 .Case("BFD_RELOC_16", ELF::R_386_16)
                 .Case("BFD_RELOC_32", ELF::R_386_32)
                 .Default(-1u);
      {"reloc_signed_4byte", 0, 32, 0},
      {"reloc_signed_4byte_relax", 0, 32, 0},
      {"reloc_global_offset_table", 0, 32, 0},
      {"reloc_global_offset_table8", 0, 64, 0},
bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
                                          const MCFixup &Fixup,
                                          const MCValue &) {
void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo *STI) const {
  // ...
  int64_t SignedValue = static_cast<int64_t>(Value);
  if ((Target.isAbsolute() || IsResolved) &&
      getFixupKindInfo(Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel) {
    // ...
      Asm.getContext().reportError(
          Fixup.getLoc(), "value of " + Twine(SignedValue) +
                              " is too large for field of " + Twine(Size) +
                              ((Size == 1) ? " byte." : " bytes."));
    // ...
  }
  // ...
  assert(isIntN(Size * 8 + 1, SignedValue) &&
         "Value does not fit in the Fixup field");

  for (unsigned i = 0; i != Size; ++i)
    Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
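For illustration (not from the file): a stand-alone version of the byte-patching loop at the end of applyFixup, which writes the fixup value little-endian into the instruction bytes. patchLE is a hypothetical name.

#include <cstddef>
#include <cstdint>

// Write Value little-endian into Size bytes starting at Data[Offset].
static void patchLE(uint8_t *Data, size_t Offset, unsigned Size, uint64_t Value) {
  for (unsigned i = 0; i != Size; ++i)
    Data[Offset + i] = uint8_t(Value >> (i * 8));
}
// e.g. patching a 4-byte field with Value = 0x11223344 stores 44 33 22 11.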
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  // ...
bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // ...
void X86AsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  // ...
bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
                                            MCCodeEmitter &Emitter,
                                            unsigned &RemainingSize) const {
  const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
  const unsigned RemainingPrefixSize = [&]() -> unsigned {
    // ...
    assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
    // ...
    unsigned ExistingPrefixSize = Code.size();
    if (TargetPrefixMax <= ExistingPrefixSize)
      return 0;
    return TargetPrefixMax - ExistingPrefixSize;
  }();

  const unsigned PrefixBytesToAdd = std::min(MaxPossiblePad, RemainingPrefixSize);
  if (PrefixBytesToAdd == 0)
    return false;
  // ...
  for (auto &F : RF.getFixups())
    F.setOffset(F.getOffset() + PrefixBytesToAdd);

  RemainingSize -= PrefixBytesToAdd;
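A worked example of the arithmetic above with illustrative values: a 4-byte instruction with no existing prefixes, 20 bytes of padding still needed, and -x86-pad-max-prefix-size=5.

#include <algorithm>

constexpr unsigned OldSize = 4, Remaining = 20, TargetMax = 5, Existing = 0;
constexpr unsigned MaxPossiblePad = std::min(15u - OldSize, Remaining);          // 11: x86 caps instructions at 15 bytes
constexpr unsigned RemainingPrefix = TargetMax - Existing;                       // 5
constexpr unsigned PrefixBytesToAdd = std::min(MaxPossiblePad, RemainingPrefix); // 5 prefixes prepended, Remaining drops to 15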
921 unsigned &RemainingSize)
const {
935 const unsigned NewSize =
Code.size();
936 assert(NewSize >= OldSize &&
"size decrease during relaxation?");
937 unsigned Delta = NewSize - OldSize;
938 if (Delta > RemainingSize)
943 RemainingSize -= Delta;
949 unsigned &RemainingSize)
const {
950 bool Changed =
false;
951 if (RemainingSize != 0)
952 Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
953 if (RemainingSize != 0)
954 Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
966 if (!X86PadForAlign && !X86PadForBranchAlign)
974 LabeledFragments.
insert(
S.getFragment(
false));
984 if (LabeledFragments.
count(&
F))
993 auto &RF = cast<MCRelaxableFragment>(*
I);
994 Relaxable.push_back(&RF);
    switch (F.getKind()) {
    // ...
      return X86PadForAlign;
    // ...
      return X86PadForBranchAlign;
      if (!canHandle(F)) {
        // ...
      }
      // ...
      const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);
      // ...
      unsigned RemainingSize = OrigSize;
      while (!Relaxable.empty() && RemainingSize != 0) {
        // ...
        if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
          FirstChangedFragment = &RF;
        // ...
      }
      // ...
      if (FirstChangedFragment) {
        // ...
      }
      // ...
      cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
      // ...
      const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
      assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
             "can't move start of next fragment!");
      assert(FinalSize == RemainingSize && "inconsistent size computation?");
    if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
      const MCFragment *LastFragment = BF->getLastFragment();
      // ...
      while (&*I != LastFragment)
        // ...
    }
  // ...
      Asm.computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
unsigned X86AsmBackend::getMaximumNopSize() const {
  // ...
}

bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  static const char Nops32Bit[10][11] = {
      // ...
      // nopl 0(%[re]ax,%[re]ax,1)
      "\x0f\x1f\x44\x00\x00",
      // nopw 0(%[re]ax,%[re]ax,1)
      "\x66\x0f\x1f\x44\x00\x00",
      // nopl 0L(%[re]ax)
      "\x0f\x1f\x80\x00\x00\x00\x00",
      // nopl 0L(%[re]ax,%[re]ax,1)
      "\x0f\x1f\x84\x00\x00\x00\x00\x00",
      // nopw 0L(%[re]ax,%[re]ax,1)
      "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
      // nopw %cs:0L(%[re]ax,%[re]ax,1)
      "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
  };

  // 16-bit mode uses different nop patterns.
  static const char Nops16Bit[4][11] = {
      // ...
  };

  const char(*Nops)[11] = /* 16-bit mode? */ Nops16Bit : Nops32Bit;

  uint64_t MaxNopLength = (uint64_t)getMaximumNopSize();

  // Emit as many largest nops as needed, then emit a nop of the remaining
  // length.
  do {
    const uint8_t ThisNopLength = (uint8_t)std::min(Count, MaxNopLength);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OS << '\x66';
    const uint8_t Rest = ThisNopLength - Prefixes;
    if (Rest != 0)
      OS.write(Nops[Rest - 1], Rest);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}
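A worked example of the loop above, assuming getMaximumNopSize() returns 15: writeNopData(OS, 27) runs two iterations.

// iteration 1: ThisNopLength = 15 -> five 0x66 prefixes + the 10-byte NOP
// iteration 2: ThisNopLength = 12 -> two 0x66 prefixes + the 10-byte NOP
// total: 27 bytes of padding, all decoded as (prefixed) NOPs.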
class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
                       const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_X86_64);
  }
};

class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
                         const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_IAMCU);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  // ...
      : X86AsmBackend(T, STI)
  // ...

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86WinCOFFObjectWriter(Is64Bit);
  }
};
namespace CU {
/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_MODE_BP_FRAME = 0x01000000,                  // [RE]BP-based frame
  UNWIND_MODE_STACK_IMMD = 0x02000000,                // frameless, small constant stack size
  UNWIND_MODE_STACK_IND = 0x03000000,                 // frameless, stack size taken from a SUB instruction
  UNWIND_MODE_DWARF = 0x04000000,                     // no compact encoding; fall back to DWARF
  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,             // mask for the saved-register encoding
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF // mask for the register permutation
};
} // namespace CU

class DarwinX86AsmBackend : public X86AsmBackend {
  /// Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };
  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  unsigned OffsetSize;
  unsigned MoveInstrSize;
  unsigned StackDivide;

  unsigned PushInstrSize(unsigned Reg) const {
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const MCPhysReg CU32BitRegs[7] = {
        X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0};
    static const MCPhysReg CU64BitRegs[] = {
        X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0};
    const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;
    return -1;
  }
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved, 3 bits per register.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0)
        break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1)
        return ~0U;

      // Shift the 3-bit register number into the slot for its save position.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc && "Invalid compact register encoding!");
    return RegEnc;
  }
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // ...
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1)
        return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
    // Renumber the registers so that the values are sequential.
    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit permutation.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] + 2 * RenumRegs[3]
                             + RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] + 2 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] + RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
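A worked example (illustrative) for RegCount == 2: the prologue pushed %rbx (CU number 1) and then %r12 (CU number 2).

// After the conversion loop and std::reverse, SavedRegs = {0, 0, 0, 0, 2, 1}.
// Renumbering over i = 4..5:
//   RenumRegs[4] = 2 - 0 - 1 = 1   (no smaller entry before it)
//   RenumRegs[5] = 1 - 0 - 1 = 0
// and the 10-bit permutation value is 5 * 1 + 0 = 5.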
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                      const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
        Is64Bit(TT.isArch64Bit()) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TT));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
    return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
  }
  /// Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      // Classify each CFI instruction:
      //  - OpDefCfaRegister: a frame-pointer prologue; the new frame register
      //    must be (Is64Bit ? X86::RBP : X86::EBP), otherwise fall back to
      //    DWARF.  Sets HasFP and resets the saved registers.
      // ...
        memset(SavedRegs, 0, sizeof(SavedRegs));
        InstrOffset += MoveInstrSize;
      //  - OpDefCfaOffset: records the stack size.
        StackSize = Inst.getOffset() / StackDivide;
      //  - OpOffset: a callee-saved register push.
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // Too many saved registers for a compact encoding.
          return CU::UNWIND_MODE_DWARF;
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize(Reg);
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U)
        return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      SubtractInstrIdx += InstrOffset;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless with a small constant stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless with a stack size too large for an immediate.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
        // Encode the offset to the "subl $xxx, %esp" instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved in a frameless prologue.
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when there is no frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U)
        return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }
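For orientation (inferred from the bit manipulation above, not a separate spec quote): the shape of the resulting 32-bit word in the frameless, small-stack case.

// bits 24..31  mode                  UNWIND_MODE_STACK_IMMD
// bits 16..23  stack size            in units of StackDivide (8 bytes on x86-64)
// bits 13..15  extra stack adjust    only used in UNWIND_MODE_STACK_IND
// bits 10..12  saved register count  SavedRegIdx
// bits  0.. 9  register permutation  encodeCompactUnwindRegistersWithoutFrame()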
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, false, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.isOSIAMCU())
    return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);

  return new ELFX86_32AsmBackend(T, OSABI, STI);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, true, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.getEnvironment() == Triple::GNUX32)
    return new ELFX86_X32AsmBackend(T, OSABI, STI);
  return new ELFX86_64AsmBackend(T, OSABI, STI);
}
StringSwitch & Case(StringLiteral S, T Value)
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
int getMemoryOperandNo(uint64_t TSFlags)
The function returns the MCInst operand # for the first field of the memory operand.
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
@ FK_PCRel_8
A eight-byte pc relative fixup.
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
LocationClass< Ty > location(Ty &L)
LLVM_NODISCARD R Default(T Value)
static bool is64Bit(const char *name)
Target - Wrapper for Target specific information.
virtual void emitPrefix(const MCInst &Inst, raw_ostream &OS, const MCSubtargetInfo &STI) const
Emit the prefixes of given instruction on the output stream.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
@ AddrSegmentReg
AddrSegmentReg - The operand # of the segment in the memory operand.
Triple - Helper class for working with autoconf configuration names.
static MCInstrInfo * createMCInstrInfo()
The instances of the Type class are immutable: once they are created, they are never changed.
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
static unsigned getRelaxedOpcodeArith(const MCInst &Inst)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
LLVM_NODISCARD T pop_back_val()
Instances of this class represent a single low-level machine instruction.
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
SmallVectorImpl< MCFixup > & getFixups()
std::pair< iterator, bool > insert(const ValueT &V)
bool getAllowAutoPadding() const
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
unsigned getNumOperands() const
bool empty() const
empty - Check if the array is empty.
@ FK_PCRel_1
A one-byte pc relative fixup.
Expected< uint32_t > getCPUType(const Triple &T)
@ reloc_signed_4byte_relax
void setOpcode(unsigned Op)
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from ....
@ FK_Data_4
A four-byte fixup.
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Generic interface to target specific assembler backends.
CompactUnwindEncodings
Compact unwind encoding values.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
void invalidateFragmentsFrom(MCFragment *F)
Invalidate the fragments starting with F because it has been resized.
@ FK_SecRel_4
A four-byte section relative fixup.
bool hasFeature(unsigned Feature) const
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
const Triple & getTargetTriple() const
static size_t getSizeForInstFragment(const MCFragment *F)
static unsigned getFixupKindSize(unsigned Kind)
@ FK_SecRel_2
A two-byte section relative fixup.
Streaming object file generation interface.
void setInst(const MCInst &Value)
Describe properties that are true of each instruction in the target description file.
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
const FeatureBitset & getFeatureBits() const
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ") const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
raw_ostream & write(unsigned char C)
const MCInst & getInst() const
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
This class implements an extremely fast bulk output stream that can only output to a stream.
OpType getOperation() const
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
MCFragment * getCurrentFragment() const
Expected< uint32_t > getCPUSubType(const Triple &T)
This struct is a compact representation of a valid (non-zero power of two) alignment.
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
MCSection * getCurrentSectionOnly() const
unsigned getRegister() const
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
llvm::SmallVectorImpl< MCSection * > & getSectionOrder()
constexpr bool isInt< 8 >(int64_t x)
Implements a dense probed hash-table based set.
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
static bool isPrefix(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Error applyFixup(LinkGraph &G, Block &B, const Edge &E, char *BlockWorkingMem)
Apply fixup expression for edge to block content.
bool isCall() const
Return true if the instruction is a call.
OSType getOS() const
getOS - Get the parsed operating system type of this triple.
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
initializer< Ty > init(const Ty &Val)
void insert(MCFragment *F)
virtual Optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isFullyRelaxed(const MCRelaxableFragment &RF)
Return true if this instruction has been fully relaxed into it's most general available form.
Target independent information on a fixup kind.
@ FK_PCRel_2
A two-byte pc relative fixup.
@ FK_Data_1
A one-byte fixup.
@ FK_PCRel_4
A four-byte pc relative fixup.
void setLastFragment(const MCFragment *F)
@ reloc_global_offset_table
MCAssembler & getAssembler()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
unsigned getAlignment() const
PowerPC TLS Dynamic Call Fixup
StringRef - Represent a constant reference to a string, i.e.
static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode)
bool isPrefix(uint64_t TSFlags)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
SecondMacroFusionInstKind classifySecondCondCodeInMacroFusion(X86::CondCode CC)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
@ reloc_global_offset_table8
Instances of this class represent a uniqued identifier for a section in the current translation unit.
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
@ FK_SecRel_1
A one-byte section relative fixup.
unsigned const MachineRegisterInfo * MRI
uint64_t getFragmentOffset(const MCFragment *F) const
Get the offset of the given fragment inside its containing section.
@ reloc_riprel_4byte_relax
bool isOSWindows() const
Tests whether the OS is Windows.
void setAlignment(Align Value)
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Interface to description of machine instruction set.
Encapsulates the layout of an assembly file at a particular point in time.
MCCodeEmitter - Generic instruction encoding interface.
Iterator for intrusive lists based on ilist_node.
uint64_t value() const
This is a hole in the type system and should not be abused.
@ reloc_riprel_4byte_movq_load
@ FK_SecRel_8
A eight-byte section relative fixup.
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
unsigned getOpcode() const
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
SecondMacroFusionInstKind
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
bool getAllowAutoPadding() const
SectionKind getKind() const
MCFixupKind
Extensible enumeration to represent the type of a fixup.
bool isUnconditionalBranch() const
Return true if this is a branch which always transfers control flow to some other block.
@ FK_Data_8
A eight-byte fixup.
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
@ SymbolRef
References to labels and assigned expressions.
static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode)
const MCOperand & getOperand(unsigned i) const
size_t size() const
size - Get the array size.
Reimplement select in terms of SEL *We would really like to support but we need to prove that the add doesn t need to overflow between the two bit chunks *Implement pre post increment support(e.g. PR935) *Implement smarter const ant generation for binops with large immediates. A few ARMv6T2 ops should be pattern matched
A raw_ostream that writes to an SmallVector or SmallString.
EnvironmentType getEnvironment() const
getEnvironment - Get the parsed environment type of this triple.
static bool hasInterruptDelaySlot(const MCInst &Inst)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
A switch()-like statement whose cases are string literals.
bool isReturn() const
Return true if the instruction is a return.
This represents an "assembler immediate".
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
@ reloc_riprel_4byte_relax_rex
@ FK_Data_2
A two-byte fixup.
The same transformation can work with an even modulo with the addition of a and shrink the compare RHS by the same amount Unless the target supports that transformation probably isn t worthwhile The transformation can also easily be made to work with non zero equality for n
Fragment for data and encoded instructions.
bool isBundlingEnabled() const
Generic base class for all target subtargets.
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
@ reloc_branch_4byte_pcrel
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
LLVM Value Representation.
Base class for the full range of assembler expressions which are needed for parsing.
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
SmallVectorImpl< char > & getContents()
virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
EncodeInstruction - Encode the given Inst to bytes on the output stream OS.
unsigned getReg() const
Returns the register number.
Represents required padding such that a particular other set of fragments does not cross a particular...