LLVM 20.0.0git
X86AsmBackend.cpp
Go to the documentation of this file.
1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
16#include "llvm/MC/MCAssembler.h"
18#include "llvm/MC/MCContext.h"
19#include "llvm/MC/MCDwarf.h"
22#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
32#include "llvm/MC/MCValue.h"
37
38using namespace llvm;
39
40namespace {
/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
class X86AlignBranchKind {
private:
  uint8_t AlignBranchKind = 0;

public:
  // Parse a '+'-separated list of branch-type names (e.g. "fused+jcc") and OR
  // the corresponding X86::AlignBranchBoundaryKind bits into the mask.
  // Unknown names are reported to errs() but parsing continues.
  void operator=(const std::string &Val) {
    if (Val.empty())
      return;
    SmallVector<StringRef, 6> BranchTypes;
    // KeepEmpty=false: consecutive '+' separators produce no empty entries.
    StringRef(Val).split(BranchTypes, '+', -1, false);
    for (auto BranchType : BranchTypes) {
      if (BranchType == "fused")
        addKind(X86::AlignBranchFused);
      else if (BranchType == "jcc")
        addKind(X86::AlignBranchJcc);
      else if (BranchType == "jmp")
        addKind(X86::AlignBranchJmp);
      else if (BranchType == "call")
        addKind(X86::AlignBranchCall);
      else if (BranchType == "ret")
        addKind(X86::AlignBranchRet);
      else if (BranchType == "indirect")
        // NOTE(review): the extraction dropped the statement here (presumably
        // "addKind(X86::AlignBranchIndirect);") — confirm against upstream.
      else {
        errs() << "invalid argument " << BranchType.str()
               << " to -x86-align-branch=; each element must be one of: fused, "
                  "jcc, jmp, call, ret, indirect.(plus separated)\n";
      }
    }
  }

  // Implicit conversion so the mask can be tested/combined as a plain uint8_t.
  operator uint8_t() const { return AlignBranchKind; }
  void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
};
76
// Storage target for the -x86-align-branch option (see cl::location below).
X86AlignBranchKind X86AlignBranchKindLoc;

cl::opt<unsigned> X86AlignBranchBoundary(
    "x86-align-branch-boundary", cl::init(0),
    // NOTE(review): the extraction dropped a "cl::desc(" line here.
    "Control how the assembler should align branches with NOP. If the "
    "boundary's size is not 0, it should be a power of 2 and no less "
    "than 32. Branches will be aligned to prevent from being across or "
    "against the boundary of specified size. The default value 0 does not "
    "align branches."));

// NOTE(review): the extraction dropped the declaration line here (presumably
// a cl::opt named X86AlignBranch) and a "cl::desc(" line below.
    "x86-align-branch",
    "Specify types of branches to align (plus separated list of types):"
    "\njcc indicates conditional jumps"
    "\nfused indicates fused conditional jumps"
    "\njmp indicates direct unconditional jumps"
    "\ncall indicates direct and indirect calls"
    "\nret indicates rets"
    "\nindirect indicates indirect unconditional jumps"),
    cl::location(X86AlignBranchKindLoc));

// Mitigation switch for Intel's SKX102 erratum microcode update: turns on
// 32-byte boundary alignment of fused/conditional/unconditional jumps (the
// defaults are applied in the X86AsmBackend constructor).
cl::opt<bool> X86AlignBranchWithin32BBoundaries(
    "x86-branches-within-32B-boundaries", cl::init(false),
    cl::desc(
        "Align selected instructions to mitigate negative performance impact "
        "of Intel's micro code update for errata skx102. May break "
        "assumptions about labels corresponding to particular instructions, "
        "and should be used with caution."));

// Budget of segment-override prefixes usable for padding (0 disables it).
cl::opt<unsigned> X86PadMaxPrefixSize(
    "x86-pad-max-prefix-size", cl::init(0),
    cl::desc("Maximum number of prefixes to use for padding"));

cl::opt<bool> X86PadForAlign(
    "x86-pad-for-align", cl::init(false), cl::Hidden,
    cl::desc("Pad previous instructions to implement align directives"));

cl::opt<bool> X86PadForBranchAlign(
    "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
    cl::desc("Pad previous instructions to implement branch alignment"));
119
// X86 assembler backend: implements fixup application, instruction
// relaxation, NOP emission, and the branch-alignment padding machinery
// controlled by the cl::opt flags above.
class X86AsmBackend : public MCAsmBackend {
  const MCSubtargetInfo &STI;
  std::unique_ptr<const MCInstrInfo> MCII;
  // Which branch kinds to align, and the boundary to keep them from crossing.
  X86AlignBranchKind AlignBranchType;
  Align AlignBoundary;
  // Max number of prefix bytes usable for padding (0 = prefix padding off).
  unsigned TargetPrefixMax = 0;

  // State threaded between emitInstructionBegin/emitInstructionEnd to detect
  // macro fusion and unsafe padding positions. PrevInst is only maintained
  // while canPadBranches() holds (copying an MCInst is not cheap).
  MCInst PrevInst;
  unsigned PrevInstOpcode = 0;
  MCBoundaryAlignFragment *PendingBA = nullptr;
  std::pair<MCFragment *, size_t> PrevInstPosition;
  bool IsRightAfterData = false;

  uint8_t determinePaddingPrefix(const MCInst &Inst) const;
  bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
  bool needAlign(const MCInst &Inst) const;
  bool canPadBranches(MCObjectStreamer &OS) const;
  bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;

public:
  X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
      : MCAsmBackend(llvm::endianness::little), STI(STI),
        MCII(T.createMCInstrInfo()) {
    if (X86AlignBranchWithin32BBoundaries) {
      // At the moment, this defaults to aligning fused branches, unconditional
      // jumps, and (unfused) conditional jumps with nops. Both the
      // instructions aligned and the alignment method (nop vs prefix) may
      // change in the future.
      AlignBoundary = assumeAligned(32);
      AlignBranchType.addKind(X86::AlignBranchFused);
      AlignBranchType.addKind(X86::AlignBranchJcc);
      AlignBranchType.addKind(X86::AlignBranchJmp);
    }
    // Allow overriding defaults set by main flag
    if (X86AlignBranchBoundary.getNumOccurrences())
      AlignBoundary = assumeAligned(X86AlignBranchBoundary);
    if (X86AlignBranch.getNumOccurrences())
      AlignBranchType = X86AlignBranchKindLoc;
    if (X86PadMaxPrefixSize.getNumOccurrences())
      TargetPrefixMax = X86PadMaxPrefixSize;
  }

  bool allowAutoPadding() const override;
  bool allowEnhancedRelaxation() const override;
  void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
                            const MCSubtargetInfo &STI);
  void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst);

  unsigned getNumFixupKinds() const override {
    // NOTE(review): the extraction dropped the body here (presumably
    // "return X86::NumTargetFixupKinds;") — confirm against upstream.
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target,
                             const MCSubtargetInfo *STI) override;

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  // NOTE(review): the extraction dropped a parameter line here
                  // (likely "const MCValue &Target, MutableArrayRef<char> Data,").
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  // NOTE(review): the extraction dropped the start of this declaration
  // (likely "bool fixupNeedsRelaxation(const MCFixup &Fixup,").
                         uint64_t Value) const override;

  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;

  bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
                                   // NOTE(review): missing "MCCodeEmitter &Emitter," line.
                                   unsigned &RemainingSize) const;

  bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                               unsigned &RemainingSize) const;

  bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
                              unsigned &RemainingSize) const;

  void finishLayout(const MCAssembler &Asm) const override;

  unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;
};
211} // end anonymous namespace
212
213static bool isRelaxableBranch(unsigned Opcode) {
214 return Opcode == X86::JCC_1 || Opcode == X86::JMP_1;
215}
216
217static unsigned getRelaxedOpcodeBranch(unsigned Opcode,
218 bool Is16BitMode = false) {
219 switch (Opcode) {
220 default:
221 llvm_unreachable("invalid opcode for branch");
222 case X86::JCC_1:
223 return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
224 case X86::JMP_1:
225 return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
226 }
227}
228
// Return the relaxed form of the instruction's opcode: branches relax via
// getRelaxedOpcodeBranch; everything else via the long-immediate form.
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode) {
  unsigned Opcode = MI.getOpcode();
  return isRelaxableBranch(Opcode) ? getRelaxedOpcodeBranch(Opcode, Is16BitMode)
  // NOTE(review): the extraction dropped the ':' arm of this conditional
  // (presumably ": X86::getOpcodeForLongImmediateForm(Opcode);").
}

// NOTE(review): the function header line was dropped by the extraction. What
// follows is the tail of a static helper taking (const MCInst &MI, const
// MCInstrInfo &MCII) that returns the condition code of a JCC_1 branch, or
// COND_INVALID for any other opcode.
                                 const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    return X86::COND_INVALID;
  case X86::JCC_1: {
    const MCInstrDesc &Desc = MCII.get(Opcode);
    // The condition code is always the last operand of JCC_1.
    return static_cast<X86::CondCode>(
        MI.getOperand(Desc.getNumOperands() - 1).getImm());
  }
  }
}

// NOTE(review): the extraction dropped the header/body lines of this helper
// (it classifies a branch for macro fusion by its condition code CC).
  return classifySecondCondCodeInMacroFusion(CC);
}
254
255/// Check if the instruction uses RIP relative addressing.
256static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
257 unsigned Opcode = MI.getOpcode();
258 const MCInstrDesc &Desc = MCII.get(Opcode);
259 uint64_t TSFlags = Desc.TSFlags;
260 unsigned CurOp = X86II::getOperandBias(Desc);
261 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
262 if (MemoryOperand < 0)
263 return false;
264 unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
265 unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
266 return (BaseReg == X86::RIP);
267}
268
269/// Check if the instruction is a prefix.
270static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII) {
271 return X86II::isPrefix(MCII.get(Opcode).TSFlags);
272}
273
/// Check if the instruction is valid as the first instruction in macro fusion.
static bool isFirstMacroFusibleInst(const MCInst &Inst,
                                    const MCInstrInfo &MCII) {
  // An Intel instruction with RIP relative addressing is not macro fusible.
  if (isRIPRelative(Inst, MCII))
    return false;
  // NOTE(review): the extraction dropped the lines computing FIK here
  // (presumably "X86::FirstMacroFusionInstKind FIK =
  //      X86::classifyFirstOpcodeInMacroFusion(Inst.getOpcode());").
  return FIK != X86::FirstMacroFusionInstKind::Invalid;
}
284
/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
/// get a better peformance in some cases. Here, we determine which prefix is
/// the most suitable.
///
/// If the instruction has a segment override prefix, use the existing one.
/// If the target is 64-bit, use the CS.
/// If the target is 32-bit,
///   - If the instruction has a ESP/EBP base register, use SS.
///   - Otherwise use DS.
uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
  assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
         "Prefixes can be added only in 32-bit or 64-bit mode.");
  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand != -1)
    MemoryOperand += X86II::getOperandBias(Desc);

  unsigned SegmentReg = 0;
  if (MemoryOperand >= 0) {
    // Check for explicit segment override on memory operand.
    SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
  }

  // String-move style forms carry the segment register as a plain operand
  // rather than in the memory operand tuple.
  switch (TSFlags & X86II::FormMask) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    // Check segment override opcode prefix as needed (not for %ds).
    if (Inst.getOperand(2).getReg() != X86::DS)
      SegmentReg = Inst.getOperand(2).getReg();
    break;
  }
  case X86II::RawFrmSrc: {
    // Check segment override opcode prefix as needed (not for %ds).
    if (Inst.getOperand(1).getReg() != X86::DS)
      SegmentReg = Inst.getOperand(1).getReg();
    break;
  }
  // NOTE(review): the extraction dropped a case label here (likely
  // "case X86II::RawFrmMemOffs: {").
    // Check segment override opcode prefix as needed.
    SegmentReg = Inst.getOperand(1).getReg();
    break;
  }
  }

  if (SegmentReg != 0)
    return X86::getSegmentOverridePrefixForReg(SegmentReg);

  if (STI.hasFeature(X86::Is64Bit))
    return X86::CS_Encoding;

  if (MemoryOperand >= 0) {
    unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
    unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
    if (BaseReg == X86::ESP || BaseReg == X86::EBP)
      return X86::SS_Encoding;
  }
  return X86::DS_Encoding;
}
347
/// Check if the two instructions will be macro-fused on the target cpu.
bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
  if (!InstDesc.isConditionalBranch())
    return false;
  if (!isFirstMacroFusibleInst(Cmp, *MCII))
    return false;
  const X86::FirstMacroFusionInstKind CmpKind =
      // NOTE(review): the extraction dropped the initializer here (presumably
      // "X86::classifyFirstOpcodeInMacroFusion(Cmp.getOpcode());").
  const X86::SecondMacroFusionInstKind BranchKind =
      // NOTE(review): the extraction dropped the initializer here as well.
  return X86::isMacroFused(CmpKind, BranchKind);
}
361
362/// Check if the instruction has a variant symbol operand.
363static bool hasVariantSymbol(const MCInst &MI) {
364 for (auto &Operand : MI) {
365 if (!Operand.isExpr())
366 continue;
367 const MCExpr &Expr = *Operand.getExpr();
368 if (Expr.getKind() == MCExpr::SymbolRef &&
369 cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
370 return true;
371 }
372 return false;
373}
374
375bool X86AsmBackend::allowAutoPadding() const {
376 return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
377}
378
379bool X86AsmBackend::allowEnhancedRelaxation() const {
380 return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
381}
382
383/// X86 has certain instructions which enable interrupts exactly one
384/// instruction *after* the instruction which stores to SS. Return true if the
385/// given instruction may have such an interrupt delay slot.
386static bool mayHaveInterruptDelaySlot(unsigned InstOpcode) {
387 switch (InstOpcode) {
388 case X86::POPSS16:
389 case X86::POPSS32:
390 case X86::STI:
391 return true;
392
393 case X86::MOV16sr:
394 case X86::MOV32sr:
395 case X86::MOV64sr:
396 case X86::MOV16sm:
397 // In fact, this is only the case if the first operand is SS. However, as
398 // segment moves occur extremely rarely, this is just a minor pessimization.
399 return true;
400 }
401 return false;
402}
403
/// Check if the instruction to be emitted is right after any data.
static bool
// NOTE(review): the extraction dropped the start of the parameter list here
// (presumably "isRightAfterData(MCFragment *CurrentFragment,").
                 const std::pair<MCFragment *, size_t> &PrevInstPosition) {
  MCFragment *F = CurrentFragment;
  // Since data is always emitted into a DataFragment, our check strategy is
  // simple here.
  //   - If the fragment is a DataFragment
  //     - If it's empty (section start or data after align), return false.
  //     - If it's not the fragment where the previous instruction is,
  //       returns true.
  //     - If it's the fragment holding the previous instruction but its
  //       size changed since the previous instruction was emitted into
  //       it, returns true.
  //     - Otherwise returns false.
  //   - If the fragment is not a DataFragment, returns false.
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
    return DF->getContents().size() &&
           (DF != PrevInstPosition.first ||
            DF->getContents().size() != PrevInstPosition.second);

  return false;
}
427
/// \returns the fragment size if it has instructions, otherwise returns 0.
static size_t getSizeForInstFragment(const MCFragment *F) {
  if (!F || !F->hasInstructions())
    return 0;
  // MCEncodedFragmentWithContents being templated makes this tricky.
  switch (F->getKind()) {
  default:
    llvm_unreachable("Unknown fragment with instructions!");
  // NOTE(review): the extraction dropped the "case MCFragment::FT_Data:"
  // label here.
    return cast<MCDataFragment>(*F).getContents().size();
  // NOTE(review): the extraction dropped the "case MCFragment::FT_Relaxable:"
  // label here.
    return cast<MCRelaxableFragment>(*F).getContents().size();
  }
}
442
443/// Return true if we can insert NOP or prefixes automatically before the
444/// the instruction to be emitted.
445bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
446 if (hasVariantSymbol(Inst))
447 // Linker may rewrite the instruction with variant symbol operand(e.g.
448 // TLSCALL).
449 return false;
450
451 if (mayHaveInterruptDelaySlot(PrevInstOpcode))
452 // If this instruction follows an interrupt enabling instruction with a one
453 // instruction delay, inserting a nop would change behavior.
454 return false;
455
456 if (isPrefix(PrevInstOpcode, *MCII))
457 // If this instruction follows a prefix, inserting a nop/prefix would change
458 // semantic.
459 return false;
460
461 if (isPrefix(Inst.getOpcode(), *MCII))
462 // If this instruction is a prefix, inserting a prefix would change
463 // semantic.
464 return false;
465
466 if (IsRightAfterData)
467 // If this instruction follows any data, there is no clear
468 // instruction boundary, inserting a nop/prefix would change semantic.
469 return false;
470
471 return true;
472}
473
474bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
475 if (!OS.getAllowAutoPadding())
476 return false;
477 assert(allowAutoPadding() && "incorrect initialization!");
478
479 // We only pad in text section.
480 if (!OS.getCurrentSectionOnly()->isText())
481 return false;
482
483 // To be Done: Currently don't deal with Bundle cases.
484 if (OS.getAssembler().isBundlingEnabled())
485 return false;
486
487 // Branches only need to be aligned in 32-bit or 64-bit mode.
488 if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
489 return false;
490
491 return true;
492}
493
494/// Check if the instruction operand needs to be aligned.
495bool X86AsmBackend::needAlign(const MCInst &Inst) const {
496 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
497 return (Desc.isConditionalBranch() &&
498 (AlignBranchType & X86::AlignBranchJcc)) ||
499 (Desc.isUnconditionalBranch() &&
500 (AlignBranchType & X86::AlignBranchJmp)) ||
501 (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
502 (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
503 (Desc.isIndirectBranch() &&
504 (AlignBranchType & X86::AlignBranchIndirect));
505}
506
/// Insert BoundaryAlignFragment before instructions to align branches.
void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
    const MCInst &Inst, const MCSubtargetInfo &STI) {
  // Used by canPadInst. Done here, because in emitInstructionEnd, the current
  // fragment will have changed.
  IsRightAfterData =
      isRightAfterData(OS.getCurrentFragment(), PrevInstPosition);

  if (!canPadBranches(OS))
    return;

  // NB: PrevInst only valid if canPadBranches is true.
  if (!isMacroFused(PrevInst, Inst))
    // Macro fusion doesn't happen indeed, clear the pending.
    PendingBA = nullptr;

  // When branch padding is enabled (basically the skx102 erratum => unlikely),
  // we call canPadInst (not cheap) twice. However, in the common case, we can
  // avoid unnecessary calls to that, as this is otherwise only used for
  // relaxable fragments.
  if (!canPadInst(Inst, OS))
    return;

  if (PendingBA && PendingBA->getNext() == OS.getCurrentFragment()) {
    // Macro fusion actually happens and there is no other fragment inserted
    // after the previous instruction.
    //
    // Do nothing here since we already inserted a BoudaryAlign fragment when
    // we met the first instruction in the fused pair and we'll tie them
    // together in emitInstructionEnd.
    //
    // Note: When there is at least one fragment, such as MCAlignFragment,
    // inserted after the previous instruction, e.g.
    //
    // \code
    //   cmp %rax %rcx
    //   .align 16
    //   je .Label0
    // \endcode
    //
    // We will treat the JCC as a unfused branch although it may be fused
    // with the CMP.
    return;
  }

  if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
                          isFirstMacroFusibleInst(Inst, *MCII))) {
    // If we meet a unfused branch or the first instuction in a fusiable pair,
    // insert a BoundaryAlign fragment.
    PendingBA = OS.getContext().allocFragment<MCBoundaryAlignFragment>(
        AlignBoundary, STI);
    OS.insert(PendingBA);
  }
}
561
/// Set the last fragment to be aligned for the BoundaryAlignFragment.
void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
                                       const MCInst &Inst) {
  MCFragment *CF = OS.getCurrentFragment();
  // Record whether the just-emitted instruction may be padded later during
  // relaxation (enhanced relaxation path).
  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
    F->setAllowAutoPadding(canPadInst(Inst, OS));

  // Update PrevInstOpcode here, canPadInst() reads that.
  PrevInstOpcode = Inst.getOpcode();
  PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));

  if (!canPadBranches(OS))
    return;

  // PrevInst is only needed if canPadBranches. Copying an MCInst isn't cheap.
  PrevInst = Inst;

  if (!needAlign(Inst) || !PendingBA)
    return;

  // Tie the aligned instructions into a pending BoundaryAlign.
  PendingBA->setLastFragment(CF);
  PendingBA = nullptr;

  // We need to ensure that further data isn't added to the current
  // DataFragment, so that we can get the size of instructions later in
  // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
  // DataFragment.
  if (isa_and_nonnull<MCDataFragment>(CF))
    OS.insert(OS.getContext().allocFragment<MCDataFragment>());

  // Update the maximum alignment on the current section if necessary.
  MCSection *Sec = OS.getCurrentSectionOnly();
  Sec->ensureMinAlignment(AlignBoundary);
}
597
// Translate a .reloc directive name (R_X86_64_* / R_386_* / BFD_RELOC_*) into
// a literal-relocation fixup kind for ELF targets.
std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    unsigned Type;
    if (STI.getTargetTriple().getArch() == Triple::x86_64) {
      // NOTE(review): the extraction dropped the line starting the
      // StringSwitch here (presumably "Type = llvm::StringSwitch<unsigned>(Name)").
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
#undef ELF_RELOC
                 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
                 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
                 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
                 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
                 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
                 .Default(-1u);
    } else {
      // NOTE(review): the extraction dropped the corresponding StringSwitch
      // start line for the i386 branch here.
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/i386.def"
#undef ELF_RELOC
                 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
                 .Case("BFD_RELOC_8", ELF::R_386_8)
                 .Case("BFD_RELOC_16", ELF::R_386_16)
                 .Case("BFD_RELOC_32", ELF::R_386_32)
                 .Default(-1u);
    }
    if (Type == -1u)
      return std::nullopt;
    return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  // NOTE(review): the extraction dropped the non-ELF fallback here (presumably
  // "return MCAsmBackend::getFixupKind(Name);").
}
629
// Describe each X86 target-specific fixup: {name, bit offset, bit size, flags}.
const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_signed_4byte", 0, 32, 0},
      {"reloc_signed_4byte_relax", 0, 32, 0},
      {"reloc_global_offset_table", 0, 32, 0},
      {"reloc_global_offset_table8", 0, 64, 0},
      {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
  };

  // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
  // do not require any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    // NOTE(review): the extraction dropped the return statement here.

  if (Kind < FirstTargetFixupKind)
    // NOTE(review): the extraction dropped the generic-kind return here
    // (presumably "return MCAsmBackend::getFixupKindInfo(Kind);").

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
  return Infos[Kind - FirstTargetFixupKind];
}
656
657bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
658 const MCFixup &Fixup, const MCValue &,
659 const MCSubtargetInfo *STI) {
660 return Fixup.getKind() >= FirstLiteralRelocationKind;
661}
662
// Size in bytes of the field a fixup kind patches.
static unsigned getFixupKindSize(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case FK_NONE:
    return 0;
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_PCRel_4:
  // NOTE(review): the extraction dropped several case labels here (the X86
  // reloc_riprel_4byte*/reloc_signed_4byte*/... 4-byte kinds).
  case FK_SecRel_4:
  case FK_Data_4:
    return 4;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8:
  // NOTE(review): the extraction dropped a case label here (likely
  // "case X86::reloc_global_offset_table8:").
    return 8;
  }
}
696
// Patch the resolved fixup value into the fragment's data, little-endian,
// after range-checking it against the fixup's field width.
void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               // NOTE(review): the extraction dropped a
                               // parameter line here (likely
                               // "MutableArrayRef<char> Data,").
                               uint64_t Value, bool IsResolved,
                               const MCSubtargetInfo *STI) const {
  unsigned Kind = Fixup.getKind();
  // Literal relocation kinds (.reloc directives) are emitted as relocations
  // and never patched into the instruction bytes.
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned Size = getFixupKindSize(Kind);

  assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");

  int64_t SignedValue = static_cast<int64_t>(Value);
  if ((Target.isAbsolute() || IsResolved) &&
      getFixupKindInfo(Fixup.getKind()).Flags &
  // NOTE(review): the extraction dropped the end of this condition (likely
  // "MCFixupKindInfo::FKF_IsPCRel) {").
    // check that PC relative fixup fits into the fixup size.
    if (Size > 0 && !isIntN(Size * 8, SignedValue))
      Asm.getContext().reportError(
          Fixup.getLoc(), "value of " + Twine(SignedValue) +
                              " is too large for field of " + Twine(Size) +
                              ((Size == 1) ? " byte." : " bytes."));
  } else {
    // Check that uppper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
           "Value does not fit in the Fixup field");
  }

  // Write the value out little-endian, byte by byte.
  for (unsigned i = 0; i != Size; ++i)
    Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}
731
732bool X86AsmBackend::mayNeedRelaxation(const MCInst &MI,
733 const MCSubtargetInfo &STI) const {
734 unsigned Opcode = MI.getOpcode();
735 unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
736 return isRelaxableBranch(Opcode) ||
737 (X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
738 MI.getOperand(MI.getNumOperands() - 1 - SkipOperands).isExpr());
739}
740
741bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
742 uint64_t Value) const {
743 // Relax if the value is too big for a (signed) i8.
744 return !isInt<8>(Value);
745}
746
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);

  if (RelaxedOp == Inst.getOpcode()) {
    // No relaxed form exists: dump the offending instruction and abort.
    // NOTE(review): the extraction dropped the buffer/stream declarations
    // here (likely "SmallString<256> Tmp;" and "raw_svector_ostream OS(Tmp);").
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Inst.setOpcode(RelaxedOp);
}
765
// Grow an already fully-relaxed instruction by prepending redundant segment
// override prefixes, consuming up to RemainingSize bytes of padding budget.
// Returns true if the encoding was changed.
bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
                                            // NOTE(review): the extraction
                                            // dropped the "MCCodeEmitter
                                            // &Emitter," parameter line here.
                                            unsigned &RemainingSize) const {
  if (!RF.getAllowAutoPadding())
    return false;
  // If the instruction isn't fully relaxed, shifting it around might require a
  // larger value for one of the fixups then can be encoded.  The outer loop
  // will also catch this before moving to the next instruction, but we need to
  // prevent padding this single instruction as well.
  if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
    return false;

  const unsigned OldSize = RF.getContents().size();
  // 15 bytes is the architectural maximum x86 instruction length.
  if (OldSize == 15)
    return false;

  const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
  const unsigned RemainingPrefixSize = [&]() -> unsigned {
    // NOTE(review): the extraction dropped a buffer declaration here (likely
    // "SmallString<16> Code;").
    X86_MC::emitPrefix(Emitter, RF.getInst(), Code, STI);
    assert(Code.size() < 15 && "The number of prefixes must be less than 15.");

    // TODO: It turns out we need a decent amount of plumbing for the target
    // specific bits to determine number of prefixes its safe to add.  Various
    // targets (older chips mostly, but also Atom family) encounter decoder
    // stalls with too many prefixes.  For testing purposes, we set the value
    // externally for the moment.
    unsigned ExistingPrefixSize = Code.size();
    if (TargetPrefixMax <= ExistingPrefixSize)
      return 0;
    return TargetPrefixMax - ExistingPrefixSize;
  }();
  const unsigned PrefixBytesToAdd =
      std::min(MaxPossiblePad, RemainingPrefixSize);
  if (PrefixBytesToAdd == 0)
    return false;

  const uint8_t Prefix = determinePaddingPrefix(RF.getInst());

  // NOTE(review): the extraction dropped a buffer declaration here (likely
  // "SmallString<256> Code;").
  Code.append(PrefixBytesToAdd, Prefix);
  Code.append(RF.getContents().begin(), RF.getContents().end());
  RF.getContents() = Code;

  // Adjust the fixups for the change in offsets
  for (auto &F : RF.getFixups()) {
    F.setOffset(F.getOffset() + PrefixBytesToAdd);
  }

  RemainingSize -= PrefixBytesToAdd;
  return true;
}
818
// Grow an instruction by re-encoding it in its relaxed (longer) form, if the
// growth fits in RemainingSize. Returns true if the fragment was changed.
bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
                                                // NOTE(review): the extraction
                                                // dropped the "MCCodeEmitter
                                                // &Emitter," line here.
                                                unsigned &RemainingSize) const {
  if (!mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
    // TODO: There are lots of other tricks we could apply for increasing
    // encoding size without impacting performance.
    return false;

  MCInst Relaxed = RF.getInst();
  relaxInstruction(Relaxed, *RF.getSubtargetInfo());

  // NOTE(review): the extraction dropped the local declarations here (likely
  // "SmallVector<MCFixup, 4> Fixups;" and "SmallString<15> Code;").
  Emitter.encodeInstruction(Relaxed, Code, Fixups, *RF.getSubtargetInfo());
  const unsigned OldSize = RF.getContents().size();
  const unsigned NewSize = Code.size();
  assert(NewSize >= OldSize && "size decrease during relaxation?");
  unsigned Delta = NewSize - OldSize;
  if (Delta > RemainingSize)
    return false;
  RF.setInst(Relaxed);
  RF.getContents() = Code;
  RF.getFixups() = Fixups;
  RemainingSize -= Delta;
  return true;
}
845
// Try to consume padding budget on one instruction: first by relaxing it to a
// longer encoding, then by prepending prefixes with whatever budget remains.
bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
                                           // NOTE(review): the extraction
                                           // dropped the "MCCodeEmitter
                                           // &Emitter," parameter line here.
                                           unsigned &RemainingSize) const {
  bool Changed = false;
  if (RemainingSize != 0)
    Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
  if (RemainingSize != 0)
    Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
  return Changed;
}
856
void X86AsmBackend::finishLayout(MCAssembler const &Asm) const {
  // See if we can further relax some instructions to cut down on the number of
  // nop bytes required for code alignment.  The actual win is in reducing
  // instruction count, not number of bytes.  Modern X86-64 can easily end up
  // decode limited.  It is often better to reduce the number of instructions
  // (i.e. eliminate nops) even at the cost of increasing the size and
  // complexity of others.
  if (!X86PadForAlign && !X86PadForBranchAlign)
    return;

  // The processed regions are delimitered by LabeledFragments. -g may have more
  // MCSymbols and therefore different relaxation results. X86PadForAlign is
  // disabled by default to eliminate the -g vs non -g difference.
  DenseSet<MCFragment *> LabeledFragments;
  for (const MCSymbol &S : Asm.symbols())
    LabeledFragments.insert(S.getFragment(false));

  for (MCSection &Sec : Asm) {
    if (!Sec.isText())
      continue;

    // Candidate instructions preceding the current align/boundary-align
    // fragment, in emission order.
    // NOTE(review): the extraction dropped the declaration here (likely
    // "SmallVector<MCRelaxableFragment *, 4> Relaxable;").
    for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
      MCFragment &F = *I;

      // A label on a fragment ends the region we may pad — relaxing across
      // it would move the labeled address.
      if (LabeledFragments.count(&F))
        Relaxable.clear();

      if (F.getKind() == MCFragment::FT_Data) // Skip and ignore
        continue;

      if (F.getKind() == MCFragment::FT_Relaxable) {
        auto &RF = cast<MCRelaxableFragment>(*I);
        Relaxable.push_back(&RF);
        continue;
      }

      auto canHandle = [](MCFragment &F) -> bool {
        switch (F.getKind()) {
        default:
          return false;
        // NOTE(review): the extraction dropped the case labels here (likely
        // "case MCFragment::FT_Align:" and "case MCFragment::FT_BoundaryAlign:").
          return X86PadForAlign;
          return X86PadForBranchAlign;
        }
      };
      // For any unhandled kind, assume we can't change layout.
      if (!canHandle(F)) {
        Relaxable.clear();
        continue;
      }

#ifndef NDEBUG
      const uint64_t OrigOffset = Asm.getFragmentOffset(F);
#endif
      const uint64_t OrigSize = Asm.computeFragmentSize(F);

      // To keep the effects local, prefer to relax instructions closest to
      // the align directive.  This is purely about human understandability
      // of the resulting code.  If we later find a reason to expand
      // particular instructions over others, we can adjust.
      unsigned RemainingSize = OrigSize;
      while (!Relaxable.empty() && RemainingSize != 0) {
        auto &RF = *Relaxable.pop_back_val();
        // Give the backend a chance to play any tricks it wishes to increase
        // the encoding size of the given instruction.  Target independent code
        // will try further relaxation, but target's may play further tricks.
        if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
          Sec.setHasLayout(false);

        // If we have an instruction which hasn't been fully relaxed, we can't
        // skip past it and insert bytes before it.  Changing its starting
        // offset might require a larger negative offset than it can encode.
        // We don't need to worry about larger positive offsets as none of the
        // possible offsets between this and our align are visible, and the
        // ones afterwards aren't changing.
        if (mayNeedRelaxation(RF.getInst(), *RF.getSubtargetInfo()))
          break;
      }
      Relaxable.clear();

      // BoundaryAlign explicitly tracks it's size (unlike align)
      if (F.getKind() == MCFragment::FT_BoundaryAlign)
        cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);

#ifndef NDEBUG
      const uint64_t FinalOffset = Asm.getFragmentOffset(F);
      const uint64_t FinalSize = Asm.computeFragmentSize(F);
      assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
             "can't move start of next fragment!");
      assert(FinalSize == RemainingSize && "inconsistent size computation?");
#endif

      // If we're looking at a boundary align, make sure we don't try to pad
      // its target instructions for some following directive.  Doing so would
      // break the alignment of the current boundary align.
      if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
        const MCFragment *LastFragment = BF->getLastFragment();
        if (!LastFragment)
          continue;
        while (&*I != LastFragment)
          ++I;
      }
    }
  }

  // The layout is done. Mark every fragment as valid.
  for (MCSection &Section : Asm) {
    Asm.getFragmentOffset(*Section.curFragList()->Tail);
    Asm.computeFragmentSize(*Section.curFragList()->Tail);
  }
}
970
971unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
972 if (STI.hasFeature(X86::Is16Bit))
973 return 4;
974 if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
975 return 1;
976 if (STI.hasFeature(X86::TuningFast7ByteNOP))
977 return 7;
978 if (STI.hasFeature(X86::TuningFast15ByteNOP))
979 return 15;
980 if (STI.hasFeature(X86::TuningFast11ByteNOP))
981 return 11;
982 // FIXME: handle 32-bit mode
983 // 15-bytes is the longest single NOP instruction, but 10-bytes is
984 // commonly the longest that can be efficiently decoded.
985 return 10;
986}
987
988/// Write a sequence of optimal nops to the output, covering \p Count
989/// bytes.
990/// \return - true on success, false on failure
991bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
992 const MCSubtargetInfo *STI) const {
993 static const char Nops32Bit[10][11] = {
994 // nop
995 "\x90",
996 // xchg %ax,%ax
997 "\x66\x90",
998 // nopl (%[re]ax)
999 "\x0f\x1f\x00",
1000 // nopl 0(%[re]ax)
1001 "\x0f\x1f\x40\x00",
1002 // nopl 0(%[re]ax,%[re]ax,1)
1003 "\x0f\x1f\x44\x00\x00",
1004 // nopw 0(%[re]ax,%[re]ax,1)
1005 "\x66\x0f\x1f\x44\x00\x00",
1006 // nopl 0L(%[re]ax)
1007 "\x0f\x1f\x80\x00\x00\x00\x00",
1008 // nopl 0L(%[re]ax,%[re]ax,1)
1009 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
1010 // nopw 0L(%[re]ax,%[re]ax,1)
1011 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
1012 // nopw %cs:0L(%[re]ax,%[re]ax,1)
1013 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
1014 };
1015
1016 // 16-bit mode uses different nop patterns than 32-bit.
1017 static const char Nops16Bit[4][11] = {
1018 // nop
1019 "\x90",
1020 // xchg %eax,%eax
1021 "\x66\x90",
1022 // lea 0(%si),%si
1023 "\x8d\x74\x00",
1024 // lea 0w(%si),%si
1025 "\x8d\xb4\x00\x00",
1026 };
1027
1028 const char(*Nops)[11] =
1029 STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
1030
1031 uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1032
1033 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1034 // length.
1035 do {
1036 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1037 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1038 for (uint8_t i = 0; i < Prefixes; i++)
1039 OS << '\x66';
1040 const uint8_t Rest = ThisNopLength - Prefixes;
1041 if (Rest != 0)
1042 OS.write(Nops[Rest - 1], Rest);
1043 Count -= ThisNopLength;
1044 } while (Count != 0);
1045
1046 return true;
1047}
1048
1049/* *** */
1050
1051namespace {
1052
/// Common base for the ELF flavors of the X86 asm backend. It carries the
/// OS/ABI identification byte that the derived classes forward to
/// createX86ELFObjectWriter for the ELF header.
class ELFX86AsmBackend : public X86AsmBackend {
public:
  // ELFOSABI_* value, consumed by the derived createObjectTargetWriter().
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};
1059
/// Asm backend for 32-bit x86 ELF targets (EM_386 machine type).
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    // 32-bit ELF class, classic i386 machine type.
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};
1071
1072class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1073public:
1074 ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1075 const MCSubtargetInfo &STI)
1076 : ELFX86AsmBackend(T, OSABI, STI) {}
1077
1078 std::unique_ptr<MCObjectTargetWriter>
1079 createObjectTargetWriter() const override {
1080 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1082 }
1083};
1084
1085class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1086public:
1087 ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1088 const MCSubtargetInfo &STI)
1089 : ELFX86AsmBackend(T, OSABI, STI) {}
1090
1091 std::unique_ptr<MCObjectTargetWriter>
1092 createObjectTargetWriter() const override {
1093 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1095 }
1096};
1097
/// Asm backend for 64-bit x86 ELF targets (ELFCLASS64 + EM_X86_64).
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    // Full 64-bit ELF: 64-bit ELF class and x86-64 machine type.
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};
1109
1110class WindowsX86AsmBackend : public X86AsmBackend {
1111 bool Is64Bit;
1112
1113public:
1114 WindowsX86AsmBackend(const Target &T, bool is64Bit,
1115 const MCSubtargetInfo &STI)
1116 : X86AsmBackend(T, STI)
1117 , Is64Bit(is64Bit) {
1118 }
1119
1120 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override {
1122 .Case("dir32", FK_Data_4)
1123 .Case("secrel32", FK_SecRel_4)
1124 .Case("secidx", FK_SecRel_2)
1126 }
1127
1128 std::unique_ptr<MCObjectTargetWriter>
1129 createObjectTargetWriter() const override {
1130 return createX86WinCOFFObjectWriter(Is64Bit);
1131 }
1132};
1133
namespace CU {

/// Compact unwind encoding values.
/// The enum declaration line was missing (truncated source) and is restored.
enum CompactUnwindEncodings {
  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
  /// the return address, then [RE]SP is moved to [RE]BP.
  UNWIND_MODE_BP_FRAME = 0x01000000,

  /// A frameless function with a small constant stack size.
  UNWIND_MODE_STACK_IMMD = 0x02000000,

  /// A frameless function with a large constant stack size.
  UNWIND_MODE_STACK_IND = 0x03000000,

  /// No compact unwind encoding is available.
  UNWIND_MODE_DWARF = 0x04000000,

  /// Mask for encoding the frame registers.
  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

  /// Mask for encoding the frameless registers.
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
};

} // namespace CU
1159
1160class DarwinX86AsmBackend : public X86AsmBackend {
1161 const MCRegisterInfo &MRI;
1162
1163 /// Number of registers that can be saved in a compact unwind encoding.
1164 enum { CU_NUM_SAVED_REGS = 6 };
1165
1166 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1167 Triple TT;
1168 bool Is64Bit;
1169
1170 unsigned OffsetSize; ///< Offset of a "push" instruction.
1171 unsigned MoveInstrSize; ///< Size of a "move" instruction.
1172 unsigned StackDivide; ///< Amount to adjust stack size by.
1173protected:
1174 /// Size of a "push" instruction for the given register.
1175 unsigned PushInstrSize(unsigned Reg) const {
1176 switch (Reg) {
1177 case X86::EBX:
1178 case X86::ECX:
1179 case X86::EDX:
1180 case X86::EDI:
1181 case X86::ESI:
1182 case X86::EBP:
1183 case X86::RBX:
1184 case X86::RBP:
1185 return 1;
1186 case X86::R12:
1187 case X86::R13:
1188 case X86::R14:
1189 case X86::R15:
1190 return 2;
1191 }
1192 return 1;
1193 }
1194
1195private:
1196 /// Get the compact unwind number for a given register. The number
1197 /// corresponds to the enum lists in compact_unwind_encoding.h.
1198 int getCompactUnwindRegNum(unsigned Reg) const {
1199 static const MCPhysReg CU32BitRegs[7] = {
1200 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1201 };
1202 static const MCPhysReg CU64BitRegs[] = {
1203 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1204 };
1205 const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1206 for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1207 if (*CURegs == Reg)
1208 return Idx;
1209
1210 return -1;
1211 }
1212
1213 /// Return the registers encoded for a compact encoding with a frame
1214 /// pointer.
1215 uint32_t encodeCompactUnwindRegistersWithFrame() const {
1216 // Encode the registers in the order they were saved --- 3-bits per
1217 // register. The list of saved registers is assumed to be in reverse
1218 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1219 uint32_t RegEnc = 0;
1220 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1221 unsigned Reg = SavedRegs[i];
1222 if (Reg == 0) break;
1223
1224 int CURegNum = getCompactUnwindRegNum(Reg);
1225 if (CURegNum == -1) return ~0U;
1226
1227 // Encode the 3-bit register number in order, skipping over 3-bits for
1228 // each register.
1229 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1230 }
1231
1232 assert((RegEnc & 0x3FFFF) == RegEnc &&
1233 "Invalid compact register encoding!");
1234 return RegEnc;
1235 }
1236
1237 /// Create the permutation encoding used with frameless stacks. It is
1238 /// passed the number of registers to be saved and an array of the registers
1239 /// saved.
1240 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
1241 // The saved registers are numbered from 1 to 6. In order to encode the
1242 // order in which they were saved, we re-number them according to their
1243 // place in the register order. The re-numbering is relative to the last
1244 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
1245 // that order:
1246 //
1247 // Orig Re-Num
1248 // ---- ------
1249 // 6 6
1250 // 2 2
1251 // 4 3
1252 // 5 3
1253 //
1254 for (unsigned i = 0; i < RegCount; ++i) {
1255 int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
1256 if (CUReg == -1) return ~0U;
1257 SavedRegs[i] = CUReg;
1258 }
1259
1260 // Reverse the list.
1261 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
1262
1263 uint32_t RenumRegs[CU_NUM_SAVED_REGS];
1264 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
1265 unsigned Countless = 0;
1266 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
1267 if (SavedRegs[j] < SavedRegs[i])
1268 ++Countless;
1269
1270 RenumRegs[i] = SavedRegs[i] - Countless - 1;
1271 }
1272
1273 // Take the renumbered values and encode them into a 10-bit number.
1274 uint32_t permutationEncoding = 0;
1275 switch (RegCount) {
1276 case 6:
1277 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
1278 + 6 * RenumRegs[2] + 2 * RenumRegs[3]
1279 + RenumRegs[4];
1280 break;
1281 case 5:
1282 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
1283 + 6 * RenumRegs[3] + 2 * RenumRegs[4]
1284 + RenumRegs[5];
1285 break;
1286 case 4:
1287 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
1288 + 3 * RenumRegs[4] + RenumRegs[5];
1289 break;
1290 case 3:
1291 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
1292 + RenumRegs[5];
1293 break;
1294 case 2:
1295 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
1296 break;
1297 case 1:
1298 permutationEncoding |= RenumRegs[5];
1299 break;
1300 }
1301
1302 assert((permutationEncoding & 0x3FF) == permutationEncoding &&
1303 "Invalid compact register encoding!");
1304 return permutationEncoding;
1305 }
1306
1307public:
1308 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
1309 const MCSubtargetInfo &STI)
1310 : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
1311 Is64Bit(TT.isArch64Bit()) {
1312 memset(SavedRegs, 0, sizeof(SavedRegs));
1313 OffsetSize = Is64Bit ? 8 : 4;
1314 MoveInstrSize = Is64Bit ? 3 : 2;
1315 StackDivide = Is64Bit ? 8 : 4;
1316 }
1317
1318 std::unique_ptr<MCObjectTargetWriter>
1319 createObjectTargetWriter() const override {
1321 uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
1322 return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
1323 }
1324
1325 /// Implementation of algorithm to generate the compact unwind encoding
1326 /// for the CFI instructions.
1327 uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
1328 const MCContext *Ctxt) const override {
1330 if (Instrs.empty()) return 0;
1331 if (!isDarwinCanonicalPersonality(FI->Personality) &&
1333 return CU::UNWIND_MODE_DWARF;
1334
1335 // Reset the saved registers.
1336 unsigned SavedRegIdx = 0;
1337 memset(SavedRegs, 0, sizeof(SavedRegs));
1338
1339 bool HasFP = false;
1340
1341 // Encode that we are using EBP/RBP as the frame pointer.
1342 uint64_t CompactUnwindEncoding = 0;
1343
1344 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
1345 unsigned InstrOffset = 0;
1346 unsigned StackAdjust = 0;
1347 uint64_t StackSize = 0;
1348 int64_t MinAbsOffset = std::numeric_limits<int64_t>::max();
1349
1350 for (const MCCFIInstruction &Inst : Instrs) {
1351 switch (Inst.getOperation()) {
1352 default:
1353 // Any other CFI directives indicate a frame that we aren't prepared
1354 // to represent via compact unwind, so just bail out.
1355 return CU::UNWIND_MODE_DWARF;
1357 // Defines a frame pointer. E.g.
1358 //
1359 // movq %rsp, %rbp
1360 // L0:
1361 // .cfi_def_cfa_register %rbp
1362 //
1363 HasFP = true;
1364
1365 // If the frame pointer is other than esp/rsp, we do not have a way to
1366 // generate a compact unwinding representation, so bail out.
1367 if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
1368 (Is64Bit ? X86::RBP : X86::EBP))
1369 return CU::UNWIND_MODE_DWARF;
1370
1371 // Reset the counts.
1372 memset(SavedRegs, 0, sizeof(SavedRegs));
1373 StackAdjust = 0;
1374 SavedRegIdx = 0;
1375 MinAbsOffset = std::numeric_limits<int64_t>::max();
1376 InstrOffset += MoveInstrSize;
1377 break;
1378 }
1380 // Defines a new offset for the CFA. E.g.
1381 //
1382 // With frame:
1383 //
1384 // pushq %rbp
1385 // L0:
1386 // .cfi_def_cfa_offset 16
1387 //
1388 // Without frame:
1389 //
1390 // subq $72, %rsp
1391 // L0:
1392 // .cfi_def_cfa_offset 80
1393 //
1394 StackSize = Inst.getOffset() / StackDivide;
1395 break;
1396 }
1398 // Defines a "push" of a callee-saved register. E.g.
1399 //
1400 // pushq %r15
1401 // pushq %r14
1402 // pushq %rbx
1403 // L0:
1404 // subq $120, %rsp
1405 // L1:
1406 // .cfi_offset %rbx, -40
1407 // .cfi_offset %r14, -32
1408 // .cfi_offset %r15, -24
1409 //
1410 if (SavedRegIdx == CU_NUM_SAVED_REGS)
1411 // If there are too many saved registers, we cannot use a compact
1412 // unwind encoding.
1413 return CU::UNWIND_MODE_DWARF;
1414
1415 unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1416 SavedRegs[SavedRegIdx++] = Reg;
1417 StackAdjust += OffsetSize;
1418 MinAbsOffset = std::min(MinAbsOffset, std::abs(Inst.getOffset()));
1419 InstrOffset += PushInstrSize(Reg);
1420 break;
1421 }
1422 }
1423 }
1424
1425 StackAdjust /= StackDivide;
1426
1427 if (HasFP) {
1428 if ((StackAdjust & 0xFF) != StackAdjust)
1429 // Offset was too big for a compact unwind encoding.
1430 return CU::UNWIND_MODE_DWARF;
1431
1432 // We don't attempt to track a real StackAdjust, so if the saved registers
1433 // aren't adjacent to rbp we can't cope.
1434 if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
1435 return CU::UNWIND_MODE_DWARF;
1436
1437 // Get the encoding of the saved registers when we have a frame pointer.
1438 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
1439 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1440
1441 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
1442 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
1443 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
1444 } else {
1445 SubtractInstrIdx += InstrOffset;
1446 ++StackAdjust;
1447
1448 if ((StackSize & 0xFF) == StackSize) {
1449 // Frameless stack with a small stack size.
1450 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
1451
1452 // Encode the stack size.
1453 CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
1454 } else {
1455 if ((StackAdjust & 0x7) != StackAdjust)
1456 // The extra stack adjustments are too big for us to handle.
1457 return CU::UNWIND_MODE_DWARF;
1458
1459 // Frameless stack with an offset too large for us to encode compactly.
1460 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
1461
1462 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
1463 // instruction.
1464 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
1465
1466 // Encode any extra stack adjustments (done via push instructions).
1467 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
1468 }
1469
1470 // Encode the number of registers saved. (Reverse the list first.)
1471 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
1472 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
1473
1474 // Get the encoding of the saved registers when we don't have a frame
1475 // pointer.
1476 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
1477 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1478
1479 // Encode the register encoding.
1480 CompactUnwindEncoding |=
1481 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
1482 }
1483
1484 return CompactUnwindEncoding;
1485 }
1486};
1487
1488} // end anonymous namespace
1489
1491 const MCSubtargetInfo &STI,
1492 const MCRegisterInfo &MRI,
1493 const MCTargetOptions &Options) {
1494 const Triple &TheTriple = STI.getTargetTriple();
1495 if (TheTriple.isOSBinFormatMachO())
1496 return new DarwinX86AsmBackend(T, MRI, STI);
1497
1498 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1499 return new WindowsX86AsmBackend(T, false, STI);
1500
1501 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1502
1503 if (TheTriple.isOSIAMCU())
1504 return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);
1505
1506 return new ELFX86_32AsmBackend(T, OSABI, STI);
1507}
1508
1510 const MCSubtargetInfo &STI,
1511 const MCRegisterInfo &MRI,
1512 const MCTargetOptions &Options) {
1513 const Triple &TheTriple = STI.getTargetTriple();
1514 if (TheTriple.isOSBinFormatMachO())
1515 return new DarwinX86AsmBackend(T, MRI, STI);
1516
1517 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1518 return new WindowsX86AsmBackend(T, true, STI);
1519
1520 if (TheTriple.isUEFI()) {
1521 assert(TheTriple.isOSBinFormatCOFF() &&
1522 "Only COFF format is supported in UEFI environment.");
1523 return new WindowsX86AsmBackend(T, true, STI);
1524 }
1525
1526 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1527
1528 if (TheTriple.isX32())
1529 return new ELFX86_X32AsmBackend(T, OSABI, STI);
1530 return new ELFX86_64AsmBackend(T, OSABI, STI);
1531}
1532
namespace {
/// ELF streamer that overrides emitInstruction so the X86 asm backend can
/// wrap each instruction (see the X86_MC::emitInstruction helper below,
/// which calls emitInstructionBegin/emitInstructionEnd on the backend).
class X86ELFStreamer : public MCELFStreamer {
public:
  X86ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                 std::unique_ptr<MCObjectWriter> OW,
                 std::unique_ptr<MCCodeEmitter> Emitter)
      : MCELFStreamer(Context, std::move(TAB), std::move(OW),
                      std::move(Emitter)) {}

  void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
};
} // end anonymous namespace
1545
1547 const MCSubtargetInfo &STI) {
1548 auto &Backend = static_cast<X86AsmBackend &>(S.getAssembler().getBackend());
1549 Backend.emitInstructionBegin(S, Inst, STI);
1550 S.MCObjectStreamer::emitInstruction(Inst, STI);
1551 Backend.emitInstructionEnd(S, Inst);
1552}
1553
void X86ELFStreamer::emitInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI) {
  // Delegate to the shared X86 helper, which invokes the asm backend's
  // emitInstructionBegin/End hooks around the default emission.
  X86_MC::emitInstruction(*this, Inst, STI);
}
1558
1560 std::unique_ptr<MCAsmBackend> &&MAB,
1561 std::unique_ptr<MCObjectWriter> &&MOW,
1562 std::unique_ptr<MCCodeEmitter> &&MCE) {
1563 return new X86ELFStreamer(Context, std::move(MAB), std::move(MOW),
1564 std::move(MCE));
1565}
unsigned const MachineRegisterInfo * MRI
dxil DXContainer Global Emitter
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static RegisterPass< DebugifyFunctionPass > DF("debugify-function", "Attach debug info to a function")
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
PowerPC TLS Dynamic Call Fixup
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static MCInstrInfo * createMCInstrInfo()
static unsigned getRelaxedOpcodeBranch(unsigned Opcode, bool Is16BitMode=false)
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
static size_t getSizeForInstFragment(const MCFragment *F)
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
static bool mayHaveInterruptDelaySlot(unsigned InstOpcode)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode)
static unsigned getFixupKindSize(unsigned Kind)
static bool isRelaxableBranch(unsigned Opcode)
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
static bool is64Bit(const char *name)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:42
virtual bool allowEnhancedRelaxation() const
Return true if this target allows an unrelaxable instruction to be emitted into RelaxableFragment and...
Definition: MCAsmBackend.h:64
virtual unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const
Returns the maximum size of a nop in bytes on this target.
Definition: MCAsmBackend.h:211
virtual bool writeNopData(raw_ostream &OS, uint64_t Count, const MCSubtargetInfo *STI) const =0
Write an (optimal) nop sequence of Count bytes to the given output.
virtual void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const
Relax the instruction in the given fragment to the next wider instruction.
Definition: MCAsmBackend.h:179
virtual bool mayNeedRelaxation(const MCInst &Inst, const MCSubtargetInfo &STI) const
Check whether the given instruction may need relaxation.
Definition: MCAsmBackend.h:155
virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value) const
Simple predicate for targets where !Resolved implies requiring relaxation.
Definition: MCAsmBackend.h:169
virtual void finishLayout(MCAssembler const &Asm) const
Give backend an opportunity to finish layout after relaxation.
Definition: MCAsmBackend.h:223
virtual bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, const MCSubtargetInfo *STI)
Hook to check if a relocation is needed for some target specific reason.
Definition: MCAsmBackend.h:96
virtual unsigned getNumFixupKinds() const =0
Get the number of target specific fixup kinds.
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
virtual std::optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef< char > Data, uint64_t Value, bool IsResolved, const MCSubtargetInfo *STI) const =0
Apply the Value for given Fixup into the provided data fragment, at the offset specified by the fixup...
virtual bool allowAutoPadding() const
Return true if this target might automatically pad instructions and thus need to emit padding enable/...
Definition: MCAsmBackend.h:60
MCAsmBackend & getBackend() const
Definition: MCAssembler.h:188
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCFragment.h:532
void setLastFragment(const MCFragment *F)
Definition: MCFragment.h:556
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
Context object for machine code objects.
Definition: MCContext.h:83
bool emitCompactUnwindNonCanonical() const
Definition: MCContext.cpp:938
Fragment for data and encoded instructions.
Definition: MCFragment.h:219
SmallVectorImpl< MCFixup > & getFixups()
Definition: MCFragment.h:200
SmallVectorImpl< char > & getContents()
Definition: MCFragment.h:197
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCFragment.h:167
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:39
ExprKind getKind() const
Definition: MCExpr.h:78
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:71
MCFragment * getNext() const
Definition: MCFragment.h:95
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:84
unsigned getOpcode() const
Definition: MCInst.h:198
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:317
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Streaming object file generation interface.
MCAssembler & getAssembler()
void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override
Emit the given Instruction into the current section.
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
Definition: MCFragment.h:234
bool getAllowAutoPadding() const
Definition: MCFragment.h:247
const MCInst & getInst() const
Definition: MCFragment.h:244
void setInst(const MCInst &Value)
Definition: MCFragment.h:245
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:36
void ensureMinAlignment(Align MinAlignment)
Makes sure that Alignment is at least MinAlignment.
Definition: MCSection.h:150
bool isText() const
Definition: MCSection.h:131
iterator end() const
Definition: MCSection.h:183
void setHasLayout(bool Value)
Definition: MCSection.h:173
iterator begin() const
Definition: MCSection.h:182
Streaming machine code generation interface.
Definition: MCStreamer.h:213
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
This represents an "assembler immediate".
Definition: MCValue.h:36
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:307
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:685
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:1037
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition: Triple.h:732
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:382
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:724
bool isUEFI() const
Tests whether the OS is UEFI.
Definition: Triple.h:619
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:624
bool isOSIAMCU() const
Definition: Triple.h:598
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:719
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & write(unsigned char C)
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CompactUnwindEncodings
Compact unwind encoding values.
@ EM_386
Definition: ELF.h:137
@ EM_X86_64
Definition: ELF.h:179
@ EM_IAMCU
Definition: ELF.h:140
Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
@ Relaxed
Definition: NVPTX.h:116
Reg
All possible values of the reg field in the ModR/M byte.
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:882
int getMemoryOperandNo(uint64_t TSFlags)
Definition: X86BaseInfo.h:1011
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:968
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:518
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:511
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:508
void emitPrefix(MCCodeEmitter &MCE, const MCInst &MI, SmallVectorImpl< char > &CB, const MCSubtargetInfo &STI)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:126
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:309
@ AlignBranchJmp
Definition: X86BaseInfo.h:313
@ AlignBranchIndirect
Definition: X86BaseInfo.h:316
@ AlignBranchJcc
Definition: X86BaseInfo.h:312
@ AlignBranchCall
Definition: X86BaseInfo.h:314
@ AlignBranchRet
Definition: X86BaseInfo.h:315
@ AlignBranchNone
Definition: X86BaseInfo.h:310
@ AlignBranchFused
Definition: X86BaseInfo.h:311
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:116
@ AddrSegmentReg
Definition: X86BaseInfo.h:34
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:107
unsigned getOpcodeForLongImmediateForm(unsigned Opcode)
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:332
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:290
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:35
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FirstTargetFixupKind
Definition: MCFixup.h:45
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:41
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from ....
Definition: MCFixup.h:50
@ FK_Data_8
A eight-byte fixup.
Definition: MCFixup.h:26
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
@ FK_SecRel_8
A eight-byte section relative fixup.
Definition: MCFixup.h:43
@ FK_PCRel_8
A eight-byte pc relative fixup.
Definition: MCFixup.h:31
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:40
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:756
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:260
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1856
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
endianness
Definition: bit.h:70
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
const MCSymbol * Personality
Definition: MCDwarf.h:733
std::vector< MCCFIInstruction > Instructions
Definition: MCDwarf.h:735
Target independent information on a fixup kind.
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...