LLVM  13.0.0git
X86AsmBackend.cpp
1 //===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/X86BaseInfo.h"
10 #include "MCTargetDesc/X86FixupKinds.h"
11 #include "llvm/ADT/StringSwitch.h"
12 #include "llvm/BinaryFormat/ELF.h"
13 #include "llvm/BinaryFormat/MachO.h"
14 #include "llvm/MC/MCAsmBackend.h"
15 #include "llvm/MC/MCAsmLayout.h"
16 #include "llvm/MC/MCAssembler.h"
17 #include "llvm/MC/MCCodeEmitter.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCDwarf.h"
20 #include "llvm/MC/MCELFObjectWriter.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCFixupKindInfo.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/MC/MCInstrInfo.h"
25 #include "llvm/MC/MCMachObjectWriter.h"
26 #include "llvm/MC/MCObjectStreamer.h"
27 #include "llvm/MC/MCObjectWriter.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCSectionMachO.h"
30 #include "llvm/MC/MCSubtargetInfo.h"
31 #include "llvm/MC/MCValue.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 using namespace llvm;
38 
39 namespace {
40 /// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
41 class X86AlignBranchKind {
42 private:
43  uint8_t AlignBranchKind = 0;
44 
45 public:
46  void operator=(const std::string &Val) {
47  if (Val.empty())
48  return;
49  SmallVector<StringRef, 6> BranchTypes;
50  StringRef(Val).split(BranchTypes, '+', -1, false);
51  for (auto BranchType : BranchTypes) {
52  if (BranchType == "fused")
53  addKind(X86::AlignBranchFused);
54  else if (BranchType == "jcc")
55  addKind(X86::AlignBranchJcc);
56  else if (BranchType == "jmp")
57  addKind(X86::AlignBranchJmp);
58  else if (BranchType == "call")
59  addKind(X86::AlignBranchCall);
60  else if (BranchType == "ret")
61  addKind(X86::AlignBranchRet);
62  else if (BranchType == "indirect")
63  addKind(X86::AlignBranchIndirect);
64  else {
65  errs() << "invalid argument " << BranchType.str()
66  << " to -x86-align-branch=; each element must be one of: fused, "
67  "jcc, jmp, call, ret, indirect.(plus separated)\n";
68  }
69  }
70  }
71 
72  operator uint8_t() const { return AlignBranchKind; }
73  void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
74 };
75 
76 X86AlignBranchKind X86AlignBranchKindLoc;
77 
78 cl::opt<unsigned> X86AlignBranchBoundary(
79  "x86-align-branch-boundary", cl::init(0),
80  cl::desc(
81  "Control how the assembler should align branches with NOP. If the "
82  "boundary's size is not 0, it should be a power of 2 and no less "
83  "than 32. Branches will be aligned to prevent from being across or "
84  "against the boundary of specified size. The default value 0 does not "
85  "align branches."));
86 
87 cl::opt<X86AlignBranchKind, true, cl::parser<std::string>> X86AlignBranch(
88  "x86-align-branch",
89  cl::desc(
90  "Specify types of branches to align (plus separated list of types):"
91  "\njcc indicates conditional jumps"
92  "\nfused indicates fused conditional jumps"
93  "\njmp indicates direct unconditional jumps"
94  "\ncall indicates direct and indirect calls"
95  "\nret indicates rets"
96  "\nindirect indicates indirect unconditional jumps"),
97  cl::location(X86AlignBranchKindLoc));
98 
99 cl::opt<bool> X86AlignBranchWithin32BBoundaries(
100  "x86-branches-within-32B-boundaries", cl::init(false),
101  cl::desc(
102  "Align selected instructions to mitigate negative performance impact "
103  "of Intel's micro code update for errata skx102. May break "
104  "assumptions about labels corresponding to particular instructions, "
105  "and should be used with caution."));
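// Usage sketch (flag names are taken from the definitions above; the exact
// driver spelling, e.g. whether a -mllvm prefix is required, depends on how
// the backend is invoked):
//
//   llc -x86-align-branch-boundary=32 -x86-align-branch=fused+jcc+jmp foo.ll
//
// pads with NOPs so that macro-fused jumps, conditional jumps and direct
// unconditional jumps neither cross nor end against a 32-byte boundary.
// -x86-branches-within-32B-boundaries enables the same defaults with a single
// master flag.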
106 
107 cl::opt<unsigned> X86PadMaxPrefixSize(
108  "x86-pad-max-prefix-size", cl::init(0),
109  cl::desc("Maximum number of prefixes to use for padding"));
110 
111 cl::opt<bool> X86PadForAlign(
112  "x86-pad-for-align", cl::init(false), cl::Hidden,
113  cl::desc("Pad previous instructions to implement align directives"));
114 
115 cl::opt<bool> X86PadForBranchAlign(
116  "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
117  cl::desc("Pad previous instructions to implement branch alignment"));
118 
119 class X86ELFObjectWriter : public MCELFObjectTargetWriter {
120 public:
121  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
122  bool HasRelocationAddend, bool foobar)
123  : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
124 };
125 
126 class X86AsmBackend : public MCAsmBackend {
127  const MCSubtargetInfo &STI;
128  std::unique_ptr<const MCInstrInfo> MCII;
129  X86AlignBranchKind AlignBranchType;
130  Align AlignBoundary;
131  unsigned TargetPrefixMax = 0;
132 
133  MCInst PrevInst;
134  MCBoundaryAlignFragment *PendingBA = nullptr;
135  std::pair<MCFragment *, size_t> PrevInstPosition;
136  bool CanPadInst;
137 
138  uint8_t determinePaddingPrefix(const MCInst &Inst) const;
139  bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
140  bool needAlign(const MCInst &Inst) const;
141  bool canPadBranches(MCObjectStreamer &OS) const;
142  bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
143 
144 public:
145  X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
146  : MCAsmBackend(support::little), STI(STI),
147  MCII(T.createMCInstrInfo()) {
148  if (X86AlignBranchWithin32BBoundaries) {
149  // At the moment, this defaults to aligning fused branches, unconditional
150  // jumps, and (unfused) conditional jumps with nops. Both the
151  // instructions aligned and the alignment method (nop vs prefix) may
152  // change in the future.
153  AlignBoundary = assumeAligned(32);
154  AlignBranchType.addKind(X86::AlignBranchFused);
155  AlignBranchType.addKind(X86::AlignBranchJcc);
156  AlignBranchType.addKind(X86::AlignBranchJmp);
157  }
158  // Allow overriding defaults set by master flag
159  if (X86AlignBranchBoundary.getNumOccurrences())
160  AlignBoundary = assumeAligned(X86AlignBranchBoundary);
161  if (X86AlignBranch.getNumOccurrences())
162  AlignBranchType = X86AlignBranchKindLoc;
163  if (X86PadMaxPrefixSize.getNumOccurrences())
164  TargetPrefixMax = X86PadMaxPrefixSize;
165  }
166 
167  bool allowAutoPadding() const override;
168  bool allowEnhancedRelaxation() const override;
169  void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst) override;
170  void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) override;
171 
172  unsigned getNumFixupKinds() const override {
173  return X86::NumTargetFixupKinds;
174  }
175 
176  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
177 
178  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
179 
180  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
181  const MCValue &Target) override;
182 
183  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
184  const MCValue &Target, MutableArrayRef<char> Data,
185  uint64_t Value, bool IsResolved,
186  const MCSubtargetInfo *STI) const override;
187 
188  bool mayNeedRelaxation(const MCInst &Inst,
189  const MCSubtargetInfo &STI) const override;
190 
191  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
192  const MCRelaxableFragment *DF,
193  const MCAsmLayout &Layout) const override;
194 
195  void relaxInstruction(MCInst &Inst,
196  const MCSubtargetInfo &STI) const override;
197 
198  bool padInstructionViaRelaxation(MCRelaxableFragment &RF,
199  MCCodeEmitter &Emitter,
200  unsigned &RemainingSize) const;
201 
202  bool padInstructionViaPrefix(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
203  unsigned &RemainingSize) const;
204 
205  bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
206  unsigned &RemainingSize) const;
207 
208  void finishLayout(MCAssembler const &Asm, MCAsmLayout &Layout) const override;
209 
210  unsigned getMaximumNopSize() const override;
211 
212  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
213 };
214 } // end anonymous namespace
215 
216 static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode) {
217  unsigned Op = Inst.getOpcode();
218  switch (Op) {
219  default:
220  return Op;
221  case X86::JCC_1:
222  return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
223  case X86::JMP_1:
224  return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
225  }
226 }
227 
228 static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
229  unsigned Op = Inst.getOpcode();
230  switch (Op) {
231  default:
232  return Op;
233 
234  // IMUL
235  case X86::IMUL16rri8: return X86::IMUL16rri;
236  case X86::IMUL16rmi8: return X86::IMUL16rmi;
237  case X86::IMUL32rri8: return X86::IMUL32rri;
238  case X86::IMUL32rmi8: return X86::IMUL32rmi;
239  case X86::IMUL64rri8: return X86::IMUL64rri32;
240  case X86::IMUL64rmi8: return X86::IMUL64rmi32;
241 
242  // AND
243  case X86::AND16ri8: return X86::AND16ri;
244  case X86::AND16mi8: return X86::AND16mi;
245  case X86::AND32ri8: return X86::AND32ri;
246  case X86::AND32mi8: return X86::AND32mi;
247  case X86::AND64ri8: return X86::AND64ri32;
248  case X86::AND64mi8: return X86::AND64mi32;
249 
250  // OR
251  case X86::OR16ri8: return X86::OR16ri;
252  case X86::OR16mi8: return X86::OR16mi;
253  case X86::OR32ri8: return X86::OR32ri;
254  case X86::OR32mi8: return X86::OR32mi;
255  case X86::OR64ri8: return X86::OR64ri32;
256  case X86::OR64mi8: return X86::OR64mi32;
257 
258  // XOR
259  case X86::XOR16ri8: return X86::XOR16ri;
260  case X86::XOR16mi8: return X86::XOR16mi;
261  case X86::XOR32ri8: return X86::XOR32ri;
262  case X86::XOR32mi8: return X86::XOR32mi;
263  case X86::XOR64ri8: return X86::XOR64ri32;
264  case X86::XOR64mi8: return X86::XOR64mi32;
265 
266  // ADD
267  case X86::ADD16ri8: return X86::ADD16ri;
268  case X86::ADD16mi8: return X86::ADD16mi;
269  case X86::ADD32ri8: return X86::ADD32ri;
270  case X86::ADD32mi8: return X86::ADD32mi;
271  case X86::ADD64ri8: return X86::ADD64ri32;
272  case X86::ADD64mi8: return X86::ADD64mi32;
273 
274  // ADC
275  case X86::ADC16ri8: return X86::ADC16ri;
276  case X86::ADC16mi8: return X86::ADC16mi;
277  case X86::ADC32ri8: return X86::ADC32ri;
278  case X86::ADC32mi8: return X86::ADC32mi;
279  case X86::ADC64ri8: return X86::ADC64ri32;
280  case X86::ADC64mi8: return X86::ADC64mi32;
281 
282  // SUB
283  case X86::SUB16ri8: return X86::SUB16ri;
284  case X86::SUB16mi8: return X86::SUB16mi;
285  case X86::SUB32ri8: return X86::SUB32ri;
286  case X86::SUB32mi8: return X86::SUB32mi;
287  case X86::SUB64ri8: return X86::SUB64ri32;
288  case X86::SUB64mi8: return X86::SUB64mi32;
289 
290  // SBB
291  case X86::SBB16ri8: return X86::SBB16ri;
292  case X86::SBB16mi8: return X86::SBB16mi;
293  case X86::SBB32ri8: return X86::SBB32ri;
294  case X86::SBB32mi8: return X86::SBB32mi;
295  case X86::SBB64ri8: return X86::SBB64ri32;
296  case X86::SBB64mi8: return X86::SBB64mi32;
297 
298  // CMP
299  case X86::CMP16ri8: return X86::CMP16ri;
300  case X86::CMP16mi8: return X86::CMP16mi;
301  case X86::CMP32ri8: return X86::CMP32ri;
302  case X86::CMP32mi8: return X86::CMP32mi;
303  case X86::CMP64ri8: return X86::CMP64ri32;
304  case X86::CMP64mi8: return X86::CMP64mi32;
305 
306  // PUSH
307  case X86::PUSH32i8: return X86::PUSHi32;
308  case X86::PUSH16i8: return X86::PUSHi16;
309  case X86::PUSH64i8: return X86::PUSH64i32;
310  }
311 }
312 
313 static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode) {
314  unsigned R = getRelaxedOpcodeArith(Inst);
315  if (R != Inst.getOpcode())
316  return R;
317  return getRelaxedOpcodeBranch(Inst, Is16BitMode);
318 }
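// For example, an "addl $sym, %ecx" whose immediate is a symbolic expression
// is first emitted as ADD32ri8 (sign-extended imm8 form); if layout later
// shows that the value does not fit in a signed 8-bit field (see
// fixupNeedsRelaxation below), relaxInstruction switches the opcode to
// ADD32ri via the table above, whose imm32 field can hold it. JCC_1/JMP_1
// relax to their 4-byte (or, in 16-bit mode, 2-byte) displacement forms the
// same way.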
319 
320 static X86::CondCode getCondFromBranch(const MCInst &MI,
321  const MCInstrInfo &MCII) {
322  unsigned Opcode = MI.getOpcode();
323  switch (Opcode) {
324  default:
325  return X86::COND_INVALID;
326  case X86::JCC_1: {
327  const MCInstrDesc &Desc = MCII.get(Opcode);
328  return static_cast<X86::CondCode>(
329  MI.getOperand(Desc.getNumOperands() - 1).getImm());
330  }
331  }
332 }
333 
334 static X86::SecondMacroFusionInstKind
335 classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII) {
336  X86::CondCode CC = getCondFromBranch(MI, MCII);
337  return classifySecondCondCodeInMacroFusion(CC);
338 }
339 
340 /// Check if the instruction uses RIP relative addressing.
341 static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
342  unsigned Opcode = MI.getOpcode();
343  const MCInstrDesc &Desc = MCII.get(Opcode);
344  uint64_t TSFlags = Desc.TSFlags;
345  unsigned CurOp = X86II::getOperandBias(Desc);
346  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
347  if (MemoryOperand < 0)
348  return false;
349  unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
350  unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
351  return (BaseReg == X86::RIP);
352 }
353 
354 /// Check if the instruction is a prefix.
355 static bool isPrefix(const MCInst &MI, const MCInstrInfo &MCII) {
356  return X86II::isPrefix(MCII.get(MI.getOpcode()).TSFlags);
357 }
358 
359 /// Check if the instruction is valid as the first instruction in macro fusion.
360 static bool isFirstMacroFusibleInst(const MCInst &Inst,
361  const MCInstrInfo &MCII) {
362  // An Intel instruction with RIP relative addressing is not macro fusible.
363  if (isRIPRelative(Inst, MCII))
364  return false;
365  const X86::FirstMacroFusionInstKind FIK =
366  X86::classifyFirstOpcodeInMacroFusion(Inst.getOpcode());
367  return FIK != X86::FirstMacroFusionInstKind::Invalid;
368 }
369 
370 /// X86 can reduce the bytes of NOP by padding instructions with prefixes to
371 /// get better performance in some cases. Here, we determine which prefix is
372 /// the most suitable.
373 ///
374 /// If the instruction has a segment override prefix, use the existing one.
375 /// If the target is 64-bit, use CS.
376 /// If the target is 32-bit,
377 /// - If the instruction has an ESP/EBP base register, use SS.
378 /// - Otherwise use DS.
379 uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
380  assert((STI.hasFeature(X86::Mode32Bit) || STI.hasFeature(X86::Mode64Bit)) &&
381  "Prefixes can be added only in 32-bit or 64-bit mode.");
382  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
383  uint64_t TSFlags = Desc.TSFlags;
384 
385  // Determine where the memory operand starts, if present.
386  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
387  if (MemoryOperand != -1)
388  MemoryOperand += X86II::getOperandBias(Desc);
389 
390  unsigned SegmentReg = 0;
391  if (MemoryOperand >= 0) {
392  // Check for explicit segment override on memory operand.
393  SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
394  }
395 
396  switch (TSFlags & X86II::FormMask) {
397  default:
398  break;
399  case X86II::RawFrmDstSrc: {
400  // Check segment override opcode prefix as needed (not for %ds).
401  if (Inst.getOperand(2).getReg() != X86::DS)
402  SegmentReg = Inst.getOperand(2).getReg();
403  break;
404  }
405  case X86II::RawFrmSrc: {
406  // Check segment override opcode prefix as needed (not for %ds).
407  if (Inst.getOperand(1).getReg() != X86::DS)
408  SegmentReg = Inst.getOperand(1).getReg();
409  break;
410  }
411  case X86II::RawFrmMemOffs: {
412  // Check segment override opcode prefix as needed.
413  SegmentReg = Inst.getOperand(1).getReg();
414  break;
415  }
416  }
417 
418  if (SegmentReg != 0)
419  return X86::getSegmentOverridePrefixForReg(SegmentReg);
420 
421  if (STI.hasFeature(X86::Mode64Bit))
422  return X86::CS_Encoding;
423 
424  if (MemoryOperand >= 0) {
425  unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
426  unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
427  if (BaseReg == X86::ESP || BaseReg == X86::EBP)
428  return X86::SS_Encoding;
429  }
430  return X86::DS_Encoding;
431 }
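// Illustrative sketch of the rules above, using the standard x86
// segment-override prefix bytes: in 64-bit mode an instruction such as
// "movl %eax, (%rcx)" would be padded with CS bytes (0x2E); in 32-bit mode
// "movl %eax, (%esp)" would get SS (0x36) and "movl %eax, (%ecx)" would get
// DS (0x3E), unless an explicit segment override is already present.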
432 
433 /// Check if the two instructions will be macro-fused on the target cpu.
434 bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
435  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
436  if (!InstDesc.isConditionalBranch())
437  return false;
438  if (!isFirstMacroFusibleInst(Cmp, *MCII))
439  return false;
440  const X86::FirstMacroFusionInstKind CmpKind =
441  X86::classifyFirstOpcodeInMacroFusion(Cmp.getOpcode());
442  const X86::SecondMacroFusionInstKind BranchKind =
443  classifySecondInstInMacroFusion(Jcc, *MCII);
444  return X86::isMacroFused(CmpKind, BranchKind);
445 }
446 
447 /// Check if the instruction has a variant symbol operand.
448 static bool hasVariantSymbol(const MCInst &MI) {
449  for (auto &Operand : MI) {
450  if (!Operand.isExpr())
451  continue;
452  const MCExpr &Expr = *Operand.getExpr();
453  if (Expr.getKind() == MCExpr::SymbolRef &&
454  cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
455  return true;
456  }
457  return false;
458 }
459 
460 bool X86AsmBackend::allowAutoPadding() const {
461  return (AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone);
462 }
463 
464 bool X86AsmBackend::allowEnhancedRelaxation() const {
465  return allowAutoPadding() && TargetPrefixMax != 0 && X86PadForBranchAlign;
466 }
467 
468 /// X86 has certain instructions which enable interrupts exactly one
469 /// instruction *after* the instruction which stores to SS. Return true if the
470 /// given instruction has such an interrupt delay slot.
471 static bool hasInterruptDelaySlot(const MCInst &Inst) {
472  switch (Inst.getOpcode()) {
473  case X86::POPSS16:
474  case X86::POPSS32:
475  case X86::STI:
476  return true;
477 
478  case X86::MOV16sr:
479  case X86::MOV32sr:
480  case X86::MOV64sr:
481  case X86::MOV16sm:
482  if (Inst.getOperand(0).getReg() == X86::SS)
483  return true;
484  break;
485  }
486  return false;
487 }
488 
489 /// Check if the instruction to be emitted is right after any data.
490 static bool
491 isRightAfterData(MCFragment *CurrentFragment,
492  const std::pair<MCFragment *, size_t> &PrevInstPosition) {
493  MCFragment *F = CurrentFragment;
494  // Empty data fragments may be created to prevent further data being
495  // added into the previous fragment; we need to skip them since they
496  // have no contents.
497  for (; isa_and_nonnull<MCDataFragment>(F); F = F->getPrevNode())
498  if (cast<MCDataFragment>(F)->getContents().size() != 0)
499  break;
500 
501  // Since data is always emitted into a DataFragment, our check strategy is
502  // simple here.
503  // - If the fragment is a DataFragment
504  // - If it's not the fragment where the previous instruction is,
505  // returns true.
506  // - If it's the fragment holding the previous instruction but its
507  // size changed since the previous instruction was emitted into
508  // it, returns true.
509  // - Otherwise returns false.
510  // - If the fragment is not a DataFragment, returns false.
511  if (auto *DF = dyn_cast_or_null<MCDataFragment>(F))
512  return DF != PrevInstPosition.first ||
513  DF->getContents().size() != PrevInstPosition.second;
514 
515  return false;
516 }
517 
518 /// \returns the fragment size if it has instructions, otherwise returns 0.
519 static size_t getSizeForInstFragment(const MCFragment *F) {
520  if (!F || !F->hasInstructions())
521  return 0;
522  // MCEncodedFragmentWithContents being templated makes this tricky.
523  switch (F->getKind()) {
524  default:
525  llvm_unreachable("Unknown fragment with instructions!");
526  case MCFragment::FT_Data:
527  return cast<MCDataFragment>(*F).getContents().size();
528  case MCFragment::FT_Relaxable:
529  return cast<MCRelaxableFragment>(*F).getContents().size();
530  case MCFragment::FT_CompactEncodedInst:
531  return cast<MCCompactEncodedInstFragment>(*F).getContents().size();
532  }
533 }
534 
535 /// Return true if we can insert NOP or prefixes automatically before the
536 /// instruction to be emitted.
537 bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
538  if (hasVariantSymbol(Inst))
539  // Linker may rewrite the instruction with variant symbol operand(e.g.
540  // TLSCALL).
541  return false;
542 
543  if (hasInterruptDelaySlot(PrevInst))
544  // If this instruction follows an interrupt enabling instruction with a one
545  // instruction delay, inserting a nop would change behavior.
546  return false;
547 
548  if (isPrefix(PrevInst, *MCII))
549  // If this instruction follows a prefix, inserting a nop/prefix would change
550  // semantics.
551  return false;
552 
553  if (isPrefix(Inst, *MCII))
554  // If this instruction is a prefix, inserting a prefix would change
555  // semantics.
556  return false;
557 
558  if (isRightAfterData(OS.getCurrentFragment(), PrevInstPosition))
559  // If this instruction follows any data, there is no clear
560  // instruction boundary, inserting a nop/prefix would change semantics.
561  return false;
562 
563  return true;
564 }
565 
566 bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
567  if (!OS.getAllowAutoPadding())
568  return false;
569  assert(allowAutoPadding() && "incorrect initialization!");
570 
571  // We only pad in text section.
572  if (!OS.getCurrentSectionOnly()->getKind().isText())
573  return false;
574 
575  // To be Done: Currently don't deal with Bundle cases.
576  if (OS.getAssembler().isBundlingEnabled())
577  return false;
578 
579  // Branches only need to be aligned in 32-bit or 64-bit mode.
580  if (!(STI.hasFeature(X86::Mode64Bit) || STI.hasFeature(X86::Mode32Bit)))
581  return false;
582 
583  return true;
584 }
585 
586 /// Check if the instruction operand needs to be aligned.
587 bool X86AsmBackend::needAlign(const MCInst &Inst) const {
588  const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
589  return (Desc.isConditionalBranch() &&
590  (AlignBranchType & X86::AlignBranchJcc)) ||
591  (Desc.isUnconditionalBranch() &&
592  (AlignBranchType & X86::AlignBranchJmp)) ||
593  (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
594  (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
595  (Desc.isIndirectBranch() &&
596  (AlignBranchType & X86::AlignBranchIndirect));
597 }
598 
599 /// Insert BoundaryAlignFragment before instructions to align branches.
600 void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
601  const MCInst &Inst) {
602  CanPadInst = canPadInst(Inst, OS);
603 
604  if (!canPadBranches(OS))
605  return;
606 
607  if (!isMacroFused(PrevInst, Inst))
608  // Macro fusion does not actually happen, so clear the pending fragment.
609  PendingBA = nullptr;
610 
611  if (!CanPadInst)
612  return;
613 
614  if (PendingBA && OS.getCurrentFragment()->getPrevNode() == PendingBA) {
615  // Macro fusion actually happens and there is no other fragment inserted
616  // after the previous instruction.
617  //
618  // Do nothing here since we already inserted a BoundaryAlign fragment when
619  // we met the first instruction in the fused pair and we'll tie them
620  // together in emitInstructionEnd.
621  //
622  // Note: When there is at least one fragment, such as MCAlignFragment,
623  // inserted after the previous instruction, e.g.
624  //
625  // \code
626  // cmp %rax %rcx
627  // .align 16
628  // je .Label0
629  // \endcode
630  //
631  // We will treat the JCC as an unfused branch although it may be fused
632  // with the CMP.
633  return;
634  }
635 
636  if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
637  isFirstMacroFusibleInst(Inst, *MCII))) {
638  // If we meet an unfused branch or the first instruction in a fusible pair,
639  // insert a BoundaryAlign fragment.
640  OS.insert(PendingBA = new MCBoundaryAlignFragment(AlignBoundary));
641  }
642 }
643 
644 /// Set the last fragment to be aligned for the BoundaryAlignFragment.
645 void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) {
646  PrevInst = Inst;
647  MCFragment *CF = OS.getCurrentFragment();
648  PrevInstPosition = std::make_pair(CF, getSizeForInstFragment(CF));
649  if (auto *F = dyn_cast_or_null<MCRelaxableFragment>(CF))
650  F->setAllowAutoPadding(CanPadInst);
651 
652  if (!canPadBranches(OS))
653  return;
654 
655  if (!needAlign(Inst) || !PendingBA)
656  return;
657 
658  // Tie the aligned instructions into the pending BoundaryAlign.
659  PendingBA->setLastFragment(CF);
660  PendingBA = nullptr;
661 
662  // We need to ensure that further data isn't added to the current
663  // DataFragment, so that we can get the size of instructions later in
664  // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
665  // DataFragment.
666  if (isa_and_nonnull<MCDataFragment>(CF))
667  OS.insert(new MCDataFragment());
668 
669  // Update the maximum alignment on the current section if necessary.
670  MCSection *Sec = OS.getCurrentSectionOnly();
671  if (AlignBoundary.value() > Sec->getAlignment())
672  Sec->setAlignment(AlignBoundary);
673 }
674 
675 Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
676  if (STI.getTargetTriple().isOSBinFormatELF()) {
677  unsigned Type;
678  if (STI.getTargetTriple().getArch() == Triple::x86_64) {
679  Type = llvm::StringSwitch<unsigned>(Name)
680 #define ELF_RELOC(X, Y) .Case(#X, Y)
681 #include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
682 #undef ELF_RELOC
683  .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
684  .Case("BFD_RELOC_8", ELF::R_X86_64_8)
685  .Case("BFD_RELOC_16", ELF::R_X86_64_16)
686  .Case("BFD_RELOC_32", ELF::R_X86_64_32)
687  .Case("BFD_RELOC_64", ELF::R_X86_64_64)
688  .Default(-1u);
689  } else {
690  Type = llvm::StringSwitch<unsigned>(Name)
691 #define ELF_RELOC(X, Y) .Case(#X, Y)
692 #include "llvm/BinaryFormat/ELFRelocs/i386.def"
693 #undef ELF_RELOC
694  .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
695  .Case("BFD_RELOC_8", ELF::R_386_8)
696  .Case("BFD_RELOC_16", ELF::R_386_16)
697  .Case("BFD_RELOC_32", ELF::R_386_32)
698  .Default(-1u);
699  }
700  if (Type == -1u)
701  return None;
702  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
703  }
704  return MCAsmBackend::getFixupKind(Name);
705 }
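// The names accepted here come from .reloc directives. A minimal example,
// assuming an x86-64 ELF target:
//
//   .reloc ., R_X86_64_NONE, foo    # BFD_RELOC_* aliases are also accepted
//
// Such fixups are encoded as FirstLiteralRelocationKind plus the relocation
// type and are passed through to the object writer without further processing
// (see getFixupKindInfo and applyFixup below).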
706 
707 const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
708  const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
709  {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
710  {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
711  {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
712  {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
713  {"reloc_signed_4byte", 0, 32, 0},
714  {"reloc_signed_4byte_relax", 0, 32, 0},
715  {"reloc_global_offset_table", 0, 32, 0},
716  {"reloc_global_offset_table8", 0, 64, 0},
717  {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
718  };
719 
720  // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
721  // do not require any extra processing.
722  if (Kind >= FirstLiteralRelocationKind)
723  return MCAsmBackend::getFixupKindInfo(FK_NONE);
724 
725  if (Kind < FirstTargetFixupKind)
726  return MCAsmBackend::getFixupKindInfo(Kind);
727 
728  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
729  "Invalid kind!");
730  assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
731  return Infos[Kind - FirstTargetFixupKind];
732 }
733 
734 bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
735  const MCFixup &Fixup,
736  const MCValue &) {
737  return Fixup.getKind() >= FirstLiteralRelocationKind;
738 }
739 
740 static unsigned getFixupKindSize(unsigned Kind) {
741  switch (Kind) {
742  default:
743  llvm_unreachable("invalid fixup kind!");
744  case FK_NONE:
745  return 0;
746  case FK_PCRel_1:
747  case FK_SecRel_1:
748  case FK_Data_1:
749  return 1;
750  case FK_PCRel_2:
751  case FK_SecRel_2:
752  case FK_Data_2:
753  return 2;
754  case FK_PCRel_4:
755  case X86::reloc_riprel_4byte:
756  case X86::reloc_riprel_4byte_relax:
757  case X86::reloc_riprel_4byte_relax_rex:
758  case X86::reloc_riprel_4byte_movq_load:
759  case X86::reloc_signed_4byte:
760  case X86::reloc_signed_4byte_relax:
761  case X86::reloc_global_offset_table:
762  case X86::reloc_branch_4byte_pcrel:
763  case FK_SecRel_4:
764  case FK_Data_4:
765  return 4;
766  case FK_PCRel_8:
767  case FK_SecRel_8:
768  case FK_Data_8:
769  case X86::reloc_global_offset_table8:
770  return 8;
771  }
772 }
773 
774 void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
775  const MCValue &Target,
776  MutableArrayRef<char> Data,
777  uint64_t Value, bool IsResolved,
778  const MCSubtargetInfo *STI) const {
779  unsigned Kind = Fixup.getKind();
780  if (Kind >= FirstLiteralRelocationKind)
781  return;
782  unsigned Size = getFixupKindSize(Kind);
783 
784  assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
785 
786  int64_t SignedValue = static_cast<int64_t>(Value);
787  if ((Target.isAbsolute() || IsResolved) &&
788  getFixupKindInfo(Fixup.getKind()).Flags &
789  MCFixupKindInfo::FKF_IsPCRel) {
790  // check that PC relative fixup fits into the fixup size.
791  if (Size > 0 && !isIntN(Size * 8, SignedValue))
792  Asm.getContext().reportError(
793  Fixup.getLoc(), "value of " + Twine(SignedValue) +
794  " is too large for field of " + Twine(Size) +
795  ((Size == 1) ? " byte." : " bytes."));
796  } else {
797  // Check that upper bits are either all zeros or all ones.
798  // Specifically ignore overflow/underflow as long as the leakage is
799  // limited to the lower bits. This is to remain compatible with
800  // other assemblers.
801  assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
802  "Value does not fit in the Fixup field");
803  }
804 
805  for (unsigned i = 0; i != Size; ++i)
806  Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
807 }
808 
809 bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
810  const MCSubtargetInfo &STI) const {
811  // Branches can always be relaxed in either mode.
812  if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
813  return true;
814 
815  // Check if this instruction is ever relaxable.
816  if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
817  return false;
818 
819 
820  // Check if the relaxable operand has an expression. For the current set of
821  // relaxable instructions, the relaxable operand is always the last operand.
822  unsigned RelaxableOp = Inst.getNumOperands() - 1;
823  if (Inst.getOperand(RelaxableOp).isExpr())
824  return true;
825 
826  return false;
827 }
828 
829 bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
830  uint64_t Value,
831  const MCRelaxableFragment *DF,
832  const MCAsmLayout &Layout) const {
833  // Relax if the value is too big for a (signed) i8.
834  return !isInt<8>(Value);
835 }
836 
837 // FIXME: Can tblgen help at all here to verify there aren't other instructions
838 // we can relax?
839 void X86AsmBackend::relaxInstruction(MCInst &Inst,
840  const MCSubtargetInfo &STI) const {
841  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
842  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
843  unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
844 
845  if (RelaxedOp == Inst.getOpcode()) {
846  SmallString<256> Tmp;
847  raw_svector_ostream OS(Tmp);
848  Inst.dump_pretty(OS);
849  OS << "\n";
850  report_fatal_error("unexpected instruction to relax: " + OS.str());
851  }
852 
853  Inst.setOpcode(RelaxedOp);
854 }
855 
856 /// Return true if this instruction has been fully relaxed into its most
857 /// general available form.
858 static bool isFullyRelaxed(const MCRelaxableFragment &RF) {
859  auto &Inst = RF.getInst();
860  auto &STI = *RF.getSubtargetInfo();
861  bool Is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
862  return getRelaxedOpcode(Inst, Is16BitMode) == Inst.getOpcode();
863 }
864 
865 bool X86AsmBackend::padInstructionViaPrefix(MCRelaxableFragment &RF,
866  MCCodeEmitter &Emitter,
867  unsigned &RemainingSize) const {
868  if (!RF.getAllowAutoPadding())
869  return false;
870  // If the instruction isn't fully relaxed, shifting it around might require a
871  // larger value for one of the fixups than can be encoded. The outer loop
872  // will also catch this before moving to the next instruction, but we need to
873  // prevent padding this single instruction as well.
874  if (!isFullyRelaxed(RF))
875  return false;
876 
877  const unsigned OldSize = RF.getContents().size();
878  if (OldSize == 15)
879  return false;
880 
881  const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
882  const unsigned RemainingPrefixSize = [&]() -> unsigned {
883  SmallString<15> Code;
884  raw_svector_ostream VecOS(Code);
885  Emitter.emitPrefix(RF.getInst(), VecOS, STI);
886  assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
887 
888  // TODO: It turns out we need a decent amount of plumbing for the target
889  // specific bits to determine the number of prefixes it is safe to add. Various
890  // targets (older chips mostly, but also Atom family) encounter decoder
891  // stalls with too many prefixes. For testing purposes, we set the value
892  // externally for the moment.
893  unsigned ExistingPrefixSize = Code.size();
894  if (TargetPrefixMax <= ExistingPrefixSize)
895  return 0;
896  return TargetPrefixMax - ExistingPrefixSize;
897  }();
898  const unsigned PrefixBytesToAdd =
899  std::min(MaxPossiblePad, RemainingPrefixSize);
900  if (PrefixBytesToAdd == 0)
901  return false;
902 
903  const uint8_t Prefix = determinePaddingPrefix(RF.getInst());
904 
905  SmallString<256> Code;
906  Code.append(PrefixBytesToAdd, Prefix);
907  Code.append(RF.getContents().begin(), RF.getContents().end());
908  RF.getContents() = Code;
909 
910  // Adjust the fixups for the change in offsets
911  for (auto &F : RF.getFixups()) {
912  F.setOffset(F.getOffset() + PrefixBytesToAdd);
913  }
914 
915  RemainingSize -= PrefixBytesToAdd;
916  return true;
917 }
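// Worked example for the prefix-padding path above (the flag value is only
// illustrative): with -x86-pad-max-prefix-size=5, a 4-byte instruction that
// already carries one prefix byte and needs three more bytes of padding gets
// min(15 - 4, 3, 5 - 1) = 3 copies of the chosen segment-override byte
// prepended, and every fixup offset in the fragment is shifted right by 3.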
918 
919 bool X86AsmBackend::padInstructionViaRelaxation(MCRelaxableFragment &RF,
920  MCCodeEmitter &Emitter,
921  unsigned &RemainingSize) const {
922  if (isFullyRelaxed(RF))
923  // TODO: There are lots of other tricks we could apply for increasing
924  // encoding size without impacting performance.
925  return false;
926 
927  MCInst Relaxed = RF.getInst();
928  relaxInstruction(Relaxed, *RF.getSubtargetInfo());
929 
930  SmallVector<MCFixup, 4> Fixups;
931  SmallString<15> Code;
932  raw_svector_ostream VecOS(Code);
933  Emitter.encodeInstruction(Relaxed, VecOS, Fixups, *RF.getSubtargetInfo());
934  const unsigned OldSize = RF.getContents().size();
935  const unsigned NewSize = Code.size();
936  assert(NewSize >= OldSize && "size decrease during relaxation?");
937  unsigned Delta = NewSize - OldSize;
938  if (Delta > RemainingSize)
939  return false;
940  RF.setInst(Relaxed);
941  RF.getContents() = Code;
942  RF.getFixups() = Fixups;
943  RemainingSize -= Delta;
944  return true;
945 }
946 
947 bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
948  MCCodeEmitter &Emitter,
949  unsigned &RemainingSize) const {
950  bool Changed = false;
951  if (RemainingSize != 0)
952  Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
953  if (RemainingSize != 0)
954  Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
955  return Changed;
956 }
957 
958 void X86AsmBackend::finishLayout(MCAssembler const &Asm,
959  MCAsmLayout &Layout) const {
960  // See if we can further relax some instructions to cut down on the number of
961  // nop bytes required for code alignment. The actual win is in reducing
962  // instruction count, not number of bytes. Modern X86-64 can easily end up
963  // decode limited. It is often better to reduce the number of instructions
964  // (i.e. eliminate nops) even at the cost of increasing the size and
965  // complexity of others.
966  if (!X86PadForAlign && !X86PadForBranchAlign)
967  return;
968 
969  // The processed regions are delimited by LabeledFragments. -g may have more
970  // MCSymbols and therefore different relaxation results. X86PadForAlign is
971  // disabled by default to eliminate the -g vs non -g difference.
972  DenseSet<MCFragment *> LabeledFragments;
973  for (const MCSymbol &S : Asm.symbols())
974  LabeledFragments.insert(S.getFragment(false));
975 
976  for (MCSection &Sec : Asm) {
977  if (!Sec.getKind().isText())
978  continue;
979 
980  SmallVector<MCRelaxableFragment *, 4> Relaxable;
981  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
982  MCFragment &F = *I;
983 
984  if (LabeledFragments.count(&F))
985  Relaxable.clear();
986 
987  if (F.getKind() == MCFragment::FT_Data ||
988  F.getKind() == MCFragment::FT_CompactEncodedInst)
989  // Skip and ignore
990  continue;
991 
992  if (F.getKind() == MCFragment::FT_Relaxable) {
993  auto &RF = cast<MCRelaxableFragment>(*I);
994  Relaxable.push_back(&RF);
995  continue;
996  }
997 
998  auto canHandle = [](MCFragment &F) -> bool {
999  switch (F.getKind()) {
1000  default:
1001  return false;
1002  case MCFragment::FT_Align:
1003  return X86PadForAlign;
1004  case MCFragment::FT_BoundaryAlign:
1005  return X86PadForBranchAlign;
1006  }
1007  };
1008  // For any unhandled kind, assume we can't change layout.
1009  if (!canHandle(F)) {
1010  Relaxable.clear();
1011  continue;
1012  }
1013 
1014 #ifndef NDEBUG
1015  const uint64_t OrigOffset = Layout.getFragmentOffset(&F);
1016 #endif
1017  const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);
1018 
1019  // To keep the effects local, prefer to relax instructions closest to
1020  // the align directive. This is purely about human understandability
1021  // of the resulting code. If we later find a reason to expand
1022  // particular instructions over others, we can adjust.
1023  MCFragment *FirstChangedFragment = nullptr;
1024  unsigned RemainingSize = OrigSize;
1025  while (!Relaxable.empty() && RemainingSize != 0) {
1026  auto &RF = *Relaxable.pop_back_val();
1027  // Give the backend a chance to play any tricks it wishes to increase
1028  // the encoding size of the given instruction. Target independent code
1029  // will try further relaxation, but targets may play further tricks.
1030  if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
1031  FirstChangedFragment = &RF;
1032 
1033  // If we have an instruction which hasn't been fully relaxed, we can't
1034  // skip past it and insert bytes before it. Changing its starting
1035  // offset might require a larger negative offset than it can encode.
1036  // We don't need to worry about larger positive offsets as none of the
1037  // possible offsets between this and our align are visible, and the
1038  // ones afterwards aren't changing.
1039  if (!isFullyRelaxed(RF))
1040  break;
1041  }
1042  Relaxable.clear();
1043 
1044  if (FirstChangedFragment) {
1045  // Make sure the offsets for any fragments in the affected range get
1046  // updated. Note that this (conservatively) invalidates the offsets of
1047  // those following, but this is not required.
1048  Layout.invalidateFragmentsFrom(FirstChangedFragment);
1049  }
1050 
1051  // BoundaryAlign explicitly tracks its size (unlike align).
1052  if (F.getKind() == MCFragment::FT_BoundaryAlign)
1053  cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
1054 
1055 #ifndef NDEBUG
1056  const uint64_t FinalOffset = Layout.getFragmentOffset(&F);
1057  const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
1058  assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
1059  "can't move start of next fragment!");
1060  assert(FinalSize == RemainingSize && "inconsistent size computation?");
1061 #endif
1062 
1063  // If we're looking at a boundary align, make sure we don't try to pad
1064  // its target instructions for some following directive. Doing so would
1065  // break the alignment of the current boundary align.
1066  if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
1067  const MCFragment *LastFragment = BF->getLastFragment();
1068  if (!LastFragment)
1069  continue;
1070  while (&*I != LastFragment)
1071  ++I;
1072  }
1073  }
1074  }
1075 
1076  // The layout is done. Mark every fragment as valid.
1077  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
1078  MCSection &Section = *Layout.getSectionOrder()[i];
1079  Layout.getFragmentOffset(&*Section.getFragmentList().rbegin());
1080  Asm.computeFragmentSize(Layout, *Section.getFragmentList().rbegin());
1081  }
1082 }
1083 
1084 unsigned X86AsmBackend::getMaximumNopSize() const {
1085  if (STI.hasFeature(X86::Mode16Bit))
1086  return 4;
1087  if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Mode64Bit))
1088  return 1;
1089  if (STI.getFeatureBits()[X86::FeatureFast7ByteNOP])
1090  return 7;
1091  if (STI.getFeatureBits()[X86::FeatureFast15ByteNOP])
1092  return 15;
1093  if (STI.getFeatureBits()[X86::FeatureFast11ByteNOP])
1094  return 11;
1095  // FIXME: handle 32-bit mode
1096  // 15 bytes is the longest single NOP instruction, but 10 bytes is
1097  // commonly the longest that can be efficiently decoded.
1098  return 10;
1099 }
1100 
1101 /// Write a sequence of optimal nops to the output, covering \p Count
1102 /// bytes.
1103 /// \return - true on success, false on failure
1104 bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
1105  static const char Nops32Bit[10][11] = {
1106  // nop
1107  "\x90",
1108  // xchg %ax,%ax
1109  "\x66\x90",
1110  // nopl (%[re]ax)
1111  "\x0f\x1f\x00",
1112  // nopl 0(%[re]ax)
1113  "\x0f\x1f\x40\x00",
1114  // nopl 0(%[re]ax,%[re]ax,1)
1115  "\x0f\x1f\x44\x00\x00",
1116  // nopw 0(%[re]ax,%[re]ax,1)
1117  "\x66\x0f\x1f\x44\x00\x00",
1118  // nopl 0L(%[re]ax)
1119  "\x0f\x1f\x80\x00\x00\x00\x00",
1120  // nopl 0L(%[re]ax,%[re]ax,1)
1121  "\x0f\x1f\x84\x00\x00\x00\x00\x00",
1122  // nopw 0L(%[re]ax,%[re]ax,1)
1123  "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
1124  // nopw %cs:0L(%[re]ax,%[re]ax,1)
1125  "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
1126  };
1127 
1128  // 16-bit mode uses different nop patterns than 32-bit.
1129  static const char Nops16Bit[4][11] = {
1130  // nop
1131  "\x90",
1132  // xchg %eax,%eax
1133  "\x66\x90",
1134  // lea 0(%si),%si
1135  "\x8d\x74\x00",
1136  // lea 0w(%si),%si
1137  "\x8d\xb4\x00\x00",
1138  };
1139 
1140  const char(*Nops)[11] =
1141  STI.getFeatureBits()[X86::Mode16Bit] ? Nops16Bit : Nops32Bit;
1142 
1143  uint64_t MaxNopLength = (uint64_t)getMaximumNopSize();
1144 
1145  // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1146  // length.
1147  do {
1148  const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1149  const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1150  for (uint8_t i = 0; i < Prefixes; i++)
1151  OS << '\x66';
1152  const uint8_t Rest = ThisNopLength - Prefixes;
1153  if (Rest != 0)
1154  OS.write(Nops[Rest - 1], Rest);
1155  Count -= ThisNopLength;
1156  } while (Count != 0);
1157 
1158  return true;
1159 }
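// Worked example: on a 64-bit target with FeatureFast15ByteNOP, a request for
// Count == 14 is satisfied in one iteration with ThisNopLength == 14: four
// 0x66 prefixes are written, followed by the 10-byte
// "nopw %cs:0L(%rax,%rax,1)" pattern. Without that feature MaxNopLength is 10,
// so the same request is split into the 10-byte NOP followed by the 4-byte
// "nopl 0(%rax)".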
1160 
1161 /* *** */
1162 
1163 namespace {
1164 
1165 class ELFX86AsmBackend : public X86AsmBackend {
1166 public:
1167  uint8_t OSABI;
1168  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
1169  : X86AsmBackend(T, STI), OSABI(OSABI) {}
1170 };
1171 
1172 class ELFX86_32AsmBackend : public ELFX86AsmBackend {
1173 public:
1174  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
1175  const MCSubtargetInfo &STI)
1176  : ELFX86AsmBackend(T, OSABI, STI) {}
1177 
1178  std::unique_ptr<MCObjectTargetWriter>
1179  createObjectTargetWriter() const override {
1180  return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
1181  }
1182 };
1183 
1184 class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1185 public:
1186  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1187  const MCSubtargetInfo &STI)
1188  : ELFX86AsmBackend(T, OSABI, STI) {}
1189 
1190  std::unique_ptr<MCObjectTargetWriter>
1191  createObjectTargetWriter() const override {
1192  return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1193  ELF::EM_X86_64);
1194  }
1195 };
1196 
1197 class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1198 public:
1199  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1200  const MCSubtargetInfo &STI)
1201  : ELFX86AsmBackend(T, OSABI, STI) {}
1202 
1203  std::unique_ptr<MCObjectTargetWriter>
1204  createObjectTargetWriter() const override {
1205  return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1206  ELF::EM_IAMCU);
1207  }
1208 };
1209 
1210 class ELFX86_64AsmBackend : public ELFX86AsmBackend {
1211 public:
1212  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
1213  const MCSubtargetInfo &STI)
1214  : ELFX86AsmBackend(T, OSABI, STI) {}
1215 
1216  std::unique_ptr<MCObjectTargetWriter>
1217  createObjectTargetWriter() const override {
1218  return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
1219  }
1220 };
1221 
1222 class WindowsX86AsmBackend : public X86AsmBackend {
1223  bool Is64Bit;
1224 
1225 public:
1226  WindowsX86AsmBackend(const Target &T, bool is64Bit,
1227  const MCSubtargetInfo &STI)
1228  : X86AsmBackend(T, STI)
1229  , Is64Bit(is64Bit) {
1230  }
1231 
1232  Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
1233  return StringSwitch<Optional<MCFixupKind>>(Name)
1234  .Case("dir32", FK_Data_4)
1235  .Case("secrel32", FK_SecRel_4)
1236  .Case("secidx", FK_SecRel_2)
1237  .Default(MCAsmBackend::getFixupKind(Name));
1238  }
1239 
1240  std::unique_ptr<MCObjectTargetWriter>
1241  createObjectTargetWriter() const override {
1242  return createX86WinCOFFObjectWriter(Is64Bit);
1243  }
1244 };
1245 
1246 namespace CU {
1247 
1248  /// Compact unwind encoding values.
1249 enum CompactUnwindEncodings {
1250  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
1251  /// the return address, then [RE]SP is moved to [RE]BP.
1252  UNWIND_MODE_BP_FRAME = 0x01000000,
1253 
1254  /// A frameless function with a small constant stack size.
1255  UNWIND_MODE_STACK_IMMD = 0x02000000,
1256 
1257  /// A frameless function with a large constant stack size.
1258  UNWIND_MODE_STACK_IND = 0x03000000,
1259 
1260  /// No compact unwind encoding is available.
1261  UNWIND_MODE_DWARF = 0x04000000,
1262 
1263  /// Mask for encoding the frame registers.
1264  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,
1265 
1266  /// Mask for encoding the frameless registers.
1267  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
1268  };
1269 
1270 } // namespace CU
1271 
1272 class DarwinX86AsmBackend : public X86AsmBackend {
1273  const MCRegisterInfo &MRI;
1274 
1275  /// Number of registers that can be saved in a compact unwind encoding.
1276  enum { CU_NUM_SAVED_REGS = 6 };
1277 
1278  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1279  Triple TT;
1280  bool Is64Bit;
1281 
1282  unsigned OffsetSize; ///< Offset of a "push" instruction.
1283  unsigned MoveInstrSize; ///< Size of a "move" instruction.
1284  unsigned StackDivide; ///< Amount to adjust stack size by.
1285 protected:
1286  /// Size of a "push" instruction for the given register.
1287  unsigned PushInstrSize(unsigned Reg) const {
1288  switch (Reg) {
1289  case X86::EBX:
1290  case X86::ECX:
1291  case X86::EDX:
1292  case X86::EDI:
1293  case X86::ESI:
1294  case X86::EBP:
1295  case X86::RBX:
1296  case X86::RBP:
1297  return 1;
1298  case X86::R12:
1299  case X86::R13:
1300  case X86::R14:
1301  case X86::R15:
1302  return 2;
1303  }
1304  return 1;
1305  }
1306 
1307 private:
1308  /// Get the compact unwind number for a given register. The number
1309  /// corresponds to the enum lists in compact_unwind_encoding.h.
1310  int getCompactUnwindRegNum(unsigned Reg) const {
1311  static const MCPhysReg CU32BitRegs[7] = {
1312  X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1313  };
1314  static const MCPhysReg CU64BitRegs[] = {
1315  X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1316  };
1317  const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1318  for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1319  if (*CURegs == Reg)
1320  return Idx;
1321 
1322  return -1;
1323  }
1324 
1325  /// Return the registers encoded for a compact encoding with a frame
1326  /// pointer.
1327  uint32_t encodeCompactUnwindRegistersWithFrame() const {
1328  // Encode the registers in the order they were saved --- 3-bits per
1329  // register. The list of saved registers is assumed to be in reverse
1330  // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1331  uint32_t RegEnc = 0;
1332  for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1333  unsigned Reg = SavedRegs[i];
1334  if (Reg == 0) break;
1335 
1336  int CURegNum = getCompactUnwindRegNum(Reg);
1337  if (CURegNum == -1) return ~0U;
1338 
1339  // Encode the 3-bit register number in order, skipping over 3-bits for
1340  // each register.
1341  RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1342  }
1343 
1344  assert((RegEnc & 0x3FFFF) == RegEnc &&
1345  "Invalid compact register encoding!");
1346  return RegEnc;
1347  }
1348 
1349  /// Create the permutation encoding used with frameless stacks. It is
1350  /// passed the number of registers to be saved and an array of the registers
1351  /// saved.
1352  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
1353  // The saved registers are numbered from 1 to 6. In order to encode the
1354  // order in which they were saved, we re-number them according to their
1355  // place in the register order. The re-numbering is relative to the last
1356  // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
1357  // that order:
1358  //
1359  // Orig Re-Num
1360  // ---- ------
1361  // 6 6
1362  // 2 2
1363  // 4 3
1364  // 5 3
1365  //
1366  for (unsigned i = 0; i < RegCount; ++i) {
1367  int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
1368  if (CUReg == -1) return ~0U;
1369  SavedRegs[i] = CUReg;
1370  }
1371 
1372  // Reverse the list.
1373  std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
1374 
1375  uint32_t RenumRegs[CU_NUM_SAVED_REGS];
1376  for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
1377  unsigned Countless = 0;
1378  for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
1379  if (SavedRegs[j] < SavedRegs[i])
1380  ++Countless;
1381 
1382  RenumRegs[i] = SavedRegs[i] - Countless - 1;
1383  }
1384 
1385  // Take the renumbered values and encode them into a 10-bit number.
1386  uint32_t permutationEncoding = 0;
1387  switch (RegCount) {
1388  case 6:
1389  permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
1390  + 6 * RenumRegs[2] + 2 * RenumRegs[3]
1391  + RenumRegs[4];
1392  break;
1393  case 5:
1394  permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
1395  + 6 * RenumRegs[3] + 2 * RenumRegs[4]
1396  + RenumRegs[5];
1397  break;
1398  case 4:
1399  permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
1400  + 3 * RenumRegs[4] + RenumRegs[5];
1401  break;
1402  case 3:
1403  permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
1404  + RenumRegs[5];
1405  break;
1406  case 2:
1407  permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
1408  break;
1409  case 1:
1410  permutationEncoding |= RenumRegs[5];
1411  break;
1412  }
1413 
1414  assert((permutationEncoding & 0x3FF) == permutationEncoding &&
1415  "Invalid compact register encoding!");
1416  return permutationEncoding;
1417  }
1418 
1419 public:
1420  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
1421  const MCSubtargetInfo &STI)
1422  : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
1423  Is64Bit(TT.isArch64Bit()) {
1424  memset(SavedRegs, 0, sizeof(SavedRegs));
1425  OffsetSize = Is64Bit ? 8 : 4;
1426  MoveInstrSize = Is64Bit ? 3 : 2;
1427  StackDivide = Is64Bit ? 8 : 4;
1428  }
1429 
1430  std::unique_ptr<MCObjectTargetWriter>
1431  createObjectTargetWriter() const override {
1432  uint32_t CPUType = cantFail(MachO::getCPUType(TT));
1433  uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
1434  return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
1435  }
1436 
1437  /// Implementation of algorithm to generate the compact unwind encoding
1438  /// for the CFI instructions.
1439  uint32_t
1440  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const override {
1441  if (Instrs.empty()) return 0;
1442 
1443  // Reset the saved registers.
1444  unsigned SavedRegIdx = 0;
1445  memset(SavedRegs, 0, sizeof(SavedRegs));
1446 
1447  bool HasFP = false;
1448 
1449  // Encode that we are using EBP/RBP as the frame pointer.
1450  uint32_t CompactUnwindEncoding = 0;
1451 
1452  unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
1453  unsigned InstrOffset = 0;
1454  unsigned StackAdjust = 0;
1455  unsigned StackSize = 0;
1456  unsigned NumDefCFAOffsets = 0;
1457 
1458  for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
1459  const MCCFIInstruction &Inst = Instrs[i];
1460 
1461  switch (Inst.getOperation()) {
1462  default:
1463  // Any other CFI directives indicate a frame that we aren't prepared
1464  // to represent via compact unwind, so just bail out.
1465  return 0;
1466  case MCCFIInstruction::OpDefCfaRegister: {
1467  // Defines a frame pointer. E.g.
1468  //
1469  // movq %rsp, %rbp
1470  // L0:
1471  // .cfi_def_cfa_register %rbp
1472  //
1473  HasFP = true;
1474 
1475  // If the frame pointer is other than esp/rsp, we do not have a way to
1476  // generate a compact unwinding representation, so bail out.
1477  if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
1478  (Is64Bit ? X86::RBP : X86::EBP))
1479  return 0;
1480 
1481  // Reset the counts.
1482  memset(SavedRegs, 0, sizeof(SavedRegs));
1483  StackAdjust = 0;
1484  SavedRegIdx = 0;
1485  InstrOffset += MoveInstrSize;
1486  break;
1487  }
1488  case MCCFIInstruction::OpDefCfaOffset: {
1489  // Defines a new offset for the CFA. E.g.
1490  //
1491  // With frame:
1492  //
1493  // pushq %rbp
1494  // L0:
1495  // .cfi_def_cfa_offset 16
1496  //
1497  // Without frame:
1498  //
1499  // subq $72, %rsp
1500  // L0:
1501  // .cfi_def_cfa_offset 80
1502  //
1503  StackSize = Inst.getOffset() / StackDivide;
1504  ++NumDefCFAOffsets;
1505  break;
1506  }
1507  case MCCFIInstruction::OpOffset: {
1508  // Defines a "push" of a callee-saved register. E.g.
1509  //
1510  // pushq %r15
1511  // pushq %r14
1512  // pushq %rbx
1513  // L0:
1514  // subq $120, %rsp
1515  // L1:
1516  // .cfi_offset %rbx, -40
1517  // .cfi_offset %r14, -32
1518  // .cfi_offset %r15, -24
1519  //
1520  if (SavedRegIdx == CU_NUM_SAVED_REGS)
1521  // If there are too many saved registers, we cannot use a compact
1522  // unwind encoding.
1523  return CU::UNWIND_MODE_DWARF;
1524 
1525  unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1526  SavedRegs[SavedRegIdx++] = Reg;
1527  StackAdjust += OffsetSize;
1528  InstrOffset += PushInstrSize(Reg);
1529  break;
1530  }
1531  }
1532  }
1533 
1534  StackAdjust /= StackDivide;
1535 
1536  if (HasFP) {
1537  if ((StackAdjust & 0xFF) != StackAdjust)
1538  // Offset was too big for a compact unwind encoding.
1539  return CU::UNWIND_MODE_DWARF;
1540 
1541  // Get the encoding of the saved registers when we have a frame pointer.
1542  uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
1543  if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1544 
1545  CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
1546  CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
1547  CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
1548  } else {
1549  SubtractInstrIdx += InstrOffset;
1550  ++StackAdjust;
1551 
1552  if ((StackSize & 0xFF) == StackSize) {
1553  // Frameless stack with a small stack size.
1554  CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
1555 
1556  // Encode the stack size.
1557  CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
1558  } else {
1559  if ((StackAdjust & 0x7) != StackAdjust)
1560  // The extra stack adjustments are too big for us to handle.
1561  return CU::UNWIND_MODE_DWARF;
1562 
1563  // Frameless stack with an offset too large for us to encode compactly.
1564  CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
1565 
1566  // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
1567  // instruction.
1568  CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
1569 
1570  // Encode any extra stack adjustments (done via push instructions).
1571  CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
1572  }
1573 
1574  // Encode the number of registers saved. (Reverse the list first.)
1575  std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
1576  CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
1577 
1578  // Get the encoding of the saved registers when we don't have a frame
1579  // pointer.
1580  uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
1581  if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1582 
1583  // Encode the register encoding.
1584  CompactUnwindEncoding |=
1585  RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
1586  }
1587 
1588  return CompactUnwindEncoding;
1589  }
1590 };
1591 
1592 } // end anonymous namespace
1593 
1594 MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
1595  const MCSubtargetInfo &STI,
1596  const MCRegisterInfo &MRI,
1597  const MCTargetOptions &Options) {
1598  const Triple &TheTriple = STI.getTargetTriple();
1599  if (TheTriple.isOSBinFormatMachO())
1600  return new DarwinX86AsmBackend(T, MRI, STI);
1601 
1602  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1603  return new WindowsX86AsmBackend(T, false, STI);
1604 
1605  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1606 
1607  if (TheTriple.isOSIAMCU())
1608  return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);
1609 
1610  return new ELFX86_32AsmBackend(T, OSABI, STI);
1611 }
1612 
1613 MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
1614  const MCSubtargetInfo &STI,
1615  const MCRegisterInfo &MRI,
1616  const MCTargetOptions &Options) {
1617  const Triple &TheTriple = STI.getTargetTriple();
1618  if (TheTriple.isOSBinFormatMachO())
1619  return new DarwinX86AsmBackend(T, MRI, STI);
1620 
1621  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1622  return new WindowsX86AsmBackend(T, true, STI);
1623 
1624  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1625 
1626  if (TheTriple.getEnvironment() == Triple::GNUX32)
1627  return new ELFX86_X32AsmBackend(T, OSABI, STI);
1628  return new ELFX86_64AsmBackend(T, OSABI, STI);
1629 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::StringSwitch::Case
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:67
llvm::SectionKind::isText
bool isText() const
Definition: SectionKind.h:121
i
i
Definition: README.txt:29
llvm::X86::AddrBaseReg
@ AddrBaseReg
Definition: X86BaseInfo.h:32
llvm::EngineKind::Kind
Kind
Definition: ExecutionEngine.h:524
llvm::N86::EDI
@ EDI
Definition: X86MCTargetDesc.h:51
llvm::X86::CS_Encoding
@ CS_Encoding
Definition: X86BaseInfo.h:369
isRightAfterData
static bool isRightAfterData(MCFragment *CurrentFragment, const std::pair< MCFragment *, size_t > &PrevInstPosition)
Check if the instruction to be emitted is right after any data.
Definition: X86AsmBackend.cpp:491
MCDwarf.h
llvm
Definition: AllocatorList.h:23
llvm::X86II::getMemoryOperandNo
int getMemoryOperandNo(uint64_t TSFlags)
The function returns the MCInst operand # for the first field of the memory operand.
Definition: X86BaseInfo.h:1086
llvm::MCFragment::FT_BoundaryAlign
@ FT_BoundaryAlign
Definition: MCFragment.h:46
llvm::MCRelaxableFragment
A relaxable fragment holds on to its MCInst, since it may need to be relaxed during the assembler lay...
Definition: MCFragment.h:271
llvm::MCSymbol
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
llvm::MCCFIInstruction::OpDefCfaRegister
@ OpDefCfaRegister
Definition: MCDwarf.h:448
llvm::MCAsmBackend::getFixupKindInfo
virtual const MCFixupKindInfo & getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
Definition: MCAsmBackend.cpp:74
llvm::FK_PCRel_8
@ FK_PCRel_8
An eight-byte pc relative fixup.
Definition: MCFixup.h:31
getCondFromBranch
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
Definition: X86AsmBackend.cpp:320
llvm::Triple::isOSBinFormatCOFF
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:637
llvm::cl::Prefix
@ Prefix
Definition: CommandLine.h:161
llvm::cl::location
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:456
llvm::MCCFIInstruction::OpOffset
@ OpOffset
Definition: MCDwarf.h:447
llvm::StringSwitch::Default
LLVM_NODISCARD R Default(T Value)
Definition: StringSwitch.h:181
is64Bit
static bool is64Bit(const char *name)
Definition: X86Disassembler.cpp:1005
MCCodeEmitter.h
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:124
llvm::MCCodeEmitter::emitPrefix
virtual void emitPrefix(const MCInst &Inst, raw_ostream &OS, const MCSubtargetInfo &STI) const
Emit the prefixes of given instruction on the output stream.
Definition: MCCodeEmitter.h:37
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
llvm::FirstTargetFixupKind
@ FirstTargetFixupKind
Definition: MCFixup.h:55
llvm::X86::AddrSegmentReg
@ AddrSegmentReg
AddrSegmentReg - The operand # of the segment in the memory operand.
Definition: X86BaseInfo.h:38
ErrorHandling.h
llvm::X86::AlignBranchRet
@ AlignBranchRet
Definition: X86BaseInfo.h:363
llvm::X86AS::SS
@ SS
Definition: X86.h:184
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:45
MCAssembler.h
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:140
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:46
llvm::Triple::x86_64
@ x86_64
Definition: Triple.h:84
llvm::createX86ELFObjectWriter
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Definition: X86ELFObjectWriter.cpp:343
llvm::X86::AlignBranchFused
@ AlignBranchFused
Definition: X86BaseInfo.h:359
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:338
llvm::createX86WinCOFFObjectWriter
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
Definition: X86WinCOFFObjectWriter.cpp:111
llvm::MachO::CPUType
CPUType
Definition: MachO.h:1418
llvm::Optional
Definition: APInt.h:34
MCFixupKindInfo.h
getRelaxedOpcodeArith
static unsigned getRelaxedOpcodeArith(const MCInst &Inst)
Definition: X86AsmBackend.cpp:228
llvm::X86II::FormMask
@ FormMask
Definition: X86BaseInfo.h:744
llvm::X86::reloc_riprel_4byte
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:892
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::MCInst
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:183
llvm::X86::CondCode
CondCode
Definition: X86BaseInfo.h:80
llvm::MCInstrDesc::isIndirectBranch
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
Definition: MCInstrDesc.h:303
llvm::MCEncodedFragmentWithFixups::getFixups
SmallVectorImpl< MCFixup > & getFixups()
Definition: MCFragment.h:223
llvm::X86::COND_INVALID
@ COND_INVALID
Definition: X86BaseInfo.h:107
llvm::detail::DenseSetImpl< ValueT, DenseMap< ValueT, detail::DenseSetEmpty, DenseMapInfo< ValueT >, detail::DenseSetPair< ValueT > >, DenseMapInfo< ValueT > >::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
llvm::MCStreamer::getAllowAutoPadding
bool getAllowAutoPadding() const
Definition: MCStreamer.h:290
llvm::detail::DenseSetImpl< ValueT, DenseMap< ValueT, detail::DenseSetEmpty, DenseMapInfo< ValueT >, detail::DenseSetPair< ValueT > >, DenseMapInfo< ValueT > >::count
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
llvm::MCInst::getNumOperands
unsigned getNumOperands() const
Definition: MCInst.h:207
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:158
llvm::FK_PCRel_1
@ FK_PCRel_1
A one-byte pc relative fixup.
Definition: MCFixup.h:28
llvm::MachO::getCPUType
Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
llvm::X86::reloc_signed_4byte_relax
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:26
llvm::MCSection::end
iterator end()
Definition: MCSection.h:173
llvm::MCInst::setOpcode
void setOpcode(unsigned Op)
Definition: MCInst.h:196
MCObjectStreamer.h
llvm::FirstLiteralRelocationKind
@ FirstLiteralRelocationKind
The range [FirstLiteralRelocationKind, MaxTargetFixupKind) is used for relocations coming from ....
Definition: MCFixup.h:60
llvm::MCFragment
Definition: MCFragment.h:31
CommandLine.h
llvm::FK_Data_4
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
llvm::MCInstrDesc::TSFlags
uint64_t TSFlags
Definition: MCInstrDesc.h:204
llvm::Triple::isOSBinFormatELF
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:632
llvm::MCAsmBackend
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:36
ELF.h
MCAsmBackend.h
llvm::MutableArrayRef
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:305
llvm::MCAsmLayout::invalidateFragmentsFrom
void invalidateFragmentsFrom(MCFragment *F)
Invalidate the fragments starting with F because it has been resized.
Definition: MCFragment.cpp:70
llvm::FK_SecRel_4
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:42
llvm::MCSubtargetInfo::hasFeature
bool hasFeature(unsigned Feature) const
Definition: MCSubtargetInfo.h:118
llvm::support::little
@ little
Definition: Endian.h:27
llvm::StringRef::split
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:730
llvm::X86::AlignBranchCall
@ AlignBranchCall
Definition: X86BaseInfo.h:362
llvm::MCSubtargetInfo::getTargetTriple
const Triple & getTargetTriple() const
Definition: MCSubtargetInfo.h:107
getSizeForInstFragment
static size_t getSizeForInstFragment(const MCFragment *F)
Definition: X86AsmBackend.cpp:519
MCContext.h
getFixupKindSize
static unsigned getFixupKindSize(unsigned Kind)
Definition: X86AsmBackend.cpp:740
MCSectionMachO.h
llvm::X86::AlignBranchJcc
@ AlignBranchJcc
Definition: X86BaseInfo.h:360
MCInstrInfo.h
llvm::ELF::EM_IAMCU
@ EM_IAMCU
Definition: ELF.h:138
llvm::MCCFIInstruction::getOffset
int getOffset() const
Definition: MCDwarf.h:602
MCInst.h
llvm::FK_SecRel_2
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:41
llvm::MCObjectStreamer
Streaming object file generation interface.
Definition: MCObjectStreamer.h:36
llvm::MCRelaxableFragment::setInst
void setInst(const MCInst &Value)
Definition: MCFragment.h:285
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:196
MCSubtargetInfo.h
isFirstMacroFusibleInst
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
Definition: X86AsmBackend.cpp:360
llvm::MCSubtargetInfo::getFeatureBits
const FeatureBitset & getFeatureBits() const
Definition: MCSubtargetInfo.h:111
llvm::Triple::isOSBinFormatMachO
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
Definition: Triple.h:645
llvm::createX86_32AsmBackend
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Definition: X86AsmBackend.cpp:1594
llvm::N86::ESI
@ ESI
Definition: X86MCTargetDesc.h:51
llvm::MCInst::dump_pretty
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ") const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:74
llvm::raw_ostream::write
raw_ostream & write(unsigned char C)
Definition: raw_ostream.cpp:220
llvm::MCRelaxableFragment::getInst
const MCInst & getInst() const
Definition: MCFragment.h:284
llvm::MCELFObjectTargetWriter
Definition: MCELFObjectWriter.h:53
llvm::Triple::isOSIAMCU
bool isOSIAMCU() const
Definition: Triple.h:524
llvm::report_fatal_error
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:50
MCMachObjectWriter.h
llvm::MCCFIInstruction::getOperation
OpType getOperation() const
Definition: MCDwarf.h:586
llvm::X86::AlignBranchIndirect
@ AlignBranchIndirect
Definition: X86BaseInfo.h:364
llvm::X86::classifyFirstOpcodeInMacroFusion
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:140
llvm::MCExpr::getKind
ExprKind getKind() const
Definition: MCExpr.h:81
llvm::MCObjectStreamer::getCurrentFragment
MCFragment * getCurrentFragment() const
Definition: MCObjectStreamer.cpp:184
llvm::MachO::getCPUSubType
Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Triple::getArch
ArchType getArch() const
getArch - Get the parsed architecture type of this triple.
Definition: Triple.h:307
llvm::MCStreamer::getCurrentSectionOnly
MCSection * getCurrentSectionOnly() const
Definition: MCStreamer.h:374
llvm::MCCFIInstruction::getRegister
unsigned getRegister() const
Definition: MCDwarf.h:589
llvm::isIntN
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:460
llvm::None
const NoneType None
Definition: None.h:23
llvm::MCCFIInstruction
Definition: MCDwarf.h:441
llvm::X86::reloc_signed_4byte
@ reloc_signed_4byte
Definition: X86FixupKinds.h:23
llvm::SmallString< 256 >
llvm::N86::EBX
@ EBX
Definition: X86MCTargetDesc.h:51
llvm::X86::NumTargetFixupKinds
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:35
llvm::N86::EDX
@ EDX
Definition: X86MCTargetDesc.h:51
llvm::MCAsmLayout::getSectionOrder
llvm::SmallVectorImpl< MCSection * > & getSectionOrder()
Definition: MCAsmLayout.h:69
llvm::isInt< 8 >
constexpr bool isInt< 8 >(int64_t x)
Definition: MathExtras.h:367
llvm::DenseSet
Implements a dense probed hash-table based set.
Definition: DenseSet.h:268
llvm::X86II::RawFrmMemOffs
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate wi...
Definition: X86BaseInfo.h:594
llvm::cl::opt
Definition: CommandLine.h:1419
isPrefix
static bool isPrefix(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
Definition: X86AsmBackend.cpp:355
llvm::Triple::GNUX32
@ GNUX32
Definition: Triple.h:212
llvm::MCAssembler
Definition: MCAssembler.h:60
llvm::X86II::getOperandBias
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:1046
llvm::MCCFIInstruction::OpDefCfaOffset
@ OpDefCfaOffset
Definition: MCDwarf.h:449
llvm::MCInstrDesc::isCall
bool isCall() const
Return true if the instruction is a call.
Definition: MCInstrDesc.h:280
llvm::Triple::getOS
OSType getOS() const
getOS - Get the parsed operating system type of this triple.
Definition: Triple.h:316
MCELFObjectWriter.h
llvm::MCFixupKindInfo::FKF_IsPCRel
@ FKF_IsPCRel
Is this fixup kind PCrelative? This is used by the assembler backend to evaluate fixup values in a ta...
Definition: MCFixupKindInfo.h:19
llvm::assumeAligned
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:113
llvm::MCPhysReg
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:19
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:440
MCRegisterInfo.h
llvm::N86::ESP
@ ESP
Definition: X86MCTargetDesc.h:51
llvm::MCObjectStreamer::insert
void insert(MCFragment *F)
Definition: MCObjectStreamer.h:78
llvm::MCAsmBackend::getFixupKind
virtual Optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
Definition: MCAsmBackend.cpp:70
llvm::ilist_node_with_parent::getPrevNode
NodeTy * getPrevNode()
Definition: ilist_node.h:274
isFullyRelaxed
static bool isFullyRelaxed(const MCRelaxableFragment &RF)
Return true if this instruction has been fully relaxed into its most general available form.
Definition: X86AsmBackend.cpp:858
llvm::MCFixupKindInfo
Target independent information on a fixup kind.
Definition: MCFixupKindInfo.h:15
llvm::FK_PCRel_2
@ FK_PCRel_2
A two-byte pc relative fixup.
Definition: MCFixup.h:29
llvm::FK_Data_1
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:23
llvm::FK_PCRel_4
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
llvm::MCTargetOptions
Definition: MCTargetOptions.h:36
llvm::MCFragment::FT_Align
@ FT_Align
Definition: MCFragment.h:36
llvm::MCBoundaryAlignFragment::setLastFragment
void setLastFragment(const MCFragment *F)
Definition: MCFragment.h:587
llvm::FK_NONE
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:22
llvm::MCELFObjectTargetWriter::getOSABI
uint8_t getOSABI() const
Definition: MCELFObjectWriter.h:99
llvm::X86::reloc_global_offset_table
@ reloc_global_offset_table
Definition: X86FixupKinds.h:28
llvm::MCObjectStreamer::getAssembler
MCAssembler & getAssembler()
Definition: MCObjectStreamer.h:112
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:33
llvm::MCSection::getAlignment
unsigned getAlignment() const
Definition: MCSection.h:131
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
getRelaxedOpcodeBranch
static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode)
Definition: X86AsmBackend.cpp:216
llvm::X86II::isPrefix
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:965
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::X86II::RawFrmSrc
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possib...
Definition: X86BaseInfo.h:598
llvm::X86::classifySecondCondCodeInMacroFusion
SecondMacroFusionInstKind classifySecondCondCodeInMacroFusion(X86::CondCode CC)
Definition: X86BaseInfo.h:289
llvm::MCFragment::FT_Data
@ FT_Data
Definition: MCFragment.h:37
llvm::cantFail
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:708
uint32_t
llvm::X86::AlignBranchBoundaryKind
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:357
llvm::X86::reloc_global_offset_table8
@ reloc_global_offset_table8
Definition: X86FixupKinds.h:31
llvm::MCSection
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:39
llvm::X86::isMacroFused
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:338
llvm::MCRegisterInfo
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Definition: MCRegisterInfo.h:135
llvm::FK_SecRel_1
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:40
llvm::MCAsmLayout::getFragmentOffset
uint64_t getFragmentOffset(const MCFragment *F) const
Get the offset of the given fragment inside its containing section.
Definition: MCFragment.cpp:96
MCAsmLayout.h
llvm::X86::reloc_riprel_4byte_relax
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:19
llvm::Triple::isOSWindows
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:547
llvm::MCSection::setAlignment
void setAlignment(Align Value)
Definition: MCSection.h:132
llvm::X86II::RawFrmDstSrc
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a pos...
Definition: X86BaseInfo.h:607
MCObjectWriter.h
llvm::N86::ECX
@ ECX
Definition: X86MCTargetDesc.h:51
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
llvm::MCInstrInfo
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:25
llvm::X86::DS_Encoding
@ DS_Encoding
Definition: X86BaseInfo.h:370
uint16_t
llvm::MCAsmLayout
Encapsulates the layout of an assembly file at a particular point in time.
Definition: MCAsmLayout.h:28
llvm::ELF::EM_X86_64
@ EM_X86_64
Definition: ELF.h:177
llvm::MCCodeEmitter
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
llvm::X86::FirstMacroFusionInstKind::Cmp
@ Cmp
llvm::ilist_iterator
Iterator for intrusive lists based on ilist_node.
Definition: ilist_iterator.h:57
llvm::Align::value
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
llvm::X86::reloc_riprel_4byte_movq_load
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
llvm::FK_SecRel_8
@ FK_SecRel_8
An eight-byte section relative fixup.
Definition: MCFixup.h:43
llvm::ELF::EM_386
@ EM_386
Definition: ELF.h:135
llvm::MCEncodedFragment::getSubtargetInfo
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCFragment.h:173
llvm::X86::AlignBranchJmp
@ AlignBranchJmp
Definition: X86BaseInfo.h:361
llvm::X86::SS_Encoding
@ SS_Encoding
Definition: X86BaseInfo.h:374
llvm::MCInst::getOpcode
unsigned getOpcode() const
Definition: MCInst.h:197
llvm::createX86MachObjectWriter
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
Definition: X86MachObjectWriter.cpp:600
StringSwitch.h
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::X86::SecondMacroFusionInstKind
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:127
llvm::X86::getSegmentOverridePrefixForReg
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(unsigned Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:380
classifySecondInstInMacroFusion
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
Definition: X86AsmBackend.cpp:335
MCValue.h
llvm::MCOperand::isExpr
bool isExpr() const
Definition: MCInst.h:64
llvm::MCFragment::FT_Relaxable
@ FT_Relaxable
Definition: MCFragment.h:41
llvm::MCRelaxableFragment::getAllowAutoPadding
bool getAllowAutoPadding() const
Definition: MCFragment.h:287
llvm::MCSection::getKind
SectionKind getKind() const
Definition: MCSection.h:116
llvm::MCSection::begin
iterator begin()
Definition: MCSection.h:170
llvm::MCFixupKind
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
X86BaseInfo.h
llvm::MCInstrDesc::isUnconditionalBranch
bool isUnconditionalBranch() const
Return true if this is a branch which always transfers control flow to some other block.
Definition: MCInstrDesc.h:317
llvm::FK_Data_8
@ FK_Data_8
An eight-byte fixup.
Definition: MCFixup.h:26
llvm::X86::FirstMacroFusionInstKind::Invalid
@ Invalid
isRIPRelative
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
Definition: X86AsmBackend.cpp:341
llvm::MCExpr::SymbolRef
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
getRelaxedOpcode
static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode)
Definition: X86AsmBackend.cpp:313
llvm::MCInst::getOperand
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:205
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
llvm::raw_svector_ostream
A raw_ostream that writes to a SmallVector or SmallString.
Definition: raw_ostream.h:647
llvm::X86::AlignBranchNone
@ AlignBranchNone
Definition: X86BaseInfo.h:358
llvm::Triple::getEnvironment
EnvironmentType getEnvironment() const
getEnvironment - Get the parsed environment type of this triple.
Definition: Triple.h:325
llvm::X86::FirstMacroFusionInstKind
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:111
hasInterruptDelaySlot
static bool hasInterruptDelaySlot(const MCInst &Inst)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
Definition: X86AsmBackend.cpp:471
llvm::StringSwitch
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:42
llvm::MCInstrDesc::isReturn
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:268
llvm::MCValue
This represents an "assembler immediate".
Definition: MCValue.h:37
llvm::MCSymbolRefExpr::VK_None
@ VK_None
Definition: MCExpr.h:195
llvm::MCInstrInfo::get
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:62
MachO.h
llvm::cl::desc
Definition: CommandLine.h:411
llvm::X86::reloc_riprel_4byte_relax_rex
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:21
raw_ostream.h
llvm::FK_Data_2
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:24
llvm::MCDataFragment
Fragment for data and encoded instructions.
Definition: MCFragment.h:242
TargetRegistry.h
llvm::MCAssembler::isBundlingEnabled
bool isBundlingEnabled() const
Definition: MCAssembler.h:331
MCExpr.h
llvm::MCSubtargetInfo
Generic base class for all target subtargets.
Definition: MCSubtargetInfo.h:75
llvm::MCFixup
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:81
llvm::X86::reloc_branch_4byte_pcrel
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:32
llvm::MCInstrDesc::getNumOperands
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:229
llvm::MCExpr
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
llvm::createX86_64AsmBackend
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Definition: X86AsmBackend.cpp:1613
llvm::MCInstrDesc::isConditionalBranch
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Definition: MCInstrDesc.h:309
X86FixupKinds.h
hasVariantSymbol
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
Definition: X86AsmBackend.cpp:448
llvm::MCEncodedFragmentWithContents::getContents
SmallVectorImpl< char > & getContents()
Definition: MCFragment.h:197
llvm::MCFragment::FT_CompactEncodedInst
@ FT_CompactEncodedInst
Definition: MCFragment.h:38
llvm::MCCodeEmitter::encodeInstruction
virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
EncodeInstruction - Encode the given Inst to bytes on the output stream OS.
llvm::MCOperand::getReg
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:68
llvm::MCBoundaryAlignFragment
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCFragment.h:566