MachO_x86_64.cpp
//===---- MachO_x86_64.cpp -JIT linker implementation for MachO/x86-64 ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachO/x86-64 jit-link implementation.
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
#include "llvm/ExecutionEngine/JITLink/x86_64.h"

#include "MachOLinkGraphBuilder.h"
#include "PerGraphGOTAndPLTStubsBuilder.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;
using namespace llvm::jitlink;

namespace {

class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder {
public:
  MachOLinkGraphBuilder_x86_64(const object::MachOObjectFile &Obj)
      : MachOLinkGraphBuilder(Obj, Triple("x86_64-apple-darwin"),
                              x86_64::getEdgeKindName) {}

private:
  enum MachONormalizedRelocationType : unsigned {
    MachOBranch32,
    MachOPointer32,
    MachOPointer64,
    MachOPointer64Anon,
    MachOPCRel32,
    MachOPCRel32Minus1,
    MachOPCRel32Minus2,
    MachOPCRel32Minus4,
    MachOPCRel32Anon,
    MachOPCRel32Minus1Anon,
    MachOPCRel32Minus2Anon,
    MachOPCRel32Minus4Anon,
    MachOPCRel32GOTLoad,
    MachOPCRel32GOT,
    MachOPCRel32TLV,
    MachOSubtractor32,
    MachOSubtractor64,
  };
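
  // getRelocKind (below) normalizes the raw (r_type, r_pcrel, r_extern,
  // r_length) tuple of a MachO relocation_info into one of the values above.
  // For example, X86_64_RELOC_BRANCH with r_pcrel=1, r_extern=1 and
  // r_length=2 (a 4-byte, PC-relative branch to an external symbol) maps to
  // MachOBranch32, while any combination that is not explicitly matched
  // falls through to the JITLinkError return at the end of the function.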

  static Expected<MachONormalizedRelocationType>
  getRelocKind(const MachO::relocation_info &RI) {
    switch (RI.r_type) {
    case MachO::X86_64_RELOC_UNSIGNED:
      if (!RI.r_pcrel) {
        if (RI.r_length == 3)
          return RI.r_extern ? MachOPointer64 : MachOPointer64Anon;
        else if (RI.r_extern && RI.r_length == 2)
          return MachOPointer32;
      }
      break;
    case MachO::X86_64_RELOC_SIGNED:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32 : MachOPCRel32Anon;
      break;
    case MachO::X86_64_RELOC_BRANCH:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOBranch32;
      break;
    case MachO::X86_64_RELOC_GOT_LOAD:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOPCRel32GOTLoad;
      break;
    case MachO::X86_64_RELOC_GOT:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOPCRel32GOT;
      break;
    case MachO::X86_64_RELOC_SUBTRACTOR:
      if (!RI.r_pcrel && RI.r_extern) {
        if (RI.r_length == 2)
          return MachOSubtractor32;
        else if (RI.r_length == 3)
          return MachOSubtractor64;
      }
      break;
    case MachO::X86_64_RELOC_SIGNED_1:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32Minus1 : MachOPCRel32Minus1Anon;
      break;
    case MachO::X86_64_RELOC_SIGNED_2:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32Minus2 : MachOPCRel32Minus2Anon;
      break;
    case MachO::X86_64_RELOC_SIGNED_4:
      if (RI.r_pcrel && RI.r_length == 2)
        return RI.r_extern ? MachOPCRel32Minus4 : MachOPCRel32Minus4Anon;
      break;
    case MachO::X86_64_RELOC_TLV:
      if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
        return MachOPCRel32TLV;
      break;
    }

    return make_error<JITLinkError>(
        "Unsupported x86-64 relocation: address=" +
        formatv("{0:x8}", RI.r_address) +
        ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
        ", kind=" + formatv("{0:x1}", RI.r_type) +
        ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
        ", extern=" + (RI.r_extern ? "true" : "false") +
        ", length=" + formatv("{0:d}", RI.r_length));
  }

  using PairRelocInfo = std::tuple<Edge::Kind, Symbol *, uint64_t>;

  // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
  // returns the edge kind and addend to be used.
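  //
  // A SUBTRACTOR/UNSIGNED pair encodes a difference between two symbols,
  // e.g. the assembly-level idiom
  //
  //   .quad B - A + C        // emitted as SUBTRACTOR(A) + UNSIGNED(B)
  //
  // where the fixup bytes initially hold the constant C (plus B's address
  // for a non-extern UNSIGNED reloc). The pair is modelled as a single
  // Delta32/Delta64 edge when the fixup lives in A's block, or a
  // NegDelta32/NegDelta64 edge when it lives in B's block, with the addend
  // adjusted so that the final fixed-up value is still B - A + C.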
  Expected<PairRelocInfo> parsePairRelocation(
      Block &BlockToFix, MachONormalizedRelocationType SubtractorKind,
      const MachO::relocation_info &SubRI, JITTargetAddress FixupAddress,
      const char *FixupContent, object::relocation_iterator &UnsignedRelItr,
      object::relocation_iterator &RelEnd) {
    using namespace support;

    assert(((SubtractorKind == MachOSubtractor32 && SubRI.r_length == 2) ||
            (SubtractorKind == MachOSubtractor64 && SubRI.r_length == 3)) &&
           "Subtractor kind should match length");
    assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
    assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");

    if (UnsignedRelItr == RelEnd)
      return make_error<JITLinkError>("x86_64 SUBTRACTOR without paired "
                                      "UNSIGNED relocation");

    auto UnsignedRI = getRelocationInfo(UnsignedRelItr);

    if (SubRI.r_address != UnsignedRI.r_address)
      return make_error<JITLinkError>("x86_64 SUBTRACTOR and paired UNSIGNED "
                                      "point to different addresses");

    if (SubRI.r_length != UnsignedRI.r_length)
      return make_error<JITLinkError>("length of x86_64 SUBTRACTOR and paired "
                                      "UNSIGNED reloc must match");

    Symbol *FromSymbol;
    if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
      FromSymbol = FromSymbolOrErr->GraphSymbol;
    else
      return FromSymbolOrErr.takeError();

    // Read the current fixup value.
    uint64_t FixupValue = 0;
    if (SubRI.r_length == 3)
      FixupValue = *(const little64_t *)FixupContent;
    else
      FixupValue = *(const little32_t *)FixupContent;

    // Find 'ToSymbol' using symbol number or address, depending on whether the
    // paired UNSIGNED relocation is extern.
    Symbol *ToSymbol = nullptr;
    if (UnsignedRI.r_extern) {
      // Find target symbol by symbol index.
      if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
        ToSymbol = ToSymbolOrErr->GraphSymbol;
      else
        return ToSymbolOrErr.takeError();
    } else {
      auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
      if (!ToSymbolSec)
        return ToSymbolSec.takeError();
      ToSymbol = getSymbolByAddress(ToSymbolSec->Address);
      assert(ToSymbol && "No symbol for section");
      FixupValue -= ToSymbol->getAddress();
    }

    Edge::Kind DeltaKind;
    Symbol *TargetSymbol;
    uint64_t Addend;
    if (&BlockToFix == &FromSymbol->getAddressable()) {
      TargetSymbol = ToSymbol;
      DeltaKind = (SubRI.r_length == 3) ? x86_64::Delta64 : x86_64::Delta32;
      Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
      // FIXME: handle extern 'from'.
    } else if (&BlockToFix == &ToSymbol->getAddressable()) {
      TargetSymbol = FromSymbol;
      DeltaKind =
          (SubRI.r_length == 3) ? x86_64::NegDelta64 : x86_64::NegDelta32;
      Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
    } else {
      // BlockToFix was neither FromSymbol nor ToSymbol.
      return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
                                      "either 'A' or 'B' (or a symbol in one "
                                      "of their alt-entry chains)");
    }

    return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
  }

  Error addRelocations() override {
    using namespace support;
    auto &Obj = getObject();

    LLVM_DEBUG(dbgs() << "Processing relocations:\n");

    for (auto &S : Obj.sections()) {

      JITTargetAddress SectionAddress = S.getAddress();

      // Skip relocations for virtual sections.
      if (S.isVirtual()) {
        if (S.relocation_begin() != S.relocation_end())
          return make_error<JITLinkError>("Virtual section contains "
                                          "relocations");
        continue;
      }

      // Skip relocations for debug symbols.
      {
        auto &NSec =
            getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
        if (!NSec.GraphSection) {
          LLVM_DEBUG({
            dbgs() << "  Skipping relocations for MachO section "
                   << NSec.SegName << "/" << NSec.SectName
                   << " which has no associated graph section\n";
          });
          continue;
        }
      }

      // Add relocations for section.
      for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
           RelItr != RelEnd; ++RelItr) {

        MachO::relocation_info RI = getRelocationInfo(RelItr);

        // Find the address of the value to fix up.
        JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;

        LLVM_DEBUG({
          auto &NSec =
              getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
          dbgs() << "  " << NSec.SectName << " + "
                 << formatv("{0:x8}", RI.r_address) << ":\n";
        });

        // Find the block that the fixup points to.
        Block *BlockToFix = nullptr;
        {
          auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
          if (!SymbolToFixOrErr)
            return SymbolToFixOrErr.takeError();
          BlockToFix = &SymbolToFixOrErr->getBlock();
        }

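        // In relocation_info, r_length encodes the log2 of the fixup size
        // (2 => 4 bytes, 3 => 8 bytes), so the check below verifies that the
        // whole (1 << r_length)-byte fixup lies inside BlockToFix's content.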
        if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
            BlockToFix->getAddress() + BlockToFix->getContent().size())
          return make_error<JITLinkError>(
              "Relocation extends past end of fixup block");

        // Get a pointer to the fixup content.
        const char *FixupContent = BlockToFix->getContent().data() +
                                   (FixupAddress - BlockToFix->getAddress());

        size_t FixupOffset = FixupAddress - BlockToFix->getAddress();

        // The target symbol and addend will be populated by the switch below.
        Symbol *TargetSymbol = nullptr;
        uint64_t Addend = 0;

        // Sanity check the relocation kind.
        auto MachORelocKind = getRelocKind(RI);
        if (!MachORelocKind)
          return MachORelocKind.takeError();

        Edge::Kind Kind = Edge::Invalid;

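        // Note on the addend adjustments below: x86-64 PC-relative
        // displacements are relative to the end of the instruction, i.e. to
        // FixupAddress + 4 for these 4-byte fields. Branch-style edge kinds
        // account for that bias in their fixup expression, so MachOBranch32
        // keeps the raw stored value, while relocations lowered to Delta32
        // (which is computed relative to FixupAddress itself) subtract 4
        // (or 4 + N for the SIGNED_N forms) from the raw value instead.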
        switch (*MachORelocKind) {
        case MachOBranch32:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent;
          Kind = x86_64::BranchPCRel32;
          break;
        case MachOPCRel32:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent - 4;
          Kind = x86_64::Delta32;
          break;
        case MachOPCRel32GOTLoad:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent;
          Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable;
          if (FixupOffset < 3)
            return make_error<JITLinkError>("GOTLD at invalid offset " +
                                            formatv("{0}", FixupOffset));
          break;
        case MachOPCRel32GOT:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent - 4;
          Kind = x86_64::RequestGOTAndTransformToDelta32;
          break;
        case MachOPCRel32TLV:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent;
          Kind = x86_64::RequestTLVPAndTransformToPCRel32TLVPLoadRelaxable;
          break;
        case MachOPointer32:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const ulittle32_t *)FixupContent;
          Kind = x86_64::Pointer32;
          break;
        case MachOPointer64:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const ulittle64_t *)FixupContent;
          Kind = x86_64::Pointer64;
          break;
        case MachOPointer64Anon: {
          JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
          if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
            TargetSymbol = &*TargetSymbolOrErr;
          else
            return TargetSymbolOrErr.takeError();
          Addend = TargetAddress - TargetSymbol->getAddress();
          Kind = x86_64::Pointer64;
          break;
        }
        case MachOPCRel32Minus1:
        case MachOPCRel32Minus2:
        case MachOPCRel32Minus4:
          if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
            TargetSymbol = TargetSymbolOrErr->GraphSymbol;
          else
            return TargetSymbolOrErr.takeError();
          Addend = *(const little32_t *)FixupContent - 4;
          Kind = x86_64::Delta32;
          break;
        case MachOPCRel32Anon: {
          JITTargetAddress TargetAddress =
              FixupAddress + 4 + *(const little32_t *)FixupContent;
          if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
            TargetSymbol = &*TargetSymbolOrErr;
          else
            return TargetSymbolOrErr.takeError();
          Addend = TargetAddress - TargetSymbol->getAddress() - 4;
          Kind = x86_64::Delta32;
          break;
        }
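        // For the anonymous SIGNED_N relocations the stored displacement is
        // relative to the end of the instruction, which is 4 + N bytes past
        // the fixup address because N immediate bytes (N = 1, 2 or 4) follow
        // the displacement field; hence
        // Delta = 4 + (1 << (kind - MachOPCRel32Minus1Anon)) below.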
        case MachOPCRel32Minus1Anon:
        case MachOPCRel32Minus2Anon:
        case MachOPCRel32Minus4Anon: {
          JITTargetAddress Delta =
              4 + static_cast<JITTargetAddress>(
                      1ULL << (*MachORelocKind - MachOPCRel32Minus1Anon));
          JITTargetAddress TargetAddress =
              FixupAddress + Delta + *(const little32_t *)FixupContent;
          if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
            TargetSymbol = &*TargetSymbolOrErr;
          else
            return TargetSymbolOrErr.takeError();
          Addend = TargetAddress - TargetSymbol->getAddress() - Delta;
          Kind = x86_64::Delta32;
          break;
        }
        case MachOSubtractor32:
        case MachOSubtractor64: {
          // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
          // parsePairRelocation handles the paired reloc, and returns the
          // edge kind to be used (either Delta32/Delta64, or
          // NegDelta32/NegDelta64, depending on the direction of the
          // subtraction) along with the addend.
          auto PairInfo =
              parsePairRelocation(*BlockToFix, *MachORelocKind, RI,
                                  FixupAddress, FixupContent, ++RelItr, RelEnd);
          if (!PairInfo)
            return PairInfo.takeError();
          std::tie(Kind, TargetSymbol, Addend) = *PairInfo;
          assert(TargetSymbol && "No target symbol from parsePairRelocation?");
          break;
        }
        }

        LLVM_DEBUG({
          dbgs() << "    ";
          Edge GE(Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
                  Addend);
          printEdge(dbgs(), *BlockToFix, GE, x86_64::getEdgeKindName(Kind));
          dbgs() << "\n";
        });
        BlockToFix->addEdge(Kind, FixupAddress - BlockToFix->getAddress(),
                            *TargetSymbol, Addend);
      }
    }
    return Error::success();
  }
};

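// Per-graph GOT and PLT stub builder for MachO/x86-64. The CRTP base class
// (PerGraphGOTAndPLTStubsBuilder) appears to drive the process: it walks the
// graph's edges and calls back into the hooks below -- isGOTEdgeToFix /
// createGOTEntry / fixGOTEdge for GOT accesses, and isExternalBranchEdge /
// createPLTStub / fixPLTEdge for external branches -- so this class only
// describes the x86-64 specifics (pointer-sized GOT entries in "$__GOT",
// jump stubs in "$__STUBS").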
class PerGraphGOTAndPLTStubsBuilder_MachO_x86_64
    : public PerGraphGOTAndPLTStubsBuilder<
          PerGraphGOTAndPLTStubsBuilder_MachO_x86_64> {
public:

  using PerGraphGOTAndPLTStubsBuilder<
      PerGraphGOTAndPLTStubsBuilder_MachO_x86_64>::
      PerGraphGOTAndPLTStubsBuilder;

  bool isGOTEdgeToFix(Edge &E) const {
    return E.getKind() == x86_64::RequestGOTAndTransformToDelta32 ||
           E.getKind() ==
               x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable;
  }

  Symbol &createGOTEntry(Symbol &Target) {
    return x86_64::createAnonymousPointer(G, getGOTSection(), &Target);
  }

  void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
    // Fix the edge kind.
    switch (E.getKind()) {
    case x86_64::RequestGOTAndTransformToDelta32:
      E.setKind(x86_64::Delta32);
      break;
    case x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
      E.setKind(x86_64::PCRel32GOTLoadRelaxable);
      break;
    default:
      llvm_unreachable("Not a GOT transform edge");
    }
    // Fix the target, leave the addend as-is.
    E.setTarget(GOTEntry);
  }

  bool isExternalBranchEdge(Edge &E) {
    return E.getKind() == x86_64::BranchPCRel32 && E.getTarget().isExternal();
  }

  Symbol &createPLTStub(Symbol &Target) {
    return x86_64::createAnonymousPointerJumpStub(G, getStubsSection(),
                                                  getGOTEntry(Target));
  }

  void fixPLTEdge(Edge &E, Symbol &Stub) {
    assert(E.getKind() == x86_64::BranchPCRel32 && "Not a Branch32 edge?");
    assert(E.getAddend() == 0 &&
           "BranchPCRel32 edge has unexpected addend value");

    // Set the edge kind to BranchPCRel32ToPtrJumpStubRelaxable. We will use
    // this to check for stub optimization opportunities in the
    // optimizeMachO_x86_64_GOTAndStubs pass below.
    E.setKind(x86_64::BranchPCRel32ToPtrJumpStubRelaxable);
    E.setTarget(Stub);
  }

private:
  Section &getGOTSection() {
    if (!GOTSection)
      GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
    return *GOTSection;
  }

  Section &getStubsSection() {
    if (!StubsSection) {
      auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
          sys::Memory::MF_READ | sys::Memory::MF_EXEC);
      StubsSection = &G.createSection("$__STUBS", StubsProt);
    }
    return *StubsSection;
  }

  Section *GOTSection = nullptr;
  Section *StubsSection = nullptr;
};

} // namespace

static Error optimizeMachO_x86_64_GOTAndStubs(LinkGraph &G) {
  LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");

  for (auto *B : G.blocks())
    for (auto &E : B->edges())
      if (E.getKind() == x86_64::PCRel32GOTLoadRelaxable) {
        assert(E.getOffset() >= 3 && "GOT edge occurs too early in block");

        // Optimize GOT references.
        auto &GOTBlock = E.getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT entry block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT entry should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
        JITTargetAddress TargetAddr = GOTTarget.getAddress();

        // Check that this is a recognized MOV instruction.
        // FIXME: Can we assume this?
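        // The GOT load is expected to be "movq <disp32>(%rip), %reg": the
        // three bytes before the fixup are the 0x48 REX.W prefix, the 0x8b
        // MOV opcode, and a ModRM byte, followed by the 4-byte displacement
        // that this edge fixes up. If the real target is reachable with a
        // signed 32-bit displacement, the 0x8b opcode can be rewritten to
        // 0x8d (LEA) and the GOT indirection dropped entirely.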
        constexpr uint8_t MOVQRIPRel[] = {0x48, 0x8b};
        if (strncmp(B->getContent().data() + E.getOffset() - 3,
                    reinterpret_cast<const char *>(MOVQRIPRel), 2) != 0)
          continue;

        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (Displacement >= std::numeric_limits<int32_t>::min() &&
            Displacement <= std::numeric_limits<int32_t>::max()) {
          E.setTarget(GOTTarget);
          E.setKind(x86_64::Delta32);
          E.setAddend(E.getAddend() - 4);
          char *BlockData = B->getMutableContent(G).data();
          BlockData[E.getOffset() - 2] = (char)0x8d;
          LLVM_DEBUG({
            dbgs() << "  Replaced GOT load with LEA:\n    ";
            printEdge(dbgs(), *B, E, x86_64::getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      } else if (E.getKind() == x86_64::BranchPCRel32ToPtrJumpStubRelaxable) {
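        // A call routed through a jump stub can bypass the stub and branch
        // directly to the ultimate target when that target is close enough:
        // the stub is a single "jmpq *<disp32>(%rip)" through a pointer-sized
        // GOT entry, so if the GOT entry's target fits in the branch's signed
        // 32-bit displacement the edge can simply be retargeted at it as a
        // plain BranchPCRel32.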
        auto &StubBlock = E.getTarget().getBlock();
        assert(StubBlock.getSize() == sizeof(x86_64::PointerJumpStubContent) &&
               "Stub block should be stub sized");
        assert(StubBlock.edges_size() == 1 &&
               "Stub block should only have one outgoing edge");

        auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT block should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
        JITTargetAddress TargetAddr = GOTTarget.getAddress();

        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (Displacement >= std::numeric_limits<int32_t>::min() &&
            Displacement <= std::numeric_limits<int32_t>::max()) {
          E.setKind(x86_64::BranchPCRel32);
          E.setTarget(GOTTarget);
          LLVM_DEBUG({
            dbgs() << "  Replaced stub branch with direct branch:\n    ";
            printEdge(dbgs(), *B, E, x86_64::getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      }

  return Error::success();
}

namespace llvm {
namespace jitlink {

class MachOJITLinker_x86_64 : public JITLinker<MachOJITLinker_x86_64> {
  friend class JITLinker<MachOJITLinker_x86_64>;

public:
  MachOJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
                        std::unique_ptr<LinkGraph> G,
                        PassConfiguration PassConfig)
      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}

private:
  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
    return x86_64::applyFixup(G, B, E);
  }
};

Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromMachOObject_x86_64(MemoryBufferRef ObjectBuffer) {
  auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
  if (!MachOObj)
    return MachOObj.takeError();
  return MachOLinkGraphBuilder_x86_64(**MachOObj).buildGraph();
}

void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
                       std::unique_ptr<JITLinkContext> Ctx) {

  PassConfiguration Config;
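
  // Pass lists run at different points in the link: roughly, PrePrunePasses
  // run before dead-stripping, PostPrunePasses after pruning but before
  // memory is allocated, and the pre-fixup list runs once addresses are
  // known but before relocations are applied -- which is where the GOT/stub
  // optimizer below needs to sit, since it compares real addresses.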

  if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
    // Add eh-frame passes.
    Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_x86_64());
    Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_x86_64());

    // Add a mark-live pass.
    if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
      Config.PrePrunePasses.push_back(std::move(MarkLive));
    else
      Config.PrePrunePasses.push_back(markAllSymbolsLive);

    // Add an in-place GOT/Stubs pass.
    Config.PostPrunePasses.push_back(
        PerGraphGOTAndPLTStubsBuilder_MachO_x86_64::asPass);

    // Add GOT/Stubs optimizer pass.
    Config.PreFixupPasses.push_back(optimizeMachO_x86_64_GOTAndStubs);
  }

  if (auto Err = Ctx->modifyPassConfig(*G, Config))
    return Ctx->notifyFailed(std::move(Err));

  // Construct a JITLinker and run the link function.
  MachOJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
}

LinkGraphPassFunction createEHFrameSplitterPass_MachO_x86_64() {
  return EHFrameSplitter("__TEXT,__eh_frame");
}

LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_x86_64() {
  return EHFrameEdgeFixer("__TEXT,__eh_frame", x86_64::PointerSize,
                          x86_64::Delta64, x86_64::Delta32, x86_64::NegDelta32);
}

} // end namespace jitlink
} // end namespace llvm