1 //===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Implementation of ELF support for the MC-JIT runtime dynamic linker.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RuntimeDyldELF.h"
14 #include "RuntimeDyldCheckerImpl.h"
15 #include "Targets/RuntimeDyldELFMips.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/StringRef.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/BinaryFormat/ELF.h"
20 #include "llvm/Object/ELFObjectFile.h"
21 #include "llvm/Object/ObjectFile.h"
22 #include "llvm/Support/Endian.h"
23 #include "llvm/Support/MemoryBuffer.h"
24 
25 using namespace llvm;
26 using namespace llvm::object;
27 using namespace llvm::support::endian;
28 
29 #define DEBUG_TYPE "dyld"
30 
31 static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
32 
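// Helper for the *_ABS_LO12_NC relocations below: ORs Imm[11:0] into bits
// 21:10 of the instruction at L, i.e. the imm12 field of the AArch64 ADD
// (immediate) and LD/ST (unsigned immediate) encodings.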
33 static void or32AArch64Imm(void *L, uint64_t Imm) {
34  or32le(L, (Imm & 0xFFF) << 10);
35 }
36 
37 template <class T> static void write(bool isBE, void *P, T V) {
38  isBE ? write<T, support::big>(P, V) : write<T, support::little>(P, V);
39 }
40 
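// Patch the split ADR/ADRP immediate at L: Imm[1:0] goes into the immlo field
// (bits 30:29) and Imm[20:2] into the immhi field (bits 23:5); all other
// instruction bits are preserved.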
41 static void write32AArch64Addr(void *L, uint64_t Imm) {
42  uint32_t ImmLo = (Imm & 0x3) << 29;
43  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
44  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
45  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
46 }
47 
48 // Return the bits [Start, End] of Val, shifted right by Start bits.
49 // For instance, getBits(0xF0, 4, 8) returns 0xF.
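// It is used below to extract the scaled low-order offset for the AArch64
// LDSTn_ABS_LO12_NC relocations, e.g. getBits(Value + Addend, 3, 11) yields
// bits 11:3 for 8-byte loads and stores.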
50 static uint64_t getBits(uint64_t Val, int Start, int End) {
51  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
52  return (Val >> Start) & Mask;
53 }
54 
55 namespace {
56 
57 template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
58  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
59 
60  typedef typename ELFT::uint addr_type;
61 
62  DyldELFObject(ELFObjectFile<ELFT> &&Obj);
63 
64 public:
65  static Expected<std::unique_ptr<DyldELFObject>>
66  create(MemoryBufferRef Wrapper);
67 
68  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
69 
70  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
71 
72  // Methods for type inquiry through isa, cast and dyn_cast
73  static bool classof(const Binary *v) {
74  return (isa<ELFObjectFile<ELFT>>(v) &&
75  classof(cast<ELFObjectFile<ELFT>>(v)));
76  }
77  static bool classof(const ELFObjectFile<ELFT> *v) {
78  return v->isDyldType();
79  }
80 };
81 
82 
83 
84 // The MemoryBuffer passed into this constructor is just a wrapper around the
85 // actual memory. Ultimately, the Binary parent class will take ownership of
86 // this MemoryBuffer object but not the underlying memory.
87 template <class ELFT>
88 DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
89  : ELFObjectFile<ELFT>(std::move(Obj)) {
90  this->isDyldELFObject = true;
91 }
92 
93 template <class ELFT>
94 Expected<std::unique_ptr<DyldELFObject<ELFT>>>
95 DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
96  auto Obj = ELFObjectFile<ELFT>::create(Wrapper);
97  if (auto E = Obj.takeError())
98  return std::move(E);
99  std::unique_ptr<DyldELFObject<ELFT>> Ret(
100  new DyldELFObject<ELFT>(std::move(*Obj)));
101  return std::move(Ret);
102 }
103 
104 template <class ELFT>
105 void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
106  uint64_t Addr) {
107  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
108  Elf_Shdr *shdr =
109  const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
110 
111  // This assumes the address passed in matches the target address bitness
112  // The template-based type cast handles everything else.
113  shdr->sh_addr = static_cast<addr_type>(Addr);
114 }
115 
116 template <class ELFT>
117 void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
118  uint64_t Addr) {
119 
120  Elf_Sym *sym = const_cast<Elf_Sym *>(
121  reinterpret_cast<const Elf_Sym *>(SymRef.getRawDataRefImpl().p));
122 
123  // This assumes the address passed in matches the target address bitness
124  // The template-based type cast handles everything else.
125  sym->st_value = static_cast<addr_type>(Addr);
126 }
127 
128 class LoadedELFObjectInfo final
129  : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
130  RuntimeDyld::LoadedObjectInfo> {
131 public:
132  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
133  : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
134 
135  OwningBinary<ObjectFile>
136  getObjectForDebug(const ObjectFile &Obj) const override;
137 };
138 
139 template <typename ELFT>
140 static Expected<std::unique_ptr<DyldELFObject<ELFT>>>
141 createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
142  const LoadedELFObjectInfo &L) {
143  typedef typename ELFT::Shdr Elf_Shdr;
144  typedef typename ELFT::uint addr_type;
145 
146  Expected<std::unique_ptr<DyldELFObject<ELFT>>> ObjOrErr =
147  DyldELFObject<ELFT>::create(Buffer);
148  if (Error E = ObjOrErr.takeError())
149  return std::move(E);
150 
151  std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
152 
153  // Iterate over all sections in the object.
154  auto SI = SourceObject.section_begin();
155  for (const auto &Sec : Obj->sections()) {
156  Expected<StringRef> NameOrErr = Sec.getName();
157  if (!NameOrErr) {
158  consumeError(NameOrErr.takeError());
159  continue;
160  }
161 
162  if (*NameOrErr != "") {
163  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
164  Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
165  reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
166 
167  if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
168  // This assumes that the address passed in matches the target address
169  // bitness. The template-based type cast handles everything else.
170  shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
171  }
172  }
173  ++SI;
174  }
175 
176  return std::move(Obj);
177 }
178 
179 static OwningBinary<ObjectFile>
180 createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
181  assert(Obj.isELF() && "Not an ELF object file.");
182 
183  std::unique_ptr<MemoryBuffer> Buffer =
184  MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
185 
186  Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
187  handleAllErrors(DebugObj.takeError());
188  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
189  DebugObj =
190  createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
191  else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
192  DebugObj =
193  createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
194  else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
195  DebugObj =
196  createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
197  else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
198  DebugObj =
199  createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
200  else
201  llvm_unreachable("Unexpected ELF format");
202 
203  handleAllErrors(DebugObj.takeError());
204  return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
205 }
206 
207 OwningBinary<ObjectFile>
208 LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
209  return createELFDebugObject(Obj, *this);
210 }
211 
212 } // anonymous namespace
213 
214 namespace llvm {
215 
216 RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
217  JITSymbolResolver &Resolver)
218  : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
219 RuntimeDyldELF::~RuntimeDyldELF() = default;
220 
221 void RuntimeDyldELF::registerEHFrames() {
222  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
223  SID EHFrameSID = UnregisteredEHFrameSections[i];
224  uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
225  uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
226  size_t EHFrameSize = Sections[EHFrameSID].getSize();
227  MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
228  }
229  UnregisteredEHFrameSections.clear();
230 }
231 
232 std::unique_ptr<RuntimeDyldELF>
233 llvm::RuntimeDyldELF::create(Triple::ArchType Arch,
234  RuntimeDyld::MemoryManager &MemMgr,
235  JITSymbolResolver &Resolver) {
236  switch (Arch) {
237  default:
238  return std::make_unique<RuntimeDyldELF>(MemMgr, Resolver);
239  case Triple::mips:
240  case Triple::mipsel:
241  case Triple::mips64:
242  case Triple::mips64el:
243  return std::make_unique<RuntimeDyldELFMips>(MemMgr, Resolver);
244  }
245 }
246 
247 std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
248 RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
249  if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
250  return std::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
251  else {
252  HasError = true;
253  raw_string_ostream ErrStream(ErrorStr);
254  logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
255  return nullptr;
256  }
257 }
258 
259 void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
260  uint64_t Offset, uint64_t Value,
261  uint32_t Type, int64_t Addend,
262  uint64_t SymOffset) {
263  switch (Type) {
264  default:
265  report_fatal_error("Relocation type not implemented yet!");
266  break;
267  case ELF::R_X86_64_NONE:
268  break;
269  case ELF::R_X86_64_8: {
270  Value += Addend;
271  assert((int64_t)Value <= INT8_MAX && (int64_t)Value >= INT8_MIN);
272  uint8_t TruncatedAddr = (Value & 0xFF);
273  *Section.getAddressWithOffset(Offset) = TruncatedAddr;
274  LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
275  << format("%p\n", Section.getAddressWithOffset(Offset)));
276  break;
277  }
278  case ELF::R_X86_64_16: {
279  Value += Addend;
280  assert((int64_t)Value <= INT16_MAX && (int64_t)Value >= INT16_MIN);
281  uint16_t TruncatedAddr = (Value & 0xFFFF);
282  support::ulittle16_t::ref(Section.getAddressWithOffset(Offset)) =
283  TruncatedAddr;
284  LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
285  << format("%p\n", Section.getAddressWithOffset(Offset)));
286  break;
287  }
288  case ELF::R_X86_64_64: {
289  support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
290  Value + Addend;
291  LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
292  << format("%p\n", Section.getAddressWithOffset(Offset)));
293  break;
294  }
295  case ELF::R_X86_64_32:
296  case ELF::R_X86_64_32S: {
297  Value += Addend;
298  assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
299  (Type == ELF::R_X86_64_32S &&
300  ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
301  uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
302  support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
303  TruncatedAddr;
304  LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
305  << format("%p\n", Section.getAddressWithOffset(Offset)));
306  break;
307  }
308  case ELF::R_X86_64_PC8: {
309  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
310  int64_t RealOffset = Value + Addend - FinalAddress;
311  assert(isInt<8>(RealOffset));
312  int8_t TruncOffset = (RealOffset & 0xFF);
313  Section.getAddress()[Offset] = TruncOffset;
314  break;
315  }
316  case ELF::R_X86_64_PC32: {
317  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
318  int64_t RealOffset = Value + Addend - FinalAddress;
319  assert(isInt<32>(RealOffset));
320  int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
321  support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
322  TruncOffset;
323  break;
324  }
325  case ELF::R_X86_64_PC64: {
326  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
327  int64_t RealOffset = Value + Addend - FinalAddress;
328  support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
329  RealOffset;
330  LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
331  << format("%p\n", FinalAddress));
332  break;
333  }
334  case ELF::R_X86_64_GOTOFF64: {
335  // Compute Value - GOTBase.
336  uint64_t GOTBase = 0;
337  for (const auto &Section : Sections) {
338  if (Section.getName() == ".got") {
339  GOTBase = Section.getLoadAddressWithOffset(0);
340  break;
341  }
342  }
343  assert(GOTBase != 0 && "missing GOT");
344  int64_t GOTOffset = Value - GOTBase + Addend;
345  support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
346  break;
347  }
348  case ELF::R_X86_64_DTPMOD64: {
349  // We only have one DSO, so the module id is always 1.
350  support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 1;
351  break;
352  }
353  case ELF::R_X86_64_DTPOFF64:
354  case ELF::R_X86_64_TPOFF64: {
355  // DTPOFF64 should resolve to the offset in the TLS block, TPOFF64 to the
356  // offset in the *initial* TLS block. Since we are statically linking, all
357  // TLS blocks already exist in the initial block, so resolve both
358  // relocations equally.
359  support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
360  Value + Addend;
361  break;
362  }
363  case ELF::R_X86_64_DTPOFF32:
364  case ELF::R_X86_64_TPOFF32: {
365  // As for the (D)TPOFF64 relocations above, both DTPOFF32 and TPOFF32 can
366  // be resolved equally.
367  int64_t RealValue = Value + Addend;
368  assert(RealValue >= INT32_MIN && RealValue <= INT32_MAX);
369  int32_t TruncValue = RealValue;
370  support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
371  TruncValue;
372  break;
373  }
374  }
375 }
376 
377 void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
378  uint64_t Offset, uint32_t Value,
379  uint32_t Type, int32_t Addend) {
380  switch (Type) {
381  case ELF::R_386_32: {
382  support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
383  Value + Addend;
384  break;
385  }
386  // Handle R_386_PLT32 like R_386_PC32 since it should be able to
387  // reach any 32 bit address.
388  case ELF::R_386_PLT32:
389  case ELF::R_386_PC32: {
390  uint32_t FinalAddress =
391  Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
392  uint32_t RealOffset = Value + Addend - FinalAddress;
393  support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
394  RealOffset;
395  break;
396  }
397  default:
398  // There are other relocation types, but it appears these are the
399  // only ones currently used by the LLVM ELF object writer
400  report_fatal_error("Relocation type not implemented yet!");
401  break;
402  }
403 }
404 
405 void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
406  uint64_t Offset, uint64_t Value,
407  uint32_t Type, int64_t Addend) {
408  uint32_t *TargetPtr =
409  reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
410  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
411  // Data should use the target endianness. Code should always use little endian.
412  bool isBE = Arch == Triple::aarch64_be;
413 
414  LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
415  << format("%llx", Section.getAddressWithOffset(Offset))
416  << " FinalAddress: 0x" << format("%llx", FinalAddress)
417  << " Value: 0x" << format("%llx", Value) << " Type: 0x"
418  << format("%x", Type) << " Addend: 0x"
419  << format("%llx", Addend) << "\n");
420 
421  switch (Type) {
422  default:
423  report_fatal_error("Relocation type not implemented yet!");
424  break;
425  case ELF::R_AARCH64_NONE:
426  break;
427  case ELF::R_AARCH64_ABS16: {
428  uint64_t Result = Value + Addend;
429  assert(static_cast<int64_t>(Result) >= INT16_MIN && Result < UINT16_MAX);
430  write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
431  break;
432  }
433  case ELF::R_AARCH64_ABS32: {
434  uint64_t Result = Value + Addend;
435  assert(static_cast<int64_t>(Result) >= INT32_MIN && Result < UINT32_MAX);
436  write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
437  break;
438  }
439  case ELF::R_AARCH64_ABS64:
440  write(isBE, TargetPtr, Value + Addend);
441  break;
442  case ELF::R_AARCH64_PLT32: {
443  uint64_t Result = Value + Addend - FinalAddress;
444  assert(static_cast<int64_t>(Result) >= INT32_MIN &&
445  static_cast<int64_t>(Result) <= INT32_MAX);
446  write(isBE, TargetPtr, static_cast<uint32_t>(Result));
447  break;
448  }
449  case ELF::R_AARCH64_PREL16: {
450  uint64_t Result = Value + Addend - FinalAddress;
451  assert(static_cast<int64_t>(Result) >= INT16_MIN &&
452  static_cast<int64_t>(Result) <= UINT16_MAX);
453  write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
454  break;
455  }
456  case ELF::R_AARCH64_PREL32: {
457  uint64_t Result = Value + Addend - FinalAddress;
458  assert(static_cast<int64_t>(Result) >= INT32_MIN &&
459  static_cast<int64_t>(Result) <= UINT32_MAX);
460  write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
461  break;
462  }
463  case ELF::R_AARCH64_PREL64:
464  write(isBE, TargetPtr, Value + Addend - FinalAddress);
465  break;
466  case ELF::R_AARCH64_CONDBR19: {
467  uint64_t BranchImm = Value + Addend - FinalAddress;
468 
469  assert(isInt<21>(BranchImm));
470  *TargetPtr &= 0xff00001fU;
471  // Immediate:20:2 goes in bits 23:5 of Bcc, CBZ, CBNZ
472  or32le(TargetPtr, (BranchImm & 0x001FFFFC) << 3);
473  break;
474  }
475  case ELF::R_AARCH64_TSTBR14: {
476  uint64_t BranchImm = Value + Addend - FinalAddress;
477 
478  assert(isInt<16>(BranchImm));
479 
480  *TargetPtr &= 0xfff8001fU;
481  // Immediate:15:2 goes in bits 18:5 of TBZ, TBNZ
482  or32le(TargetPtr, (BranchImm & 0x0000FFFC) << 3);
483  break;
484  }
485  case ELF::R_AARCH64_CALL26: // fallthrough
486  case ELF::R_AARCH64_JUMP26: {
487  // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
488  // calculation.
489  uint64_t BranchImm = Value + Addend - FinalAddress;
490 
491  // "Check that -2^27 <= result < 2^27".
492  assert(isInt<28>(BranchImm));
493  or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
494  break;
495  }
496  case ELF::R_AARCH64_MOVW_UABS_G3:
497  or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
498  break;
499  case ELF::R_AARCH64_MOVW_UABS_G2_NC:
500  or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
501  break;
502  case ELF::R_AARCH64_MOVW_UABS_G1_NC:
503  or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
504  break;
505  case ELF::R_AARCH64_MOVW_UABS_G0_NC:
506  or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
507  break;
508  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
509  // Operation: Page(S+A) - Page(P)
510  uint64_t Result =
511  ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
512 
513  // Check that -2^32 <= X < 2^32
514  assert(isInt<33>(Result) && "overflow check failed for relocation");
515 
516  // Immediate goes in bits 30:29 + 23:5 of the ADRP instruction, taken
517  // from bits 32:12 of X.
518  write32AArch64Addr(TargetPtr, Result >> 12);
519  break;
520  }
521  case ELF::R_AARCH64_ADD_ABS_LO12_NC:
522  // Operation: S + A
523  // Immediate goes in bits 21:10 of the ADD immediate instruction, taken
524  // from bits 11:0 of X
525  or32AArch64Imm(TargetPtr, Value + Addend);
526  break;
527  case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
528  // Operation: S + A
529  // Immediate goes in bits 21:10 of LD/ST instruction, taken
530  // from bits 11:0 of X
531  or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
532  break;
533  case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
534  // Operation: S + A
535  // Immediate goes in bits 21:10 of LD/ST instruction, taken
536  // from bits 11:1 of X
537  or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
538  break;
539  case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
540  // Operation: S + A
541  // Immediate goes in bits 21:10 of LD/ST instruction, taken
542  // from bits 11:2 of X
543  or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
544  break;
545  case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
546  // Operation: S + A
547  // Immediate goes in bits 21:10 of LD/ST instruction, taken
548  // from bits 11:3 of X
549  or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
550  break;
551  case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
552  // Operation: S + A
553  // Immediate goes in bits 21:10 of LD/ST instruction, taken
554  // from bits 11:4 of X
555  or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
556  break;
557  case ELF::R_AARCH64_LD_PREL_LO19: {
558  // Operation: S + A - P
559  uint64_t Result = Value + Addend - FinalAddress;
560 
561  // "Check that -2^20 <= result < 2^20".
562  assert(isInt<21>(Result));
563 
564  *TargetPtr &= 0xff00001fU;
565  // Immediate goes in bits 23:5 of LD imm instruction, taken
566  // from bits 20:2 of X
567  *TargetPtr |= ((Result & 0xffc) << (5 - 2));
568  break;
569  }
570  case ELF::R_AARCH64_ADR_PREL_LO21: {
571  // Operation: S + A - P
572  uint64_t Result = Value + Addend - FinalAddress;
573 
574  // "Check that -2^20 <= result < 2^20".
575  assert(isInt<21>(Result));
576 
577  *TargetPtr &= 0x9f00001fU;
578  // Immediate goes in bits 23:5, 30:29 of ADR imm instruction, taken
579  // from bits 20:0 of X
580  *TargetPtr |= ((Result & 0xffc) << (5 - 2));
581  *TargetPtr |= (Result & 0x3) << 29;
582  break;
583  }
584  }
585 }
586 
587 void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
588  uint64_t Offset, uint32_t Value,
589  uint32_t Type, int32_t Addend) {
590  // TODO: Add Thumb relocations.
591  uint32_t *TargetPtr =
592  reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
593  uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
594  Value += Addend;
595 
596  LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
597  << Section.getAddressWithOffset(Offset)
598  << " FinalAddress: " << format("%p", FinalAddress)
599  << " Value: " << format("%x", Value)
600  << " Type: " << format("%x", Type)
601  << " Addend: " << format("%x", Addend) << "\n");
602 
603  switch (Type) {
604  default:
605  llvm_unreachable("Not implemented relocation type!");
606 
607  case ELF::R_ARM_NONE:
608  break;
609  // Write a 31-bit signed offset.
610  case ELF::R_ARM_PREL31:
611  support::ulittle32_t::ref{TargetPtr} =
612  (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
613  ((Value - FinalAddress) & ~0x80000000);
614  break;
615  case ELF::R_ARM_TARGET1:
616  case ELF::R_ARM_ABS32:
617  support::ulittle32_t::ref{TargetPtr} = Value;
618  break;
619  // Write a 16-bit half of the 32-bit value into the mov instruction:
620  // the low 12 bits go into imm12 and the top 4 bits into imm4 (bits 19:16).
621  case ELF::R_ARM_MOVW_ABS_NC:
622  case ELF::R_ARM_MOVT_ABS:
623  if (Type == ELF::R_ARM_MOVW_ABS_NC)
624  Value = Value & 0xFFFF;
625  else if (Type == ELF::R_ARM_MOVT_ABS)
626  Value = (Value >> 16) & 0xFFFF;
627  support::ulittle32_t::ref{TargetPtr} =
628  (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
629  (((Value >> 12) & 0xF) << 16);
630  break;
631  // Write a 24-bit relative value to the branch instruction.
632  case ELF::R_ARM_PC24: // Fall through.
633  case ELF::R_ARM_CALL: // Fall through.
634  case ELF::R_ARM_JUMP24:
635  int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
636  RelValue = (RelValue & 0x03FFFFFC) >> 2;
637  assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
638  support::ulittle32_t::ref{TargetPtr} =
639  (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
640  break;
641  }
642 }
643 
644 void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
645  if (Arch == Triple::UnknownArch ||
646  !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
647  IsMipsO32ABI = false;
648  IsMipsN32ABI = false;
649  IsMipsN64ABI = false;
650  return;
651  }
652  if (auto *E = dyn_cast<ELFObjectFileBase>(&Obj)) {
653  unsigned AbiVariant = E->getPlatformFlags();
654  IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
655  IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
656  }
657  IsMipsN64ABI = Obj.getFileFormatName().equals("elf64-mips");
658 }
659 
660 // Return the .TOC. section and offset.
661 Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
662  ObjSectionToIDMap &LocalSections,
663  RelocationValueRef &Rel) {
664  // Set a default SectionID in case we do not find a TOC section below.
665  // This may happen for references to the TOC base (sym@toc, .opd
666  // relocation) without a .toc directive. In this case just use the
667  // first section (which is usually the .opd) since the code won't
668  // reference the .toc base directly.
669  Rel.SymbolName = nullptr;
670  Rel.SectionID = 0;
671 
672  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
673  // order. The TOC starts where the first of these sections starts.
674  for (auto &Section : Obj.sections()) {
675  Expected<StringRef> NameOrErr = Section.getName();
676  if (!NameOrErr)
677  return NameOrErr.takeError();
678  StringRef SectionName = *NameOrErr;
679 
680  if (SectionName == ".got"
681  || SectionName == ".toc"
682  || SectionName == ".tocbss"
683  || SectionName == ".plt") {
684  if (auto SectionIDOrErr =
685  findOrEmitSection(Obj, Section, false, LocalSections))
686  Rel.SectionID = *SectionIDOrErr;
687  else
688  return SectionIDOrErr.takeError();
689  break;
690  }
691  }
692 
693  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
694  // thus permitting a full 64 KB segment.
695  Rel.Addend = 0x8000;
696 
697  return Error::success();
698 }
699 
700 // Returns the section and offset associated with the OPD entry referenced
701 // by Symbol.
702 Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
703  ObjSectionToIDMap &LocalSections,
704  RelocationValueRef &Rel) {
705  // Get the ELF symbol value (st_value) to compare with the relocation offset
706  // in .opd entries.
707  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
708  si != se; ++si) {
709 
710  Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
711  if (!RelSecOrErr)
712  report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
713 
714  section_iterator RelSecI = *RelSecOrErr;
715  if (RelSecI == Obj.section_end())
716  continue;
717 
718  Expected<StringRef> NameOrErr = RelSecI->getName();
719  if (!NameOrErr)
720  return NameOrErr.takeError();
721  StringRef RelSectionName = *NameOrErr;
722 
723  if (RelSectionName != ".opd")
724  continue;
725 
726  for (elf_relocation_iterator i = si->relocation_begin(),
727  e = si->relocation_end();
728  i != e;) {
729  // The R_PPC64_ADDR64 relocation indicates the first field
730  // of a .opd entry
731  uint64_t TypeFunc = i->getType();
732  if (TypeFunc != ELF::R_PPC64_ADDR64) {
733  ++i;
734  continue;
735  }
736 
737  uint64_t TargetSymbolOffset = i->getOffset();
738  symbol_iterator TargetSymbol = i->getSymbol();
739  int64_t Addend;
740  if (auto AddendOrErr = i->getAddend())
741  Addend = *AddendOrErr;
742  else
743  return AddendOrErr.takeError();
744 
745  ++i;
746  if (i == e)
747  break;
748 
749  // Just check whether the following relocation is an R_PPC64_TOC.
750  uint64_t TypeTOC = i->getType();
751  if (TypeTOC != ELF::R_PPC64_TOC)
752  continue;
753 
754  // Finally, compare the symbol value and the target symbol offset
755  // to check whether this .opd entry refers to the symbol the relocation
756  // points to.
757  if (Rel.Addend != (int64_t)TargetSymbolOffset)
758  continue;
759 
760  section_iterator TSI = Obj.section_end();
761  if (auto TSIOrErr = TargetSymbol->getSection())
762  TSI = *TSIOrErr;
763  else
764  return TSIOrErr.takeError();
765  assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
766 
767  bool IsCode = TSI->isText();
768  if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
769  LocalSections))
770  Rel.SectionID = *SectionIDOrErr;
771  else
772  return SectionIDOrErr.takeError();
773  Rel.Addend = (intptr_t)Addend;
774  return Error::success();
775  }
776  }
777  llvm_unreachable("Attempting to get address of ODP entry!");
778 }
779 
780 // Relocation masks following the #lo(value), #hi(value), #ha(value),
781 // #higher(value), #highera(value), #highest(value), and #highesta(value)
782 // macros defined in section 4.5.1 (Relocation Types) of the 64-bit PowerPC
783 // ELF ABI document.
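// For example, for the value 0x123456789ABCDEF0: #lo gives 0xDEF0, #hi gives
// 0x9ABC, and #ha gives 0x9ABD -- the +0x8000 bias compensates for the sign
// extension of the low 16 bits when the two halves are recombined.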
784 
785 static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
786 
787 static inline uint16_t applyPPChi(uint64_t value) {
788  return (value >> 16) & 0xffff;
789 }
790 
791 static inline uint16_t applyPPCha(uint64_t value) {
792  return ((value + 0x8000) >> 16) & 0xffff;
793 }
794 
795 static inline uint16_t applyPPChigher(uint64_t value) {
796  return (value >> 32) & 0xffff;
797 }
798 
799 static inline uint16_t applyPPChighera(uint64_t value) {
800  return ((value + 0x8000) >> 32) & 0xffff;
801 }
802 
803 static inline uint16_t applyPPChighest(uint64_t value) {
804  return (value >> 48) & 0xffff;
805 }
806 
807 static inline uint16_t applyPPChighesta(uint64_t value) {
808  return ((value + 0x8000) >> 48) & 0xffff;
809 }
810 
811 void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
812  uint64_t Offset, uint64_t Value,
813  uint32_t Type, int64_t Addend) {
814  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
815  switch (Type) {
816  default:
817  report_fatal_error("Relocation type not implemented yet!");
818  break;
819  case ELF::R_PPC_ADDR16_LO:
820  writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
821  break;
822  case ELF::R_PPC_ADDR16_HI:
823  writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
824  break;
825  case ELF::R_PPC_ADDR16_HA:
826  writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
827  break;
828  }
829 }
830 
831 void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
832  uint64_t Offset, uint64_t Value,
833  uint32_t Type, int64_t Addend) {
834  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
835  switch (Type) {
836  default:
837  report_fatal_error("Relocation type not implemented yet!");
838  break;
839  case ELF::R_PPC64_ADDR16:
840  writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
841  break;
842  case ELF::R_PPC64_ADDR16_DS:
843  writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
844  break;
845  case ELF::R_PPC64_ADDR16_LO:
846  writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
847  break;
848  case ELF::R_PPC64_ADDR16_LO_DS:
849  writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
850  break;
851  case ELF::R_PPC64_ADDR16_HI:
852  case ELF::R_PPC64_ADDR16_HIGH:
853  writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
854  break;
855  case ELF::R_PPC64_ADDR16_HA:
856  case ELF::R_PPC64_ADDR16_HIGHA:
857  writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
858  break;
859  case ELF::R_PPC64_ADDR16_HIGHER:
860  writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
861  break;
862  case ELF::R_PPC64_ADDR16_HIGHERA:
863  writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
864  break;
865  case ELF::R_PPC64_ADDR16_HIGHEST:
866  writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
867  break;
868  case ELF::R_PPC64_ADDR16_HIGHESTA:
869  writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
870  break;
871  case ELF::R_PPC64_ADDR14: {
872  assert(((Value + Addend) & 3) == 0);
873  // Preserve the AA/LK bits in the branch instruction
874  uint8_t aalk = *(LocalAddress + 3);
875  writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
876  } break;
877  case ELF::R_PPC64_REL16_LO: {
878  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
879  uint64_t Delta = Value - FinalAddress + Addend;
880  writeInt16BE(LocalAddress, applyPPClo(Delta));
881  } break;
882  case ELF::R_PPC64_REL16_HI: {
883  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
884  uint64_t Delta = Value - FinalAddress + Addend;
885  writeInt16BE(LocalAddress, applyPPChi(Delta));
886  } break;
887  case ELF::R_PPC64_REL16_HA: {
888  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
889  uint64_t Delta = Value - FinalAddress + Addend;
890  writeInt16BE(LocalAddress, applyPPCha(Delta));
891  } break;
892  case ELF::R_PPC64_ADDR32: {
893  int64_t Result = static_cast<int64_t>(Value + Addend);
894  if (SignExtend64<32>(Result) != Result)
895  llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
896  writeInt32BE(LocalAddress, Result);
897  } break;
898  case ELF::R_PPC64_REL24: {
899  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
900  int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
901  if (SignExtend64<26>(delta) != delta)
902  llvm_unreachable("Relocation R_PPC64_REL24 overflow");
903  // We preserve bits other than LI field, i.e. PO and AA/LK fields.
904  uint32_t Inst = readBytesUnaligned(LocalAddress, 4);
905  writeInt32BE(LocalAddress, (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
906  } break;
907  case ELF::R_PPC64_REL32: {
908  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
909  int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
910  if (SignExtend64<32>(delta) != delta)
911  llvm_unreachable("Relocation R_PPC64_REL32 overflow");
912  writeInt32BE(LocalAddress, delta);
913  } break;
914  case ELF::R_PPC64_REL64: {
915  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
916  uint64_t Delta = Value - FinalAddress + Addend;
917  writeInt64BE(LocalAddress, Delta);
918  } break;
919  case ELF::R_PPC64_ADDR64:
920  writeInt64BE(LocalAddress, Value + Addend);
921  break;
922  }
923 }
924 
925 void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
926  uint64_t Offset, uint64_t Value,
927  uint32_t Type, int64_t Addend) {
928  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
929  switch (Type) {
930  default:
931  report_fatal_error("Relocation type not implemented yet!");
932  break;
933  case ELF::R_390_PC16DBL:
934  case ELF::R_390_PLT16DBL: {
935  int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
936  assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
937  writeInt16BE(LocalAddress, Delta / 2);
938  break;
939  }
940  case ELF::R_390_PC32DBL:
941  case ELF::R_390_PLT32DBL: {
942  int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
943  assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
944  writeInt32BE(LocalAddress, Delta / 2);
945  break;
946  }
947  case ELF::R_390_PC16: {
948  int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
949  assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
950  writeInt16BE(LocalAddress, Delta);
951  break;
952  }
953  case ELF::R_390_PC32: {
954  int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
955  assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
956  writeInt32BE(LocalAddress, Delta);
957  break;
958  }
959  case ELF::R_390_PC64: {
960  int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
961  writeInt64BE(LocalAddress, Delta);
962  break;
963  }
964  case ELF::R_390_8:
965  *LocalAddress = (uint8_t)(Value + Addend);
966  break;
967  case ELF::R_390_16:
968  writeInt16BE(LocalAddress, Value + Addend);
969  break;
970  case ELF::R_390_32:
971  writeInt32BE(LocalAddress, Value + Addend);
972  break;
973  case ELF::R_390_64:
974  writeInt64BE(LocalAddress, Value + Addend);
975  break;
976  }
977 }
978 
979 void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
980  uint64_t Offset, uint64_t Value,
981  uint32_t Type, int64_t Addend) {
982  bool isBE = Arch == Triple::bpfeb;
983 
984  switch (Type) {
985  default:
986  report_fatal_error("Relocation type not implemented yet!");
987  break;
988  case ELF::R_BPF_NONE:
989  case ELF::R_BPF_64_64:
990  case ELF::R_BPF_64_32:
991  case ELF::R_BPF_64_NODYLD32:
992  break;
993  case ELF::R_BPF_64_ABS64: {
994  write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
995  LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
996  << format("%p\n", Section.getAddressWithOffset(Offset)));
997  break;
998  }
999  case ELF::R_BPF_64_ABS32: {
1000  Value += Addend;
1001  assert(Value <= UINT32_MAX);
1002  write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
1003  LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
1004  << format("%p\n", Section.getAddressWithOffset(Offset)));
1005  break;
1006  }
1007  }
1008 }
1009 
1010 // The target location for the relocation is described by RE.SectionID and
1011 // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
1012 // SectionEntry has three members describing its location.
1013 // SectionEntry::Address is the address at which the section has been loaded
1014 // into memory in the current (host) process. SectionEntry::LoadAddress is the
1015 // address that the section will have in the target process.
1016 // SectionEntry::ObjAddress is the address of the bits for this section in the
1017 // original emitted object image (also in the current address space).
1018 //
1019 // Relocations will be applied as if the section were loaded at
1020 // SectionEntry::LoadAddress, but they will be applied at an address based
1021 // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
1022 // Target memory contents if they are required for value calculations.
1023 //
1024 // The Value parameter here is the load address of the symbol for the
1025 // relocation to be applied. For relocations which refer to symbols in the
1026 // current object Value will be the LoadAddress of the section in which
1027 // the symbol resides (RE.Addend provides additional information about the
1028 // symbol location). For external symbols, Value will be the address of the
1029 // symbol in the target address space.
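// For example, for a PC-relative relocation such as R_X86_64_PC32 the
// resolver computes Value + Addend - FinalAddress, where FinalAddress is the
// section's LoadAddress plus the relocation's Offset.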
1030 void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
1031  uint64_t Value) {
1032  const SectionEntry &Section = Sections[RE.SectionID];
1033  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
1034  RE.SymOffset, RE.SectionID);
1035 }
1036 
1037 void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
1038  uint64_t Offset, uint64_t Value,
1039  uint32_t Type, int64_t Addend,
1040  uint64_t SymOffset, SID SectionID) {
1041  switch (Arch) {
1042  case Triple::x86_64:
1043  resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
1044  break;
1045  case Triple::x86:
1046  resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
1047  (uint32_t)(Addend & 0xffffffffL));
1048  break;
1049  case Triple::aarch64:
1050  case Triple::aarch64_be:
1051  resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
1052  break;
1053  case Triple::arm: // Fall through.
1054  case Triple::armeb:
1055  case Triple::thumb:
1056  case Triple::thumbeb:
1057  resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
1058  (uint32_t)(Addend & 0xffffffffL));
1059  break;
1060  case Triple::ppc: // Fall through.
1061  case Triple::ppcle:
1062  resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
1063  break;
1064  case Triple::ppc64: // Fall through.
1065  case Triple::ppc64le:
1066  resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
1067  break;
1068  case Triple::systemz:
1069  resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
1070  break;
1071  case Triple::bpfel:
1072  case Triple::bpfeb:
1073  resolveBPFRelocation(Section, Offset, Value, Type, Addend);
1074  break;
1075  default:
1076  llvm_unreachable("Unsupported CPU type!");
1077  }
1078 }
1079 
1080 void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const {
1081  return (void *)(Sections[SectionID].getObjAddress() + Offset);
1082 }
1083 
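// Record a relocation that needs no special handling: it is kept to be
// resolved later, against either the named external symbol or the target
// section, once final load addresses are known.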
1084 void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
1085  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
1086  if (Value.SymbolName)
1087  addRelocationForSymbol(RE, Value.SymbolName);
1088  else
1089  addRelocationForSection(RE, Value.SectionID);
1090 }
1091 
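// Map a MIPS HI16/GOT16-style relocation type to the LO16 type expected to
// pair with it; processRelocationRef uses this to match pending HI16 entries
// with the LO16 relocation that carries the low half of the addend.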
1092 uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
1093  bool IsLocal) const {
1094  switch (RelType) {
1095  case ELF::R_MICROMIPS_GOT16:
1096  if (IsLocal)
1097  return ELF::R_MICROMIPS_LO16;
1098  break;
1099  case ELF::R_MICROMIPS_HI16:
1100  return ELF::R_MICROMIPS_LO16;
1101  case ELF::R_MIPS_GOT16:
1102  if (IsLocal)
1103  return ELF::R_MIPS_LO16;
1104  break;
1105  case ELF::R_MIPS_HI16:
1106  return ELF::R_MIPS_LO16;
1107  case ELF::R_MIPS_PCHI16:
1108  return ELF::R_MIPS_PCLO16;
1109  default:
1110  break;
1111  }
1112  return ELF::R_MIPS_NONE;
1113 }
1114 
1115 // Sometimes we don't need to create a thunk for a branch.
1116 // This typically happens when the branch target is located
1117 // in the same object file. In such a case the target is either
1118 // a weak symbol or a symbol in a different executable section.
1119 // This function checks whether the branch target is located in the
1120 // same object file and whether the distance between source and target
1121 // fits the R_AARCH64_CALL26 relocation. If both conditions are
1122 // met, it emits a direct jump to the target and returns true.
1123 // Otherwise false is returned and a thunk is created.
1124 bool RuntimeDyldELF::resolveAArch64ShortBranch(
1125  unsigned SectionID, relocation_iterator RelI,
1126  const RelocationValueRef &Value) {
1127  uint64_t Address;
1128  if (Value.SymbolName) {
1129  auto Loc = GlobalSymbolTable.find(Value.SymbolName);
1130 
1131  // Don't create direct branch for external symbols.
1132  if (Loc == GlobalSymbolTable.end())
1133  return false;
1134 
1135  const auto &SymInfo = Loc->second;
1136  Address =
1137  uint64_t(Sections[SymInfo.getSectionID()].getLoadAddressWithOffset(
1138  SymInfo.getOffset()));
1139  } else {
1140  Address = uint64_t(Sections[Value.SectionID].getLoadAddress());
1141  }
1142  uint64_t Offset = RelI->getOffset();
1143  uint64_t SourceAddress = Sections[SectionID].getLoadAddressWithOffset(Offset);
1144 
1145  // R_AARCH64_CALL26 requires the immediate to be in the range
1146  // -2^27 <= imm < 2^27. If the distance between source and target is out of
1147  // range then we should create a thunk.
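  // (isInt<28> on the byte distance below corresponds to the +/-128 MiB reach
  // of the 26-bit, word-aligned branch immediate.)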
1148  if (!isInt<28>(Address + Value.Addend - SourceAddress))
1149  return false;
1150 
1151  resolveRelocation(Sections[SectionID], Offset, Address, RelI->getType(),
1152  Value.Addend);
1153 
1154  return true;
1155 }
1156 
1157 void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
1158  const RelocationValueRef &Value,
1159  relocation_iterator RelI,
1160  StubMap &Stubs) {
1161 
1162  LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
1163  SectionEntry &Section = Sections[SectionID];
1164 
1165  uint64_t Offset = RelI->getOffset();
1166  unsigned RelType = RelI->getType();
1167  // Look for an existing stub.
1168  StubMap::const_iterator i = Stubs.find(Value);
1169  if (i != Stubs.end()) {
1170  resolveRelocation(Section, Offset,
1171  (uint64_t)Section.getAddressWithOffset(i->second),
1172  RelType, 0);
1173  LLVM_DEBUG(dbgs() << " Stub function found\n");
1174  } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
1175  // Create a new stub function.
1176  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1177  Stubs[Value] = Section.getStubOffset();
1178  uint8_t *StubTargetAddr = createStubFunction(
1179  Section.getAddressWithOffset(Section.getStubOffset()));
1180 
1181  RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
1182  ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
1183  RelocationEntry REmovk_g2(SectionID,
1184  StubTargetAddr - Section.getAddress() + 4,
1185  ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
1186  RelocationEntry REmovk_g1(SectionID,
1187  StubTargetAddr - Section.getAddress() + 8,
1188  ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
1189  RelocationEntry REmovk_g0(SectionID,
1190  StubTargetAddr - Section.getAddress() + 12,
1191  ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
1192 
1193  if (Value.SymbolName) {
1194  addRelocationForSymbol(REmovz_g3, Value.SymbolName);
1195  addRelocationForSymbol(REmovk_g2, Value.SymbolName);
1196  addRelocationForSymbol(REmovk_g1, Value.SymbolName);
1197  addRelocationForSymbol(REmovk_g0, Value.SymbolName);
1198  } else {
1199  addRelocationForSection(REmovz_g3, Value.SectionID);
1200  addRelocationForSection(REmovk_g2, Value.SectionID);
1201  addRelocationForSection(REmovk_g1, Value.SectionID);
1202  addRelocationForSection(REmovk_g0, Value.SectionID);
1203  }
1204  resolveRelocation(Section, Offset,
1205  reinterpret_cast<uint64_t>(Section.getAddressWithOffset(
1206  Section.getStubOffset())),
1207  RelType, 0);
1208  Section.advanceStubOffset(getMaxStubSize());
1209  }
1210 }
1211 
1212 Expected<relocation_iterator>
1213 RuntimeDyldELF::processRelocationRef(
1214  unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
1215  ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
1216  const auto &Obj = cast<ELFObjectFileBase>(O);
1217  uint64_t RelType = RelI->getType();
1218  int64_t Addend = 0;
1219  if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
1220  Addend = *AddendOrErr;
1221  else
1222  consumeError(AddendOrErr.takeError());
1223  elf_symbol_iterator Symbol = RelI->getSymbol();
1224 
1225  // Obtain the symbol name which is referenced in the relocation
1226  StringRef TargetName;
1227  if (Symbol != Obj.symbol_end()) {
1228  if (auto TargetNameOrErr = Symbol->getName())
1229  TargetName = *TargetNameOrErr;
1230  else
1231  return TargetNameOrErr.takeError();
1232  }
1233  LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
1234  << " TargetName: " << TargetName << "\n");
1235  RelocationValueRef Value;
1236  // First search for the symbol in the local symbol table
1237  SymbolRef::Type SymType = SymbolRef::ST_Unknown;
1238 
1239  // Search for the symbol in the global symbol table
1240  RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
1241  if (Symbol != Obj.symbol_end()) {
1242  gsi = GlobalSymbolTable.find(TargetName.data());
1243  Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
1244  if (!SymTypeOrErr) {
1245  std::string Buf;
1246  raw_string_ostream OS(Buf);
1247  logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
1248  report_fatal_error(Twine(OS.str()));
1249  }
1250  SymType = *SymTypeOrErr;
1251  }
1252  if (gsi != GlobalSymbolTable.end()) {
1253  const auto &SymInfo = gsi->second;
1254  Value.SectionID = SymInfo.getSectionID();
1255  Value.Offset = SymInfo.getOffset();
1256  Value.Addend = SymInfo.getOffset() + Addend;
1257  } else {
1258  switch (SymType) {
1259  case SymbolRef::ST_Debug: {
1260  // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION; this is not obvious
1261  // and could be changed by other developers. Maybe the best way is to add
1262  // a new symbol type ST_Section to SymbolRef and use it.
1263  auto SectionOrErr = Symbol->getSection();
1264  if (!SectionOrErr) {
1265  std::string Buf;
1266  raw_string_ostream OS(Buf);
1267  logAllUnhandledErrors(SectionOrErr.takeError(), OS);
1268  report_fatal_error(Twine(OS.str()));
1269  }
1270  section_iterator si = *SectionOrErr;
1271  if (si == Obj.section_end())
1272  llvm_unreachable("Symbol section not found, bad object file format!");
1273  LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
1274  bool isCode = si->isText();
1275  if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
1276  ObjSectionToID))
1277  Value.SectionID = *SectionIDOrErr;
1278  else
1279  return SectionIDOrErr.takeError();
1280  Value.Addend = Addend;
1281  break;
1282  }
1283  case SymbolRef::ST_Data:
1284  case SymbolRef::ST_Function:
1285  case SymbolRef::ST_Unknown: {
1286  Value.SymbolName = TargetName.data();
1287  Value.Addend = Addend;
1288 
1289  // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
1290  // will manifest here as a NULL symbol name.
1291  // We can set this as a valid (but empty) symbol name, and rely
1292  // on addRelocationForSymbol to handle this.
1293  if (!Value.SymbolName)
1294  Value.SymbolName = "";
1295  break;
1296  }
1297  default:
1298  llvm_unreachable("Unresolved symbol type!");
1299  break;
1300  }
1301  }
1302 
1303  uint64_t Offset = RelI->getOffset();
1304 
1305  LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
1306  << "\n");
1307  if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be)) {
1308  if ((RelType == ELF::R_AARCH64_CALL26 ||
1309  RelType == ELF::R_AARCH64_JUMP26) &&
1310  MemMgr.allowStubAllocation()) {
1311  resolveAArch64Branch(SectionID, Value, RelI, Stubs);
1312  } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
1313  // Create new GOT entry or find existing one. If GOT entry is
1314  // to be created, then we also emit ABS64 relocation for it.
1315  uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
1316  resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1317  ELF::R_AARCH64_ADR_PREL_PG_HI21);
1318 
1319  } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
1320  uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
1321  resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1322  ELF::R_AARCH64_LDST64_ABS_LO12_NC);
1323  } else {
1324  processSimpleRelocation(SectionID, Offset, RelType, Value);
1325  }
1326  } else if (Arch == Triple::arm) {
1327  if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
1328  RelType == ELF::R_ARM_JUMP24) {
1329  // This is an ARM branch relocation, need to use a stub function.
1330  LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
1331  SectionEntry &Section = Sections[SectionID];
1332 
1333  // Look for an existing stub.
1334  StubMap::const_iterator i = Stubs.find(Value);
1335  if (i != Stubs.end()) {
1336  resolveRelocation(
1337  Section, Offset,
1338  reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)),
1339  RelType, 0);
1340  LLVM_DEBUG(dbgs() << " Stub function found\n");
1341  } else {
1342  // Create a new stub function.
1343  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1344  Stubs[Value] = Section.getStubOffset();
1345  uint8_t *StubTargetAddr = createStubFunction(
1346  Section.getAddressWithOffset(Section.getStubOffset()));
1347  RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1348  ELF::R_ARM_ABS32, Value.Addend);
1349  if (Value.SymbolName)
1350  addRelocationForSymbol(RE, Value.SymbolName);
1351  else
1352  addRelocationForSection(RE, Value.SectionID);
1353 
1354  resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
1355  Section.getAddressWithOffset(
1356  Section.getStubOffset())),
1357  RelType, 0);
1358  Section.advanceStubOffset(getMaxStubSize());
1359  }
1360  } else {
1361  uint32_t *Placeholder =
1362  reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
1363  if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
1364  RelType == ELF::R_ARM_ABS32) {
1365  Value.Addend += *Placeholder;
1366  } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
1367  // See ELF for ARM documentation
1368  Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
1369  }
1370  processSimpleRelocation(SectionID, Offset, RelType, Value);
1371  }
1372  } else if (IsMipsO32ABI) {
1373  uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
1374  computePlaceholderAddress(SectionID, Offset));
1375  uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
1376  if (RelType == ELF::R_MIPS_26) {
1377  // This is a MIPS branch relocation; we need to use a stub function.
1378  LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1379  SectionEntry &Section = Sections[SectionID];
1380 
1381  // Extract the addend from the instruction.
1382  // We shift up by two since the Value will be shifted down again
1383  // when applying the relocation.
1384  uint32_t Addend = (Opcode & 0x03ffffff) << 2;
1385 
1386  Value.Addend += Addend;
1387 
1388  // Look for an existing stub.
1389  StubMap::const_iterator i = Stubs.find(Value);
1390  if (i != Stubs.end()) {
1391  RelocationEntry RE(SectionID, Offset, RelType, i->second);
1392  addRelocationForSection(RE, SectionID);
1393  LLVM_DEBUG(dbgs() << " Stub function found\n");
1394  } else {
1395  // Create a new stub function.
1396  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1397  Stubs[Value] = Section.getStubOffset();
1398 
1399  unsigned AbiVariant = Obj.getPlatformFlags();
1400 
1401  uint8_t *StubTargetAddr = createStubFunction(
1402  Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
1403 
1404  // Creating Hi and Lo relocations for the filled stub instructions.
1405  RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1406  ELF::R_MIPS_HI16, Value.Addend);
1407  RelocationEntry RELo(SectionID,
1408  StubTargetAddr - Section.getAddress() + 4,
1409  ELF::R_MIPS_LO16, Value.Addend);
1410 
1411  if (Value.SymbolName) {
1412  addRelocationForSymbol(REHi, Value.SymbolName);
1413  addRelocationForSymbol(RELo, Value.SymbolName);
1414  } else {
1415  addRelocationForSection(REHi, Value.SectionID);
1416  addRelocationForSection(RELo, Value.SectionID);
1417  }
1418 
1419  RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1420  addRelocationForSection(RE, SectionID);
1421  Section.advanceStubOffset(getMaxStubSize());
1422  }
1423  } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
1424  int64_t Addend = (Opcode & 0x0000ffff) << 16;
1425  RelocationEntry RE(SectionID, Offset, RelType, Addend);
1426  PendingRelocs.push_back(std::make_pair(Value, RE));
1427  } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
1428  int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
1429  for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
1430  const RelocationValueRef &MatchingValue = I->first;
1431  RelocationEntry &Reloc = I->second;
1432  if (MatchingValue == Value &&
1433  RelType == getMatchingLoRelocation(Reloc.RelType) &&
1434  SectionID == Reloc.SectionID) {
1435  Reloc.Addend += Addend;
1436  if (Value.SymbolName)
1437  addRelocationForSymbol(Reloc, Value.SymbolName);
1438  else
1439  addRelocationForSection(Reloc, Value.SectionID);
1440  I = PendingRelocs.erase(I);
1441  } else
1442  ++I;
1443  }
1444  RelocationEntry RE(SectionID, Offset, RelType, Addend);
1445  if (Value.SymbolName)
1446  addRelocationForSymbol(RE, Value.SymbolName);
1447  else
1448  addRelocationForSection(RE, Value.SectionID);
1449  } else {
1450  if (RelType == ELF::R_MIPS_32)
1451  Value.Addend += Opcode;
1452  else if (RelType == ELF::R_MIPS_PC16)
1453  Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
1454  else if (RelType == ELF::R_MIPS_PC19_S2)
1455  Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
1456  else if (RelType == ELF::R_MIPS_PC21_S2)
1457  Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
1458  else if (RelType == ELF::R_MIPS_PC26_S2)
1459  Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
1460  processSimpleRelocation(SectionID, Offset, RelType, Value);
1461  }
1462  } else if (IsMipsN32ABI || IsMipsN64ABI) {
1463  uint32_t r_type = RelType & 0xff;
1464  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1465  if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
1466  || r_type == ELF::R_MIPS_GOT_DISP) {
1467  StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
1468  if (i != GOTSymbolOffsets.end())
1469  RE.SymOffset = i->second;
1470  else {
1471  RE.SymOffset = allocateGOTEntries(1);
1472  GOTSymbolOffsets[TargetName] = RE.SymOffset;
1473  }
1474  if (Value.SymbolName)
1475  addRelocationForSymbol(RE, Value.SymbolName);
1476  else
1477  addRelocationForSection(RE, Value.SectionID);
1478  } else if (RelType == ELF::R_MIPS_26) {
1479  // This is a MIPS branch relocation; we need to use a stub function.
1480  LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1481  SectionEntry &Section = Sections[SectionID];
1482 
1483  // Look for an existing stub.
1484  StubMap::const_iterator i = Stubs.find(Value);
1485  if (i != Stubs.end()) {
1486  RelocationEntry RE(SectionID, Offset, RelType, i->second);
1487  addRelocationForSection(RE, SectionID);
1488  LLVM_DEBUG(dbgs() << " Stub function found\n");
1489  } else {
1490  // Create a new stub function.
1491  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1492  Stubs[Value] = Section.getStubOffset();
1493 
1494  unsigned AbiVariant = Obj.getPlatformFlags();
1495 
1496  uint8_t *StubTargetAddr = createStubFunction(
1497  Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
1498 
1499  if (IsMipsN32ABI) {
1500  // Creating Hi and Lo relocations for the filled stub instructions.
1501  RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1502  ELF::R_MIPS_HI16, Value.Addend);
1503  RelocationEntry RELo(SectionID,
1504  StubTargetAddr - Section.getAddress() + 4,
1505  ELF::R_MIPS_LO16, Value.Addend);
1506  if (Value.SymbolName) {
1507  addRelocationForSymbol(REHi, Value.SymbolName);
1508  addRelocationForSymbol(RELo, Value.SymbolName);
1509  } else {
1510  addRelocationForSection(REHi, Value.SectionID);
1511  addRelocationForSection(RELo, Value.SectionID);
1512  }
1513  } else {
1514  // Creating Highest, Higher, Hi and Lo relocations for the filled stub
1515  // instructions.
1516  RelocationEntry REHighest(SectionID,
1517  StubTargetAddr - Section.getAddress(),
1518  ELF::R_MIPS_HIGHEST, Value.Addend);
1519  RelocationEntry REHigher(SectionID,
1520  StubTargetAddr - Section.getAddress() + 4,
1521  ELF::R_MIPS_HIGHER, Value.Addend);
1522  RelocationEntry REHi(SectionID,
1523  StubTargetAddr - Section.getAddress() + 12,
1524  ELF::R_MIPS_HI16, Value.Addend);
1525  RelocationEntry RELo(SectionID,
1526  StubTargetAddr - Section.getAddress() + 20,
1527  ELF::R_MIPS_LO16, Value.Addend);
1528  if (Value.SymbolName) {
1529  addRelocationForSymbol(REHighest, Value.SymbolName);
1530  addRelocationForSymbol(REHigher, Value.SymbolName);
1531  addRelocationForSymbol(REHi, Value.SymbolName);
1532  addRelocationForSymbol(RELo, Value.SymbolName);
1533  } else {
1534  addRelocationForSection(REHighest, Value.SectionID);
1535  addRelocationForSection(REHigher, Value.SectionID);
1536  addRelocationForSection(REHi, Value.SectionID);
1537  addRelocationForSection(RELo, Value.SectionID);
1538  }
1539  }
1540  RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1541  addRelocationForSection(RE, SectionID);
1542  Section.advanceStubOffset(getMaxStubSize());
1543  }
1544  } else {
1545  processSimpleRelocation(SectionID, Offset, RelType, Value);
1546  }
1547 
1548  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
1549  if (RelType == ELF::R_PPC64_REL24) {
1550  // Determine ABI variant in use for this object.
1551  unsigned AbiVariant = Obj.getPlatformFlags();
1552  AbiVariant &= ELF::EF_PPC64_ABI;
1553  // A PPC branch relocation will need a stub function if the target is
1554  // an external symbol (either Value.SymbolName is set, or SymType is
1555  // SymbolRef::ST_Unknown) or if the target address is not within range
1556  // of the signed 24-bit branch displacement.
1557  SectionEntry &Section = Sections[SectionID];
1558  uint8_t *Target = Section.getAddressWithOffset(Offset);
1559  bool RangeOverflow = false;
1560  bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
1561  if (!IsExtern) {
1562  if (AbiVariant != 2) {
1563  // In the ELFv1 ABI, a function call may point to the .opd entry,
1564  // so the final symbol value is calculated based on the relocation
1565  // values in the .opd section.
1566  if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
1567  return std::move(Err);
1568  } else {
1569  // In the ELFv2 ABI, a function symbol may provide a local entry
1570  // point, which must be used for direct calls.
1571  if (Value.SectionID == SectionID){
1572  uint8_t SymOther = Symbol->getOther();
1573  Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
1574  }
1575  }
1576  uint8_t *RelocTarget =
1577  Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
1578  int64_t delta = static_cast<int64_t>(Target - RelocTarget);
1579  // If it is within the signed 26-bit branch range, just set the branch target
1580  if (SignExtend64<26>(delta) != delta) {
1581  RangeOverflow = true;
1582  } else if ((AbiVariant != 2) ||
1583  (AbiVariant == 2 && Value.SectionID == SectionID)) {
1584  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1585  addRelocationForSection(RE, Value.SectionID);
1586  }
1587  }
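 // Illustrative note (editorial, not in the original file): the test above,
 // SignExtend64<26>(delta) != delta, is the signed 26-bit range check for an
 // R_PPC64_REL24 branch (a 24-bit field of word displacements, roughly
 // +/-32 MiB). For example, delta = 0x2000000 sign-extends to -0x2000000 and
 // therefore overflows, while delta = 0x1fffffc is representable and passes.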
1588  if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
1589  RangeOverflow) {
1590  // It is an external symbol (either Value.SymbolName is set, or
1591  // SymType is SymbolRef::ST_Unknown) or out of range.
1592  StubMap::const_iterator i = Stubs.find(Value);
1593  if (i != Stubs.end()) {
1594  // Symbol function stub already created, just relocate to it
1595  resolveRelocation(Section, Offset,
1596  reinterpret_cast<uint64_t>(
1597  Section.getAddressWithOffset(i->second)),
1598  RelType, 0);
1599  LLVM_DEBUG(dbgs() << " Stub function found\n");
1600  } else {
1601  // Create a new stub function.
1602  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1603  Stubs[Value] = Section.getStubOffset();
1604  uint8_t *StubTargetAddr = createStubFunction(
1605  Section.getAddressWithOffset(Section.getStubOffset()),
1606  AbiVariant);
1607  RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1608  ELF::R_PPC64_ADDR64, Value.Addend);
1609 
1610  // Generate the 64-bit address load sequence described in section
1611  // 4.5.1 of the PPC64 ELF ABI. Note that the relocations need to
1612  // apply to the low halfword of each instruction, so we have to adjust
1613  // the offset according to the target endianness.
1614  uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
1615  if (!IsTargetLittleEndian)
1616  StubRelocOffset += 2;
1617 
1618  RelocationEntry REhst(SectionID, StubRelocOffset + 0,
1619  ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
1620  RelocationEntry REhr(SectionID, StubRelocOffset + 4,
1621  ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
1622  RelocationEntry REh(SectionID, StubRelocOffset + 12,
1623  ELF::R_PPC64_ADDR16_HI, Value.Addend);
1624  RelocationEntry REl(SectionID, StubRelocOffset + 16,
1625  ELF::R_PPC64_ADDR16_LO, Value.Addend);
1626 
1627  if (Value.SymbolName) {
1628  addRelocationForSymbol(REhst, Value.SymbolName);
1629  addRelocationForSymbol(REhr, Value.SymbolName);
1630  addRelocationForSymbol(REh, Value.SymbolName);
1631  addRelocationForSymbol(REl, Value.SymbolName);
1632  } else {
1633  addRelocationForSection(REhst, Value.SectionID);
1634  addRelocationForSection(REhr, Value.SectionID);
1635  addRelocationForSection(REh, Value.SectionID);
1636  addRelocationForSection(REl, Value.SectionID);
1637  }
1638 
1639  resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
1640  Section.getAddressWithOffset(
1641  Section.getStubOffset())),
1642  RelType, 0);
1643  Section.advanceStubOffset(getMaxStubSize());
1644  }
1645  if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
1646  // Restore the TOC for external calls
1647  if (AbiVariant == 2)
1648  writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
1649  else
1650  writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
1651  }
1652  }
1653  } else if (RelType == ELF::R_PPC64_TOC16 ||
1654  RelType == ELF::R_PPC64_TOC16_DS ||
1655  RelType == ELF::R_PPC64_TOC16_LO ||
1656  RelType == ELF::R_PPC64_TOC16_LO_DS ||
1657  RelType == ELF::R_PPC64_TOC16_HI ||
1658  RelType == ELF::R_PPC64_TOC16_HA) {
1659  // These relocations are supposed to subtract the TOC address from
1660  // the final value. This does not fit cleanly into the RuntimeDyld
1661  // scheme, since there may be *two* sections involved in determining
1662  // the relocation value (the section of the symbol referred to by the
1663  // relocation, and the TOC section associated with the current module).
1664  //
1665  // Fortunately, these relocations are currently only ever generated
1666  // referring to symbols that themselves reside in the TOC, which means
1667  // that the two sections are actually the same. Thus they cancel out
1668  // and we can immediately resolve the relocation right now.
1669  switch (RelType) {
1670  case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
1671  case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
1672  case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
1673  case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
1674  case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
1675  case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
1676  default: llvm_unreachable("Wrong relocation type.");
1677  }
1678 
1679  RelocationValueRef TOCValue;
1680  if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
1681  return std::move(Err);
1682  if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
1683  llvm_unreachable("Unsupported TOC relocation.");
1684  Value.Addend -= TOCValue.Addend;
1685  resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
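 // Illustrative note (editorial, not in the original file): these TOC-relative
 // forms can be folded away because the referenced symbol lives in the TOC
 // section itself, so "symbol address - TOC base" collapses to a difference of
 // two offsets within the same section; Value.Addend -= TOCValue.Addend above
 // is exactly that difference. By ABI convention the TOC pointer is biased
 // 0x8000 bytes into the TOC so that the full 64 KiB window of a signed
 // 16-bit displacement is usable.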
1686  } else {
1687  // There are two ways to refer to the TOC address directly: either
1688  // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
1689  // ignored), or via any relocation that refers to the magic ".TOC."
1690  // symbols (in which case the addend is respected).
1691  if (RelType == ELF::R_PPC64_TOC) {
1692  RelType = ELF::R_PPC64_ADDR64;
1693  if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
1694  return std::move(Err);
1695  } else if (TargetName == ".TOC.") {
1696  if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
1697  return std::move(Err);
1698  Value.Addend += Addend;
1699  }
1700 
1701  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1702 
1703  if (Value.SymbolName)
1704  addRelocationForSymbol(RE, Value.SymbolName);
1705  else
1706  addRelocationForSection(RE, Value.SectionID);
1707  }
1708  } else if (Arch == Triple::systemz &&
1709  (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
1710  // Create function stubs for both PLT and GOT references, regardless of
1711  // whether the GOT reference is to data or code. The stub contains the
1712  // full address of the symbol, as needed by GOT references, and the
1713  // executable part only adds an overhead of 8 bytes.
1714  //
1715  // We could try to conserve space by allocating the code and data
1716  // parts of the stub separately. However, as things stand, we allocate
1717  // a stub for every relocation, so using a GOT in JIT code should be
1718  // no less space efficient than using an explicit constant pool.
1719  LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
1720  SectionEntry &Section = Sections[SectionID];
1721 
1722  // Look for an existing stub.
1723  StubMap::const_iterator i = Stubs.find(Value);
1724  uintptr_t StubAddress;
1725  if (i != Stubs.end()) {
1726  StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
1727  LLVM_DEBUG(dbgs() << " Stub function found\n");
1728  } else {
1729  // Create a new stub function.
1730  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1731 
1732  uintptr_t BaseAddress = uintptr_t(Section.getAddress());
1733  uintptr_t StubAlignment = getStubAlignment();
1734  StubAddress =
1735  (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
1736  -StubAlignment;
1737  unsigned StubOffset = StubAddress - BaseAddress;
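 // Illustrative note (editorial, not in the original file): the expression
 // above rounds the next stub address up to the stub alignment. With
 // BaseAddress = 0x1000, getStubOffset() = 0x123 and StubAlignment = 8:
 //   (0x1000 + 0x123 + 7) & -8 = 0x112a & ~0x7 = 0x1128
 // so StubOffset becomes 0x128, the first 8-byte aligned slot at or after the
 // current stub offset.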
1738 
1739  Stubs[Value] = StubOffset;
1740  createStubFunction((uint8_t *)StubAddress);
1741  RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
1742  Value.Offset);
1743  if (Value.SymbolName)
1744  addRelocationForSymbol(RE, Value.SymbolName);
1745  else
1746  addRelocationForSection(RE, Value.SectionID);
1747  Section.advanceStubOffset(getMaxStubSize());
1748  }
1749 
1750  if (RelType == ELF::R_390_GOTENT)
1751  resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
1752  Addend);
1753  else
1754  resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
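 // Illustrative note (editorial, not in the original file): in the stub
 // created above, the executable part starts at StubAddress and the 8-byte
 // slot holding the symbol's full address (filled by the R_390_64 relocation
 // at StubOffset + 8) follows it. That is why a GOT-style reference
 // (R_390_GOTENT) is resolved against StubAddress + 8, the data slot, while a
 // PLT-style reference (R_390_PLT32DBL) is resolved against the executable
 // entry itself.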
1755  } else if (Arch == Triple::x86_64) {
1756  if (RelType == ELF::R_X86_64_PLT32) {
1757  // The way the PLT relocations normally work is that the linker
1758  // allocates the PLT and this relocation makes a PC-relative call into
1759  // the PLT. The PLT entry will then jump to an address provided by the
1760  // GOT. On first call, the GOT address will point back into PLT code
1761  // that resolves the symbol. After the first call, the GOT entry points
1762  // to the actual function.
1763  //
1764  // For local functions we're ignoring all of that here and just
1765  // replacing the PLT32 relocation type with PC32, which will translate
1766  // the relocation into a PC-relative call directly to the function. For
1767  // external symbols we can't be sure the function will be within 2^32
1768  // bytes of the call site, so we need to create a stub, which calls into
1769  // the GOT. This case is equivalent to the usual PLT implementation
1770  // except that we use the stub mechanism in RuntimeDyld (which puts
1771  // stubs at the end of the section) rather than allocating a PLT
1772  // section.
1773  if (Value.SymbolName && MemMgr.allowStubAllocation()) {
1774  // This is a call to an external function.
1775  // Look for an existing stub.
1776  SectionEntry *Section = &Sections[SectionID];
1777  StubMap::const_iterator i = Stubs.find(Value);
1778  uintptr_t StubAddress;
1779  if (i != Stubs.end()) {
1780  StubAddress = uintptr_t(Section->getAddress()) + i->second;
1781  LLVM_DEBUG(dbgs() << " Stub function found\n");
1782  } else {
1783  // Create a new stub function (equivalent to a PLT entry).
1784  LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1785 
1786  uintptr_t BaseAddress = uintptr_t(Section->getAddress());
1787  uintptr_t StubAlignment = getStubAlignment();
1788  StubAddress =
1789  (BaseAddress + Section->getStubOffset() + StubAlignment - 1) &
1790  -StubAlignment;
1791  unsigned StubOffset = StubAddress - BaseAddress;
1792  Stubs[Value] = StubOffset;
1793  createStubFunction((uint8_t *)StubAddress);
1794 
1795  // Bump our stub offset counter
1796  Section->advanceStubOffset(getMaxStubSize());
1797 
1798  // Allocate a GOT Entry
1799  uint64_t GOTOffset = allocateGOTEntries(1);
1800  // This potentially creates a new Section which potentially
1801  // invalidates the Section pointer, so reload it.
1802  Section = &Sections[SectionID];
1803 
1804  // The load of the GOT address has an addend of -4
1805  resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4,
1806  ELF::R_X86_64_PC32);
1807 
1808  // Fill in the value of the symbol we're targeting into the GOT
1809  addRelocationForSymbol(
1810  computeGOTOffsetRE(GOTOffset, 0, ELF::R_X86_64_64),
1811  Value.SymbolName);
1812  }
1813 
1814  // Make the target call a call into the stub table.
1815  resolveRelocation(*Section, Offset, StubAddress, ELF::R_X86_64_PC32,
1816  Addend);
1817  } else {
1818  Value.Addend += support::ulittle32_t::ref(
1819  computePlaceholderAddress(SectionID, Offset));
1820  processSimpleRelocation(SectionID, Offset, ELF::R_X86_64_PC32, Value);
1821  }
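 // Illustrative sketch (editorial, not in the original file) of the layout
 // the external-symbol path above produces, assuming createStubFunction()
 // emits the usual x86_64 RIP-relative indirect jump:
 //
 //   call site:  e8 <rel32>       ; R_X86_64_PC32 -> stub
 //   stub:       ff 25 <rel32>    ; jmpq *GOTslot(%rip)
 //   GOT slot:   8 bytes, R_X86_64_64 -> the symbol itself
 //
 // The jmp's 32-bit displacement lives at StubOffset + 2, and the -4 in
 // "GOTOffset - 4" compensates for %rip pointing just past that 4-byte field
 // when the jump executes.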
1822  } else if (RelType == ELF::R_X86_64_GOTPCREL ||
1823  RelType == ELF::R_X86_64_GOTPCRELX ||
1824  RelType == ELF::R_X86_64_REX_GOTPCRELX) {
1825  uint64_t GOTOffset = allocateGOTEntries(1);
1826  resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1827  ELF::R_X86_64_PC32);
1828 
1829  // Fill in the value of the symbol we're targeting into the GOT
1830  RelocationEntry RE =
1831  computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
1832  if (Value.SymbolName)
1833  addRelocationForSymbol(RE, Value.SymbolName);
1834  else
1835  addRelocationForSection(RE, Value.SectionID);
1836  } else if (RelType == ELF::R_X86_64_GOT64) {
1837  // Fill in a 64-bit GOT offset.
1838  uint64_t GOTOffset = allocateGOTEntries(1);
1839  resolveRelocation(Sections[SectionID], Offset, GOTOffset,
1840  ELF::R_X86_64_64, 0);
1841 
1842  // Fill in the value of the symbol we're targeting into the GOT
1843  RelocationEntry RE =
1844  computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
1845  if (Value.SymbolName)
1846  addRelocationForSymbol(RE, Value.SymbolName);
1847  else
1848  addRelocationForSection(RE, Value.SectionID);
1849  } else if (RelType == ELF::R_X86_64_GOTPC32) {
1850  // Materialize the address of the base of the GOT relative to the PC.
1851  // This doesn't create a GOT entry, but it does mean we need a GOT
1852  // section.
1853  (void)allocateGOTEntries(0);
1854  resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC32);
1855  } else if (RelType == ELF::R_X86_64_GOTPC64) {
1856  (void)allocateGOTEntries(0);
1857  resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC64);
1858  } else if (RelType == ELF::R_X86_64_GOTOFF64) {
1859  // GOTOFF relocations ultimately require a section difference relocation.
1860  (void)allocateGOTEntries(0);
1861  processSimpleRelocation(SectionID, Offset, RelType, Value);
1862  } else if (RelType == ELF::R_X86_64_PC32) {
1863  Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
1864  processSimpleRelocation(SectionID, Offset, RelType, Value);
1865  } else if (RelType == ELF::R_X86_64_PC64) {
1866  Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
1867  processSimpleRelocation(SectionID, Offset, RelType, Value);
1868  } else if (RelType == ELF::R_X86_64_GOTTPOFF) {
1869  processX86_64GOTTPOFFRelocation(SectionID, Offset, Value, Addend);
1870  } else if (RelType == ELF::R_X86_64_TLSGD ||
1871  RelType == ELF::R_X86_64_TLSLD) {
1872  // The next relocation must be the relocation for __tls_get_addr.
1873  ++RelI;
1874  auto &GetAddrRelocation = *RelI;
1875  processX86_64TLSRelocation(SectionID, Offset, RelType, Value, Addend,
1876  GetAddrRelocation);
1877  } else {
1878  processSimpleRelocation(SectionID, Offset, RelType, Value);
1879  }
1880  } else {
1881  if (Arch == Triple::x86) {
1882  Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
1883  }
1884  processSimpleRelocation(SectionID, Offset, RelType, Value);
1885  }
1886  return ++RelI;
1887 }
1888 
1889 void RuntimeDyldELF::processX86_64GOTTPOFFRelocation(unsigned SectionID,
1890  uint64_t Offset,
1891  RelocationValueRef Value,
1892  int64_t Addend) {
1893  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
1894  // to replace the GOTTPOFF relocation with a TPOFF relocation. The spec
1895  // only mentions one optimization even though there are two different
1896  // code sequences for the Initial Exec TLS Model. We match the code to
1897  // find out which one was used.
1898 
1899  // A possible TLS code sequence and its replacement
1900  struct CodeSequence {
1901  // The expected code sequence
1902  ArrayRef<uint8_t> ExpectedCodeSequence;
1903  // The negative offset of the GOTTPOFF relocation to the beginning of
1904  // the sequence
1905  uint64_t TLSSequenceOffset;
1906  // The new code sequence
1907  ArrayRef<uint8_t> NewCodeSequence;
1908  // The offset of the new TPOFF relocation
1909  uint64_t TpoffRelocationOffset;
1910  };
1911 
1912  std::array<CodeSequence, 2> CodeSequences;
1913 
1914  // Initial Exec Code Model Sequence
1915  {
1916  static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
1917  0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
1918  0x00, // mov %fs:0, %rax
1919  0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // add x@gottpoff(%rip),
1920  // %rax
1921  };
1922  CodeSequences[0].ExpectedCodeSequence =
1923  ArrayRef<uint8_t>(ExpectedCodeSequenceList);
1924  CodeSequences[0].TLSSequenceOffset = 12;
1925 
1926  static const std::initializer_list<uint8_t> NewCodeSequenceList = {
1927  0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0, %rax
1928  0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax), %rax
1929  };
1930  CodeSequences[0].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
1931  CodeSequences[0].TpoffRelocationOffset = 12;
1932  }
1933 
1934  // Initial Exec Code Model Sequence, II
1935  {
1936  static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
1937  0x48, 0x8b, 0x05, 0x00, 0x00, 0x00, 0x00, // mov x@gottpoff(%rip), %rax
1938  0x64, 0x48, 0x8b, 0x00, 0x00, 0x00, 0x00 // mov %fs:(%rax), %rax
1939  };
1940  CodeSequences[1].ExpectedCodeSequence =
1941  ArrayRef<uint8_t>(ExpectedCodeSequenceList);
1942  CodeSequences[1].TLSSequenceOffset = 3;
1943 
1944  static const std::initializer_list<uint8_t> NewCodeSequenceList = {
1945  0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00, // 6 byte nop
1946  0x64, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:x@tpoff, %rax
1947  };
1948  CodeSequences[1].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
1949  CodeSequences[1].TpoffRelocationOffset = 10;
1950  }
1951 
1952  bool Resolved = false;
1953  auto &Section = Sections[SectionID];
1954  for (const auto &C : CodeSequences) {
1955  assert(C.ExpectedCodeSequence.size() == C.NewCodeSequence.size() &&
1956  "Old and new code sequences must have the same size");
1957 
1958  if (Offset < C.TLSSequenceOffset ||
1959  (Offset - C.TLSSequenceOffset + C.NewCodeSequence.size()) >
1960  Section.getSize()) {
1961  // This can't be a matching sequence as it doesn't fit in the current
1962  // section
1963  continue;
1964  }
1965 
1966  auto TLSSequenceStartOffset = Offset - C.TLSSequenceOffset;
1967  auto *TLSSequence = Section.getAddressWithOffset(TLSSequenceStartOffset);
1968  if (ArrayRef<uint8_t>(TLSSequence, C.ExpectedCodeSequence.size()) !=
1969  C.ExpectedCodeSequence) {
1970  continue;
1971  }
1972 
1973  memcpy(TLSSequence, C.NewCodeSequence.data(), C.NewCodeSequence.size());
1974 
1975  // The original GOTTPOFF relocation has an addend as it is PC relative,
1976  // so it needs to be corrected. The TPOFF32 relocation is used as an
1977  // absolute value (which is an offset from %fs:0), so remove the addend
1978  // again.
1979  RelocationEntry RE(SectionID,
1980  TLSSequenceStartOffset + C.TpoffRelocationOffset,
1981  ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
1982 
1983  if (Value.SymbolName)
1984  addRelocationForSymbol(RE, Value.SymbolName);
1985  else
1986  addRelocationForSection(RE, Value.SectionID);
1987 
1988  Resolved = true;
1989  break;
1990  }
1991 
1992  if (!Resolved) {
1993  // The GOTTPOFF relocation was not used in one of the sequences
1994  // described in the spec, so we can't optimize it to a TPOFF
1995  // relocation.
1996  uint64_t GOTOffset = allocateGOTEntries(1);
1997  resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1998  ELF::R_X86_64_PC32);
1999  RelocationEntry RE =
2000  computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_TPOFF64);
2001  if (Value.SymbolName)
2002  addRelocationForSymbol(RE, Value.SymbolName);
2003  else
2004  addRelocationForSection(RE, Value.SectionID);
2005  }
2006 }
2007 
2008 void RuntimeDyldELF::processX86_64TLSRelocation(
2009  unsigned SectionID, uint64_t Offset, uint64_t RelType,
2010  RelocationValueRef Value, int64_t Addend,
2011  const RelocationRef &GetAddrRelocation) {
2012  // Since we are statically linking and have no additional DSOs, we can resolve
2013  // the relocation directly without using __tls_get_addr.
2014  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
2015  // to replace it with the Local Exec relocation variant.
2016 
2017  // Find out whether the code was compiled with the large or small memory
2018  // model. For this we look at the next relocation, which is the relocation
2019  // for the __tls_get_addr function. If it's a 32-bit relocation, it's the
2020  // small code model; with a 64-bit relocation it's the large code model.
2021  bool IsSmallCodeModel;
2022  // Is the relocation for the __tls_get_addr a PC-relative GOT relocation?
2023  bool IsGOTPCRel = false;
2024 
2025  switch (GetAddrRelocation.getType()) {
2026  case ELF::R_X86_64_GOTPCREL:
2027  case ELF::R_X86_64_REX_GOTPCRELX:
2028  case ELF::R_X86_64_GOTPCRELX:
2029  IsGOTPCRel = true;
2030  [[fallthrough]];
2031  case ELF::R_X86_64_PLT32:
2032  IsSmallCodeModel = true;
2033  break;
2034  case ELF::R_X86_64_PLTOFF64:
2035  IsSmallCodeModel = false;
2036  break;
2037  default:
2038  report_fatal_error(
2039  "invalid TLS relocations for General/Local Dynamic TLS Model: "
2040  "expected PLT or GOT relocation for __tls_get_addr function");
2041  }
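 // Illustrative note (editorial, not in the original file): the call through
 // __tls_get_addr is what reveals the code model. A 32-bit PLT or PC-relative
 // GOT relocation (the "e8" / "ff 15" call forms matched below) only occurs
 // in small code model sequences, whereas R_X86_64_PLTOFF64 belongs to the
 // movabs-based call of the large code model.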
2042 
2043  // The negative offset to the start of the TLS code sequence relative to
2044  // the offset of the TLSGD/TLSLD relocation
2045  uint64_t TLSSequenceOffset;
2046  // The expected start of the code sequence
2047  ArrayRef<uint8_t> ExpectedCodeSequence;
2048  // The new TLS code sequence that will replace the existing code
2049  ArrayRef<uint8_t> NewCodeSequence;
2050 
2051  if (RelType == ELF::R_X86_64_TLSGD) {
2052  // The offset of the new TPOFF32 relocation (offset starting from the
2053  // beginning of the whole TLS sequence)
2054  uint64_t TpoffRelocOffset;
2055 
2056  if (IsSmallCodeModel) {
2057  if (!IsGOTPCRel) {
2058  static const std::initializer_list<uint8_t> CodeSequence = {
2059  0x66, // data16 (no-op prefix)
2060  0x48, 0x8d, 0x3d, 0x00, 0x00,
2061  0x00, 0x00, // lea <disp32>(%rip), %rdi
2062  0x66, 0x66, // two data16 prefixes
2063  0x48, // rex64 (no-op prefix)
2064  0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
2065  };
2066  ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2067  TLSSequenceOffset = 4;
2068  } else {
2069  // This code sequence is not described in the TLS spec but gcc
2070  // generates it sometimes.
2071  static const std::initializer_list<uint8_t> CodeSequence = {
2072  0x66, // data16 (no-op prefix)
2073  0x48, 0x8d, 0x3d, 0x00, 0x00,
2074  0x00, 0x00, // lea <disp32>(%rip), %rdi
2075  0x66, // data16 prefix (no-op prefix)
2076  0x48, // rex64 (no-op prefix)
2077  0xff, 0x15, 0x00, 0x00, 0x00,
2078  0x00 // call *__tls_get_addr@gotpcrel(%rip)
2079  };
2080  ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2081  TLSSequenceOffset = 4;
2082  }
2083 
2084  // The replacement code for the small code model. It's the same for
2085  // both sequences.
2086  static const std::initializer_list<uint8_t> SmallSequence = {
2087  0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
2088  0x00, // mov %fs:0, %rax
2089  0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),
2090  // %rax
2091  };
2092  NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
2093  TpoffRelocOffset = 12;
2094  } else {
2095  static const std::initializer_list<uint8_t> CodeSequence = {
2096  0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
2097  // %rdi
2098  0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2099  0x00, // movabs $__tls_get_addr@pltoff, %rax
2100  0x48, 0x01, 0xd8, // add %rbx, %rax
2101  0xff, 0xd0 // call *%rax
2102  };
2103  ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2104  TLSSequenceOffset = 3;
2105 
2106  // The replacement code for the large code model
2107  static const std::initializer_list<uint8_t> LargeSequence = {
2108  0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
2109  0x00, // mov %fs:0, %rax
2110  0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, // lea x@tpoff(%rax),
2111  // %rax
2112  0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 // nopw 0x0(%rax,%rax,1)
2113  };
2114  NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
2115  TpoffRelocOffset = 12;
2116  }
2117 
2118  // The TLSGD/TLSLD relocations are PC-relative, so they have an addend.
2119  // The new TPOFF32 relocation is used as an absolute offset from
2120  // %fs:0, so remove the TLSGD/TLSLD addend again.
2121  RelocationEntry RE(SectionID, Offset - TLSSequenceOffset + TpoffRelocOffset,
2122  ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
2123  if (Value.SymbolName)
2124  addRelocationForSymbol(RE, Value.SymbolName);
2125  else
2126  addRelocationForSection(RE, Value.SectionID);
2127  } else if (RelType == ELF::R_X86_64_TLSLD) {
2128  if (IsSmallCodeModel) {
2129  if (!IsGOTPCRel) {
2130  static const std::initializer_list<uint8_t> CodeSequence = {
2131  0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
2132  0x00, 0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
2133  };
2134  ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2135  TLSSequenceOffset = 3;
2136 
2137  // The replacement code for the small code model
2138  static const std::initializer_list<uint8_t> SmallSequence = {
2139  0x66, 0x66, 0x66, // three data16 prefixes (no-op)
2140  0x64, 0x48, 0x8b, 0x04, 0x25,
2141  0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
2142  };
2143  NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
2144  } else {
2145  // This code sequence is not described in the TLS spec but gcc
2146  // generates it sometimes.
2147  static const std::initializer_list<uint8_t> CodeSequence = {
2148  0x48, 0x8d, 0x3d, 0x00,
2149  0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
2150  0xff, 0x15, 0x00, 0x00,
2151  0x00, 0x00 // call
2152  // *__tls_get_addr@gotpcrel(%rip)
2153  };
2154  ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2155  TLSSequenceOffset = 3;
2156 
2157  // The replacement code is just like the one above, but it needs to be
2158  // one byte longer.
2159  static const std::initializer_list<uint8_t> SmallSequence = {
2160  0x0f, 0x1f, 0x40, 0x00, // 4 byte nop
2161  0x64, 0x48, 0x8b, 0x04, 0x25,
2162  0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
2163  };
2164  NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
2165  }
2166  } else {
2167  // This is the same sequence as for the TLSGD sequence with the large
2168  // memory model above
2169  static const std::initializer_list<uint8_t> CodeSequence = {
2170  0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
2171  // %rdi
2172  0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2173  0x48, // movabs $__tls_get_addr@pltoff, %rax
2174  0x01, 0xd8, // add %rbx, %rax
2175  0xff, 0xd0 // call *%rax
2176  };
2177  ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2178  TLSSequenceOffset = 3;
2179 
2180  // The replacement code for the large code model
2181  static const std::initializer_list<uint8_t> LargeSequence = {
2182  0x66, 0x66, 0x66, // three data16 prefixes (no-op)
2183  0x66, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
2184  0x00, // 10 byte nop
2185  0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
2186  };
2187  NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
2188  }
2189  } else {
2190  llvm_unreachable("both TLS relocations handled above");
2191  }
2192 
2193  assert(ExpectedCodeSequence.size() == NewCodeSequence.size() &&
2194  "Old and new code sequences must have the same size");
2195 
2196  auto &Section = Sections[SectionID];
2197  if (Offset < TLSSequenceOffset ||
2198  (Offset - TLSSequenceOffset + NewCodeSequence.size()) >
2199  Section.getSize()) {
2200  report_fatal_error("unexpected end of section in TLS sequence");
2201  }
2202 
2203  auto *TLSSequence = Section.getAddressWithOffset(Offset - TLSSequenceOffset);
2204  if (ArrayRef<uint8_t>(TLSSequence, ExpectedCodeSequence.size()) !=
2205  ExpectedCodeSequence) {
2206  report_fatal_error(
2207  "invalid TLS sequence for Global/Local Dynamic TLS Model");
2208  }
2209 
2210  memcpy(TLSSequence, NewCodeSequence.data(), NewCodeSequence.size());
2211 }
2212 
2213 size_t RuntimeDyldELF::getGOTEntrySize() {
2214  // We don't use the GOT in all of these cases, but it's essentially free
2215  // to put them all here.
2216  size_t Result = 0;
2217  switch (Arch) {
2218  case Triple::x86_64:
2219  case Triple::aarch64:
2220  case Triple::aarch64_be:
2221  case Triple::ppc64:
2222  case Triple::ppc64le:
2223  case Triple::systemz:
2224  Result = sizeof(uint64_t);
2225  break;
2226  case Triple::x86:
2227  case Triple::arm:
2228  case Triple::thumb:
2229  Result = sizeof(uint32_t);
2230  break;
2231  case Triple::mips:
2232  case Triple::mipsel:
2233  case Triple::mips64:
2234  case Triple::mips64el:
2235  if (IsMipsO32ABI || IsMipsN32ABI)
2236  Result = sizeof(uint32_t);
2237  else if (IsMipsN64ABI)
2238  Result = sizeof(uint64_t);
2239  else
2240  llvm_unreachable("Mips ABI not handled");
2241  break;
2242  default:
2243  llvm_unreachable("Unsupported CPU type!");
2244  }
2245  return Result;
2246 }
2247 
2248 uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
2249  if (GOTSectionID == 0) {
2250  GOTSectionID = Sections.size();
2251  // Reserve a section id. We'll allocate the section later
2252  // once we know the total size
2253  Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
2254  }
2255  uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
2256  CurrentGOTIndex += no;
2257  return StartOffset;
2258 }
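// Illustrative note (editorial, not in the original file): GOT "allocation"
// here is pure index bookkeeping; the backing memory is created once, in
// finalizeLoad(). With 8-byte entries (e.g. x86_64):
//   allocateGOTEntries(1) -> returns offset 0x00, CurrentGOTIndex = 1
//   allocateGOTEntries(2) -> returns offset 0x08, CurrentGOTIndex = 3
// and finalizeLoad() later allocates 3 * 8 = 24 bytes for the .got section.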
2259 
2260 uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
2261  unsigned GOTRelType) {
2262  auto E = GOTOffsetMap.insert({Value, 0});
2263  if (E.second) {
2264  uint64_t GOTOffset = allocateGOTEntries(1);
2265 
2266  // Create relocation for newly created GOT entry
2267  RelocationEntry RE =
2268  computeGOTOffsetRE(GOTOffset, Value.Offset, GOTRelType);
2269  if (Value.SymbolName)
2270  addRelocationForSymbol(RE, Value.SymbolName);
2271  else
2272  addRelocationForSection(RE, Value.SectionID);
2273 
2274  E.first->second = GOTOffset;
2275  }
2276 
2277  return E.first->second;
2278 }
2279 
2280 void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
2281  uint64_t Offset,
2282  uint64_t GOTOffset,
2283  uint32_t Type) {
2284  // Fill in the relative address of the GOT Entry into the stub
2285  RelocationEntry GOTRE(SectionID, Offset, Type, GOTOffset);
2286  addRelocationForSection(GOTRE, GOTSectionID);
2287 }
2288 
2289 RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
2290  uint64_t SymbolOffset,
2291  uint32_t Type) {
2292  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
2293 }
2294 
2295 void RuntimeDyldELF::processNewSymbol(const SymbolRef &ObjSymbol, SymbolTableEntry& Symbol) {
2296  // This should never return an error as `processNewSymbol` wouldn't have been
2297  // called if getFlags() returned an error before.
2298  auto ObjSymbolFlags = cantFail(ObjSymbol.getFlags());
2299 
2300  if (ObjSymbolFlags & SymbolRef::SF_Indirect) {
2301  if (IFuncStubSectionID == 0) {
2302  // Create a dummy section for the ifunc stubs. It will actually be
2303  // allocated in finalizeLoad() below.
2304  IFuncStubSectionID = Sections.size();
2305  Sections.push_back(
2306  SectionEntry(".text.__llvm_IFuncStubs", nullptr, 0, 0, 0));
2307  // The first 64 bytes are reserved for the IFunc resolver
2308  IFuncStubOffset = 64;
2309  }
2310 
2311  IFuncStubs.push_back(IFuncStub{IFuncStubOffset, Symbol});
2312  // Modify the symbol so that it points to the ifunc stub instead of to the
2313  // resolver function.
2314  Symbol = SymbolTableEntry(IFuncStubSectionID, IFuncStubOffset,
2315  Symbol.getFlags());
2316  IFuncStubOffset += getMaxIFuncStubSize();
2317  }
2318 }
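// Illustrative note (editorial, not in the original file): the effect of
// processNewSymbol() on an indirect (ifunc) symbol is a redirection. A symbol
// "foo" that originally pointed at its resolver function is rewritten to
// point into .text.__llvm_IFuncStubs, so every lookup of and relocation
// against "foo" now reaches the stub built in finalizeLoad(), which resolves
// and caches the real implementation on first use.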
2319 
2320 Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
2321  ObjSectionToIDMap &SectionMap) {
2322  if (IsMipsO32ABI)
2323  if (!PendingRelocs.empty())
2324  return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
2325 
2326  // Create the IFunc stubs if necessary. This must be done before processing
2327  // the GOT entries, as the IFunc stubs may create some.
2328  if (IFuncStubSectionID != 0) {
2329  uint8_t *IFuncStubsAddr = MemMgr.allocateCodeSection(
2330  IFuncStubOffset, 1, IFuncStubSectionID, ".text.__llvm_IFuncStubs");
2331  if (!IFuncStubsAddr)
2332  return make_error<RuntimeDyldError>(
2333  "Unable to allocate memory for IFunc stubs!");
2334  Sections[IFuncStubSectionID] =
2335  SectionEntry(".text.__llvm_IFuncStubs", IFuncStubsAddr, IFuncStubOffset,
2336  IFuncStubOffset, 0);
2337 
2338  createIFuncResolver(IFuncStubsAddr);
2339 
2340  LLVM_DEBUG(dbgs() << "Creating IFunc stubs SectionID: "
2341  << IFuncStubSectionID << " Addr: "
2342  << Sections[IFuncStubSectionID].getAddress() << '\n');
2343  for (auto &IFuncStub : IFuncStubs) {
2344  auto &Symbol = IFuncStub.OriginalSymbol;
2345  LLVM_DEBUG(dbgs() << "\tSectionID: " << Symbol.getSectionID()
2346  << " Offset: " << format("%p", Symbol.getOffset())
2347  << " IFuncStubOffset: "
2348  << format("%p\n", IFuncStub.StubOffset));
2349  createIFuncStub(IFuncStubSectionID, 0, IFuncStub.StubOffset,
2350  Symbol.getSectionID(), Symbol.getOffset());
2351  }
2352 
2353  IFuncStubSectionID = 0;
2354  IFuncStubOffset = 0;
2355  IFuncStubs.clear();
2356  }
2357 
2358  // If necessary, allocate the global offset table
2359  if (GOTSectionID != 0) {
2360  // Allocate memory for the section
2361  size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
2362  uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
2363  GOTSectionID, ".got", false);
2364  if (!Addr)
2365  return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
2366 
2367  Sections[GOTSectionID] =
2368  SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
2369 
2370  // For now, initialize all GOT entries to zero. We'll fill them in as
2371  // needed when GOT-based relocations are applied.
2372  memset(Addr, 0, TotalSize);
2373  if (IsMipsN32ABI || IsMipsN64ABI) {
2374  // To correctly resolve Mips GOT relocations, we need a mapping from
2375  // the object's sections to GOTs.
2376  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
2377  SI != SE; ++SI) {
2378  if (SI->relocation_begin() != SI->relocation_end()) {
2379  Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
2380  if (!RelSecOrErr)
2381  return make_error<RuntimeDyldError>(
2382  toString(RelSecOrErr.takeError()));
2383 
2384  section_iterator RelocatedSection = *RelSecOrErr;
2385  ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
2386  assert(i != SectionMap.end());
2387  SectionToGOTMap[i->second] = GOTSectionID;
2388  }
2389  }
2390  GOTSymbolOffsets.clear();
2391  }
2392  }
2393 
2394  // Look for and record the EH frame section.
2395  ObjSectionToIDMap::iterator i, e;
2396  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
2397  const SectionRef &Section = i->first;
2398 
2399  StringRef Name;
2400  Expected<StringRef> NameOrErr = Section.getName();
2401  if (NameOrErr)
2402  Name = *NameOrErr;
2403  else
2404  consumeError(NameOrErr.takeError());
2405 
2406  if (Name == ".eh_frame") {
2407  UnregisteredEHFrameSections.push_back(i->second);
2408  break;
2409  }
2410  }
2411 
2412  GOTSectionID = 0;
2413  CurrentGOTIndex = 0;
2414 
2415  return Error::success();
2416 }
2417 
2418 bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
2419  return Obj.isELF();
2420 }
2421 
2422 void RuntimeDyldELF::createIFuncResolver(uint8_t *Addr) const {
2423  if (Arch == Triple::x86_64) {
2424  // The address of the GOT1 entry is in %r11; the GOT2 entry is at %r11+8
2425  // (see createIFuncStub() for details).
2426  // The following code first saves all registers that contain the original
2427  // function arguments as those registers are not saved by the resolver
2428  // function. %r11 is saved as well so that the GOT2 entry can be updated
2429  // afterwards. Then it calls the actual IFunc resolver function whose
2430  // address is stored in GOT2. After the resolver function returns, all
2431  // saved registers are restored and the return value is written to GOT1.
2432  // Finally, jump to the now resolved function.
2433  // clang-format off
2434  const uint8_t StubCode[] = {
2435  0x57, // push %rdi
2436  0x56, // push %rsi
2437  0x52, // push %rdx
2438  0x51, // push %rcx
2439  0x41, 0x50, // push %r8
2440  0x41, 0x51, // push %r9
2441  0x41, 0x53, // push %r11
2442  0x41, 0xff, 0x53, 0x08, // call *0x8(%r11)
2443  0x41, 0x5b, // pop %r11
2444  0x41, 0x59, // pop %r9
2445  0x41, 0x58, // pop %r8
2446  0x59, // pop %rcx
2447  0x5a, // pop %rdx
2448  0x5e, // pop %rsi
2449  0x5f, // pop %rdi
2450  0x49, 0x89, 0x03, // mov %rax,(%r11)
2451  0xff, 0xe0 // jmp *%rax
2452  };
2453  // clang-format on
2454  static_assert(sizeof(StubCode) <= 64,
2455  "maximum size of the IFunc resolver is 64B");
2456  memcpy(Addr, StubCode, sizeof(StubCode));
2457  } else {
2458  report_fatal_error(
2459  "IFunc resolver is not supported for target architecture");
2460  }
2461 }
2462 
2463 void RuntimeDyldELF::createIFuncStub(unsigned IFuncStubSectionID,
2464  uint64_t IFuncResolverOffset,
2465  uint64_t IFuncStubOffset,
2466  unsigned IFuncSectionID,
2467  uint64_t IFuncOffset) {
2468  auto &IFuncStubSection = Sections[IFuncStubSectionID];
2469  auto *Addr = IFuncStubSection.getAddressWithOffset(IFuncStubOffset);
2470 
2471  if (Arch == Triple::x86_64) {
2472  // The first instruction loads a PC-relative address into %r11 which is a
2473  // GOT entry for this stub. This initially contains the address to the
2474  // IFunc resolver. We can use %r11 here as it's caller saved but not used
2475  // to pass any arguments. In fact, x86_64 ABI even suggests using %r11 for
2476  // code in the PLT. The IFunc resolver will use %r11 to update the GOT
2477  // entry.
2478  //
2479  // The next instruction just jumps to the address contained in the GOT
2480  // entry. As mentioned above, we do this two-step jump by first setting
2481  // %r11 so that the IFunc resolver has access to it.
2482  //
2483  // The resolver code installed by createIFuncResolver() also needs the
2484  // address of the actual IFunc resolver function. That address is stored
2485  // in a GOT entry right next to the first one for this stub, so the
2486  // resolver code can call it through %r11+8.
2487  //
2488  // In total, two adjacent GOT entries (+relocation) and one additional
2489  // relocation are required:
2490  // GOT1: Address of the IFunc resolver.
2491  // GOT2: Address of the IFunc resolver function.
2492  // IFuncStubOffset+3: 32-bit PC-relative address of GOT1.
2493  uint64_t GOT1 = allocateGOTEntries(2);
2494  uint64_t GOT2 = GOT1 + getGOTEntrySize();
2495 
2496  RelocationEntry RE1(GOTSectionID, GOT1, ELF::R_X86_64_64,
2497  IFuncResolverOffset, {});
2498  addRelocationForSection(RE1, IFuncStubSectionID);
2499  RelocationEntry RE2(GOTSectionID, GOT2, ELF::R_X86_64_64, IFuncOffset, {});
2500  addRelocationForSection(RE2, IFuncSectionID);
2501 
2502  const uint8_t StubCode[] = {
2503  0x4c, 0x8d, 0x1d, 0x00, 0x00, 0x00, 0x00, // leaq 0x0(%rip),%r11
2504  0x41, 0xff, 0x23 // jmpq *(%r11)
2505  };
2506  assert(sizeof(StubCode) <= getMaxIFuncStubSize() &&
2507  "IFunc stub size must not exceed getMaxIFuncStubSize()");
2508  memcpy(Addr, StubCode, sizeof(StubCode));
2509 
2510  // The PC-relative value starts 4 bytes from the end of the leaq
2511  // instruction, so the addend is -4.
2512  resolveGOTOffsetRelocation(IFuncStubSectionID, IFuncStubOffset + 3,
2513  GOT1 - 4, ELF::R_X86_64_PC32);
2514  } else {
2515  report_fatal_error("IFunc stub is not supported for target architecture");
2516  }
2517 }
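// Illustrative sketch (editorial, not in the original file) of the complete
// ifunc dispatch assembled by createIFuncResolver() and createIFuncStub() on
// x86_64:
//
//   caller --> stub:  leaq GOT1(%rip), %r11
//                     jmpq *(%r11)
//   GOT1: initially the resolver code above, later the resolved function
//   GOT2: always the ifunc's resolver function
//
// On the first call the resolver code saves the argument registers, calls
// *(%r11 + 8) == *GOT2, restores the registers, writes the returned pointer
// into GOT1 through %r11 and jumps to it; later calls through the stub
// therefore go straight to the resolved implementation.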
2518 
2519 unsigned RuntimeDyldELF::getMaxIFuncStubSize() const {
2520  if (Arch == Triple::x86_64) {
2521  return 10;
2522  }
2523  return 0;
2524 }
2525 
2526 bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
2527  unsigned RelTy = R.getType();
2528  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
2529  return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
2530  RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
2531 
2532  if (Arch == Triple::x86_64)
2533  return RelTy == ELF::R_X86_64_GOTPCREL ||
2534  RelTy == ELF::R_X86_64_GOTPCRELX ||
2535  RelTy == ELF::R_X86_64_GOT64 ||
2536  RelTy == ELF::R_X86_64_REX_GOTPCRELX;
2537  return false;
2538 }
2539 
2540 bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
2541  if (Arch != Triple::x86_64)
2542  return true; // Conservative answer
2543 
2544  switch (R.getType()) {
2545  default:
2546  return true; // Conservative answer
2547 
2548 
2549  case ELF::R_X86_64_GOTPCREL:
2550  case ELF::R_X86_64_GOTPCRELX:
2551  case ELF::R_X86_64_REX_GOTPCRELX:
2552  case ELF::R_X86_64_GOTPC64:
2553  case ELF::R_X86_64_GOT64:
2554  case ELF::R_X86_64_GOTOFF64:
2555  case ELF::R_X86_64_PC32:
2556  case ELF::R_X86_64_PC64:
2557  case ELF::R_X86_64_64:
2558  // We know that these relocation types won't need a stub function. This list
2559  // can be extended as needed.
2560  return false;
2561  }
2562 }
2563 
2564 } // namespace llvm
i
i
Definition: README.txt:29
MemoryBuffer.h
llvm::RuntimeDyldImpl::writeInt64BE
void writeInt64BE(uint8_t *Addr, uint64_t Value)
Definition: RuntimeDyldImpl.h:330
llvm::applyPPChighesta
static uint16_t applyPPChighesta(uint64_t value)
Definition: RuntimeDyldELF.cpp:807
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
llvm::object::ObjectFile::getBytesInAddress
virtual uint8_t getBytesInAddress() const =0
The number of bytes used to represent an address in this object file format.
llvm::RuntimeDyldImpl::MemMgr
RuntimeDyld::MemoryManager & MemMgr
Definition: RuntimeDyldImpl.h:244
llvm::Triple::bpfeb
@ bpfeb
Definition: Triple.h:57
llvm::Triple::UnknownArch
@ UnknownArch
Definition: Triple.h:47
intptr_t
llvm::RuntimeDyldELF::registerEHFrames
void registerEHFrames() override
Definition: RuntimeDyldELF.cpp:221
llvm::support::detail::packed_endian_specific_integral::ref
Definition: Endian.h:251
llvm::isa
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition: Casting.h:548
T
llvm::RuntimeDyldELF::isCompatibleFile
bool isCompatibleFile(const object::ObjectFile &Obj) const override
Definition: RuntimeDyldELF.cpp:2418
llvm::applyPPCha
static uint16_t applyPPCha(uint64_t value)
Definition: RuntimeDyldELF.cpp:791
StringRef.h
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::Triple::x86
@ x86
Definition: Triple.h:85
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:629
llvm::write
Error write(MCStreamer &Out, ArrayRef< std::string > Inputs)
Definition: DWP.cpp:549
llvm::object::Binary::isLittleEndian
bool isLittleEndian() const
Definition: Binary.h:152
llvm::RuntimeDyldImpl::ErrorStr
std::string ErrorStr
Definition: RuntimeDyldImpl.h:318
llvm::Target
Target - Wrapper for Target specific information.
Definition: TargetRegistry.h:149
llvm::RuntimeDyldImpl::Sections
SectionList Sections
Definition: RuntimeDyldImpl.h:254
Wrapper
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
Definition: AMDGPUAliasAnalysis.cpp:31
llvm::LoadedObjectInfoHelper
Definition: DIContext.h:307
llvm::support::endian::write32le
void write32le(void *P, uint32_t V)
Definition: Endian.h:416
llvm::object::SectionRef::getRawDataRefImpl
DataRefImpl getRawDataRefImpl() const
Definition: ObjectFile.h:540
llvm::Error::success
static ErrorSuccess success()
Create a success value.
Definition: Error.h:329
llvm::object::SectionRef::getName
Expected< StringRef > getName() const
Definition: ObjectFile.h:460
llvm::ELF::EF_PPC64_ABI
@ EF_PPC64_ABI
Definition: ELF.h:403
llvm::StringMap::end
iterator end()
Definition: StringMap.h:204
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::Triple::x86_64
@ x86_64
Definition: Triple.h:86
llvm::RuntimeDyldImpl::addRelocationForSection
void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID)
Definition: RuntimeDyld.cpp:961
llvm::Triple::ppc
@ ppc
Definition: Triple.h:69
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::Expected
Tagged union holding either a T or a Error.
Definition: APFloat.h:41
llvm::RuntimeDyldImpl
Definition: RuntimeDyldImpl.h:238
STLExtras.h
llvm::StringMap::find
iterator find(StringRef Key)
Definition: StringMap.h:217
llvm::object::ObjectFile::section_begin
virtual section_iterator section_begin() const =0
llvm::RuntimeDyldImpl::addRelocationForSymbol
void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName)
Definition: RuntimeDyld.cpp:966
llvm::RuntimeDyldImpl::IsMipsO32ABI
bool IsMipsO32ABI
Definition: RuntimeDyldImpl.h:290
llvm::Triple::mips64
@ mips64
Definition: Triple.h:66
llvm::consumeError
void consumeError(Error Err)
Consume a Error without doing anything.
Definition: Error.h:1042
llvm::MemoryBufferRef
Definition: MemoryBufferRef.h:22
llvm::RelocationEntry::SymOffset
uint64_t SymOffset
Definition: RuntimeDyldImpl.h:140
llvm::ELF::EF_MIPS_ABI_O32
@ EF_MIPS_ABI_O32
Definition: ELF.h:519
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
llvm::ArrayRef::data
const T * data() const
Definition: ArrayRef.h:161
llvm::RISCVFenceField::R
@ R
Definition: RISCVBaseInfo.h:265
uint
<%struct.s * > cast struct s *S to sbyte *< sbyte * > sbyte uint
Definition: README.txt:239
llvm::applyPPChi
static uint16_t applyPPChi(uint64_t value)
Definition: RuntimeDyldELF.cpp:787
llvm::RelocationEntry::SectionID
unsigned SectionID
SectionID - the section this relocation points to.
Definition: RuntimeDyldImpl.h:120
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::object::ELFObjectFileBase
Definition: ELFObjectFile.h:51
llvm::ARMBuildAttrs::Section
@ Section
Legacy Tags.
Definition: ARMBuildAttributes.h:82
llvm::Resolver
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2134
llvm::applyPPChighest
static uint16_t applyPPChighest(uint64_t value)
Definition: RuntimeDyldELF.cpp:803
llvm::support::endian
Definition: Endian.h:42
llvm::object::RelocationRef::getType
uint64_t getType() const
Definition: ObjectFile.h:570
llvm::object::SymbolicFile::symbol_end
virtual basic_symbol_iterator symbol_end() const =0
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::RuntimeDyldELF::finalizeLoad
Error finalizeLoad(const ObjectFile &Obj, ObjSectionToIDMap &SectionMap) override
Definition: RuntimeDyldELF.cpp:2320
llvm::cast
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
llvm::RelocationEntry::Offset
uint64_t Offset
Offset - offset into the section.
Definition: RuntimeDyldImpl.h:123
llvm::object::ELFObjectFileBase::getPlatformFlags
virtual unsigned getPlatformFlags() const =0
Returns platform-specific object flags, if any.
llvm::object::ELFRelocationRef
Definition: ELFObjectFile.h:204
llvm::object
Definition: DWARFDebugLoc.h:24
llvm::RuntimeDyldImpl::HasError
bool HasError
Definition: RuntimeDyldImpl.h:317
ELF.h
llvm::Triple::ArchType
ArchType
Definition: Triple.h:46
llvm::RuntimeDyldImpl::SID
unsigned SID
Definition: RuntimeDyldImpl.h:256
llvm::applyPPChighera
static uint16_t applyPPChighera(uint64_t value)
Definition: RuntimeDyldELF.cpp:799
llvm::object::ObjectFile::section_end
virtual section_iterator section_end() const =0
llvm::RuntimeDyldImpl::IsMipsN64ABI
bool IsMipsN64ABI
Definition: RuntimeDyldImpl.h:292
llvm::object::BasicSymbolRef::getRawDataRefImpl
DataRefImpl getRawDataRefImpl() const
Definition: SymbolicFile.h:208
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
getBits
static uint64_t getBits(uint64_t Val, int Start, int End)
Definition: RuntimeDyldELF.cpp:50
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::Triple::mips64el
@ mips64el
Definition: Triple.h:67
llvm::StringMapConstIterator
Definition: StringMap.h:26
SI
@ SI
Definition: SIInstrInfo.cpp:7985
llvm::object::ObjectFile::getFileFormatName
virtual StringRef getFileFormatName() const =0
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::Triple::ppc64
@ ppc64
Definition: Triple.h:71
llvm::StringMap::clear
void clear()
Definition: StringMap.h:348
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
llvm::StringRef::data
const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
llvm::object::DataRefImpl::p
uintptr_t p
Definition: SymbolicFile.h:41
llvm::RuntimeDyld::MemoryManager::registerEHFrames
virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size)=0
Register the EH frames with the runtime so that c++ exceptions work.
llvm::RuntimeDyldImpl::writeInt32BE
void writeInt32BE(uint8_t *Addr, uint32_t Value)
Definition: RuntimeDyldImpl.h:325
llvm::StringMap
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:110
llvm::StringRef::equals
bool equals(StringRef RHS) const
equals - Check for string equality, this is more efficient than compare() when the relative ordering ...
Definition: StringRef.h:164
llvm::RelocationEntry::Addend
int64_t Addend
Addend - the relocation addend encoded in the instruction itself.
Definition: RuntimeDyldImpl.h:130
llvm::object::SectionRef
This is a value type class that represents a single section in the list of sections in the object fil...
Definition: ObjectFile.h:80
llvm::RelocationEntry
RelocationEntry - used to represent relocations internally in the dynamic linker.
Definition: RuntimeDyldImpl.h:117
llvm::Triple::ppc64le
@ ppc64le
Definition: Triple.h:72
RuntimeDyldELFMips.h
llvm::RISCVFenceField::O
@ O
Definition: RISCVBaseInfo.h:264
llvm::object::SymbolRef::getSection
Expected< section_iterator > getSection() const
Get section this symbol is defined in reference to.
Definition: ObjectFile.h:423
llvm::pdb::PDB_ColorItem::Address
@ Address
llvm::AMDGPU::Hwreg::Offset
Offset
Definition: SIDefines.h:419
llvm::object::BasicSymbolRef::getFlags
Expected< uint32_t > getFlags() const
Get symbol flags (bitwise OR of SymbolRef::Flags)
Definition: SymbolicFile.h:204
uint64_t
llvm::object::symbol_iterator
Definition: ObjectFile.h:207
llvm::RuntimeDyldELF::RuntimeDyldELF
RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver)
Definition: RuntimeDyldELF.cpp:216
Addr
uint64_t Addr
Definition: ELFObjHandler.cpp:79
move
compiles ldr LCPI1_0 ldr ldr mov lsr tst moveq r1 ldr LCPI1_1 and r0 bx lr It would be better to do something like to fold the shift into the conditional move
Definition: README.txt:546
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:53
llvm::RuntimeDyldImpl::loadObjectImpl
Expected< ObjSectionToIDMap > loadObjectImpl(const object::ObjectFile &Obj)
Definition: RuntimeDyld.cpp:182
llvm::or32le
static void or32le(void *P, int32_t V)
Definition: RuntimeDyldCOFFAArch64.h:35
or32AArch64Imm
static void or32AArch64Imm(void *L, uint64_t Imm)
Definition: RuntimeDyldELF.cpp:33
llvm::RuntimeDyldImpl::IsTargetLittleEndian
bool IsTargetLittleEndian
Definition: RuntimeDyldImpl.h:289
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::SectionEntry
SectionEntry - represents a section emitted into memory by the dynamic linker.
Definition: RuntimeDyldImpl.h:45
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
RuntimeDyldELF.h
llvm::move
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1861
ObjectFile.h
memcpy
<%struct.s * > cast struct s *S to sbyte *< sbyte * > sbyte uint cast struct s *agg result to sbyte *< sbyte * > sbyte uint cast struct s *memtmp to sbyte *< sbyte * > sbyte uint ret void llc ends up issuing two memcpy or custom lower memcpy(of small size) to be ldmia/stmia. I think option 2 is better but the current register allocator cannot allocate a chunk of registers at a time. A feasible temporary solution is to use specific physical registers at the lowering time for small(<
llvm::object::Binary::isELF
bool isELF() const
Definition: Binary.h:122
llvm::object::content_iterator
Definition: SymbolicFile.h:69
llvm::Triple::bpfel
@ bpfel
Definition: Triple.h:56
llvm::ELF::decodePPC64LocalEntryOffset
static int64_t decodePPC64LocalEntryOffset(unsigned Other)
Definition: ELF.h:411
llvm::RuntimeDyldImpl::IsMipsN32ABI
bool IsMipsN32ABI
Definition: RuntimeDyldImpl.h:291
llvm::Triple::armeb
@ armeb
Definition: Triple.h:50
llvm::object::elf_relocation_iterator
Definition: ELFObjectFile.h:219
Triple.h
llvm::ArrayRef< uint8_t >
llvm::Triple::arm
@ arm
Definition: Triple.h:49
llvm::Triple::aarch64_be
@ aarch64_be
Definition: Triple.h:52
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
llvm::object::ObjectFile
This class is the base class for all object file types.
Definition: ObjectFile.h:228
llvm::object::Binary
Definition: Binary.h:32
llvm::Triple::ppcle
@ ppcle
Definition: Triple.h:70
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::cantFail
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:744
uint32_t
llvm::Triple::thumb
@ thumb
Definition: Triple.h:83
RuntimeDyldCheckerImpl.h
llvm::object::DataRefImpl
Definition: SymbolicFile.h:35
llvm::format
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition: Format.h:124
llvm::RuntimeDyld::MemoryManager::allocateCodeSection
virtual uint8_t * allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName)=0
Allocate a memory block of (at least) the given size suitable for executable code.
ELFObjectFile.h
llvm::RelocationValueRef::SectionID
unsigned SectionID
Definition: RuntimeDyldImpl.h:192
LLVM_ELF_IMPORT_TYPES_ELFT
#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Definition: ELFTypes.h:104
llvm::RuntimeDyldImpl::createStubFunction
uint8_t * createStubFunction(uint8_t *Addr, unsigned AbiVariant=0)
Emits a long jump instruction to Addr.
Definition: RuntimeDyld.cpp:985
llvm::logAllUnhandledErrors
void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner={})
Log all errors (if any) in E to OS.
Definition: Error.cpp:63
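A usage sketch; mayFail() is a hypothetical fallible operation. Logging the Error consumes it, so it will not assert on destruction.

  #include <utility>
  #include "llvm/Support/Error.h"
  #include "llvm/Support/raw_ostream.h"

  // Hypothetical operation that fails, for illustration purposes only.
  static llvm::Error mayFail() {
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "relocation target out of range");
  }

  void reportAnyFailure() {
    if (llvm::Error E = mayFail())
      llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "dyld: ");
  }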
llvm::RuntimeDyldELF::getGOTEntrySize
size_t getGOTEntrySize() override
Definition: RuntimeDyldELF.cpp:2213
llvm::SymbolTableEntry
Symbol info for RuntimeDyld.
Definition: RuntimeDyldImpl.h:217
llvm::JITSymbolResolver
Symbol resolution interface.
Definition: JITSymbol.h:371
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std
Definition: BitVector.h:851
llvm::object::SymbolRef::Type
Type
Definition: ObjectFile.h:171
llvm::RuntimeDyldELF::create
static std::unique_ptr< RuntimeDyldELF > create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver)
Definition: RuntimeDyldELF.cpp:233
llvm::RuntimeDyldImpl::findOrEmitSection
Expected< unsigned > findOrEmitSection(const ObjectFile &Obj, const SectionRef &Section, bool IsCode, ObjSectionToIDMap &LocalSections)
Find Section in LocalSections.
Definition: RuntimeDyld.cpp:942
uint16_t
llvm::object::OwningBinary
Definition: RuntimeDyld.h:36
llvm::toString
const char * toString(DWARFSectionKind Kind)
Definition: DWARFUnitIndex.h:67
llvm::SectionName
Definition: DWARFSection.h:21
llvm::Error
Lightweight error class with error context and mandatory checking.
Definition: Error.h:155
llvm::applyPPClo
static uint16_t applyPPClo(uint64_t value)
Definition: RuntimeDyldELF.cpp:785
llvm::TargetStackID::Value
Value
Definition: TargetFrameLowering.h:27
llvm::Triple::mipsel
@ mipsel
Definition: Triple.h:65
llvm::Triple::getArchTypePrefix
static StringRef getArchTypePrefix(ArchType Kind)
Get the "prefix" canonical name for the Kind architecture.
Definition: Triple.cpp:91
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:614
llvm::Triple::systemz
@ systemz
Definition: Triple.h:80
llvm::ARMBuildAttrs::Symbol
@ Symbol
Definition: ARMBuildAttributes.h:83
llvm::RISCVMatInt::Imm
@ Imm
Definition: RISCVMatInt.h:23
llvm::RuntimeDyldImpl::Arch
Triple::ArchType Arch
Definition: RuntimeDyldImpl.h:288
llvm::Expected::takeError
Error takeError()
Take ownership of the stored error.
Definition: Error.h:596
llvm::RuntimeDyldELF::SectionToGOTMap
DenseMap< SID, SID > SectionToGOTMap
Definition: RuntimeDyldELF.h:143
llvm::object::Binary::getFileName
StringRef getFileName() const
Definition: Binary.cpp:41
llvm::support::endian::read32le
uint32_t read32le(const void *P)
Definition: Endian.h:381
llvm::Triple::thumbeb
@ thumbeb
Definition: Triple.h:84
llvm::RelocationValueRef::SymbolName
const char * SymbolName
Definition: RuntimeDyldImpl.h:195
llvm::RuntimeDyld::MemoryManager::allowStubAllocation
virtual bool allowStubAllocation() const
Override to return false to tell LLVM no stub space will be needed.
Definition: RuntimeDyld.h:149
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::RuntimeDyldELF::loadObject
std::unique_ptr< RuntimeDyld::LoadedObjectInfo > loadObject(const object::ObjectFile &O) override
Definition: RuntimeDyldELF.cpp:248
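A sketch of how this override is typically reached through the public RuntimeDyld front end, which dispatches to RuntimeDyldELF for ELF objects. The file name module.o is hypothetical and error handling is kept minimal.

  #include "llvm/ExecutionEngine/RuntimeDyld.h"
  #include "llvm/ExecutionEngine/SectionMemoryManager.h"
  #include "llvm/Object/ObjectFile.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/MemoryBuffer.h"

  llvm::Error loadAndLink() {
    using namespace llvm;
    auto BufOrErr = MemoryBuffer::getFile("module.o"); // hypothetical path
    if (!BufOrErr)
      return errorCodeToError(BufOrErr.getError());
    auto ObjOrErr =
        object::ObjectFile::createObjectFile((*BufOrErr)->getMemBufferRef());
    if (!ObjOrErr)
      return ObjOrErr.takeError();

    SectionMemoryManager MemMgr;   // serves as memory manager and resolver
    RuntimeDyld Dyld(MemMgr, MemMgr);
    Dyld.loadObject(**ObjOrErr);   // allocates sections, records relocations
    Dyld.resolveRelocations();     // applies the recorded relocations
    if (Dyld.hasError())
      return make_error<StringError>(Dyld.getErrorString(),
                                     inconvertibleErrorCode());
    return Error::success();
  }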
llvm::object::ObjectFile::sections
section_iterator_range sections() const
Definition: ObjectFile.h:327
llvm::ELF::EF_MIPS_ABI2
@ EF_MIPS_ABI2
Definition: ELF.h:511
SymInfo
SymInfo contains information about a symbol: its address and section index, which is -1LL for absolute ...
Definition: DWARFContext.cpp:1459
llvm::object::elf_symbol_iterator
Definition: ELFObjectFile.h:189
llvm::RuntimeDyldImpl::readBytesUnaligned
uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const
Endian-aware read. Read the least significant Size bytes from Src.
Definition: RuntimeDyld.cpp:720
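For illustration only, a standalone sketch of the same endian-aware idea (this is not the LLVM member function): assemble Size bytes from Src into the low bits of the result, honoring the target's byte order.

  #include <cstdint>

  // Reads Size (<= 8) bytes from Src; byte significance depends on the
  // target endianness, mirroring the documented behavior above.
  static uint64_t readBytesUnalignedSketch(const uint8_t *Src, unsigned Size,
                                           bool IsTargetLittleEndian) {
    uint64_t Result = 0;
    for (unsigned i = 0; i != Size; ++i) {
      unsigned Shift = IsTargetLittleEndian ? 8 * i : 8 * (Size - i - 1);
      Result |= static_cast<uint64_t>(Src[i]) << Shift;
    }
    return Result;
  }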
llvm::object::SymbolRef
This is a value type class that represents a single symbol in the list of symbols in the object file.
Definition: ObjectFile.h:167
llvm::object::Binary::getData
StringRef getData() const
Definition: Binary.cpp:39
llvm::RuntimeDyldImpl::StubMap
std::map< RelocationValueRef, uintptr_t > StubMap
Definition: RuntimeDyldImpl.h:286
llvm::object::ELFObjectFile::isDyldType
bool isDyldType() const
Definition: ELFObjectFile.h:452
llvm::applyPPChigher
static uint16_t applyPPChigher(uint64_t value)
Definition: RuntimeDyldELF.cpp:795
llvm::orc::SymbolState::Resolved
@ Resolved
Queried, materialization begun.
llvm::object::RelocationRef
This is a value type class that represents a single relocation in the list of relocations in the obje...
Definition: ObjectFile.h:51
llvm::object::ELFRelocationRef::getAddend
Expected< int64_t > getAddend() const
Definition: ELFObjectFile.h:214
llvm::RuntimeDyld::MemoryManager::allocateDataSection
virtual uint8_t * allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool IsReadOnly)=0
Allocate a memory block of (at least) the given size suitable for data.
llvm::RuntimeDyldELF::~RuntimeDyldELF
~RuntimeDyldELF() override
llvm::write32AArch64Addr
static void write32AArch64Addr(void *T, uint64_t s, uint64_t p, int shift)
Definition: RuntimeDyldCOFFAArch64.h:55
Endian.h
llvm::RelocationValueRef
Definition: RuntimeDyldImpl.h:190
llvm::object::ELFObjectFile
Definition: ELFObjectFile.h:241
llvm::raw_string_ostream::str
std::string & str()
Returns a reference to the string.
Definition: raw_ostream.h:647
llvm::RelocationValueRef::Addend
int64_t Addend
Definition: RuntimeDyldImpl.h:194
llvm::handleAllErrors
void handleAllErrors(Error E, HandlerTs &&... Handlers)
Behaves the same as handleErrors, except that by contract all errors must be handled by the given han...
Definition: Error.h:965
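A short sketch: by contract every error inside E must be consumed by one of the handlers, and a handler taking const ErrorInfoBase & acts as a catch-all. Producing E is left to the caller.

  #include <utility>
  #include "llvm/Support/Error.h"
  #include "llvm/Support/raw_ostream.h"

  // Consumes every error in E, printing each message to stderr.
  void reportFailure(llvm::Error E) {
    llvm::handleAllErrors(std::move(E), [](const llvm::ErrorInfoBase &EIB) {
      llvm::errs() << "dyld error: " << EIB.message() << "\n";
    });
  }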
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::Triple::mips
@ mips
Definition: Triple.h:64
llvm::RelocationEntry::RelType
uint32_t RelType
RelType - relocation type.
Definition: RuntimeDyldImpl.h:126
llvm::RuntimeDyldImpl::writeInt16BE
void writeInt16BE(uint8_t *Addr, uint16_t Value)
Definition: RuntimeDyldImpl.h:320
llvm::Triple::aarch64
@ aarch64
Definition: Triple.h:51
llvm::RuntimeDyldImpl::GlobalSymbolTable
RTDyldSymbolTable GlobalSymbolTable
Definition: RuntimeDyldImpl.h:264
llvm::RuntimeDyldELF::processRelocationRef
Expected< relocation_iterator > processRelocationRef(unsigned SectionID, relocation_iterator RelI, const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) override
Parses one or more object file relocations (some object files use relocation pairs) and stores them to ...
Definition: RuntimeDyldELF.cpp:1213
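The shape of the loop that drives this hook, paraphrased from the generic driver (RuntimeDyldImpl::loadObjectImpl); it runs inside the impl class, so the call is unqualified and the surrounding variables are the linker's internal state. The returned iterator is already advanced past every relocation the callee consumed, which is how relocation pairs are handled.

  // Fragment, not standalone: Sec, SectionID, Obj, LocalSections and Stubs
  // come from the enclosing loadObjectImpl loop.
  for (object::relocation_iterator I = Sec.relocation_begin(),
                                   E = Sec.relocation_end();
       I != E;) {
    auto NextOrErr =
        processRelocationRef(SectionID, I, Obj, LocalSections, Stubs);
    if (!NextOrErr)
      return NextOrErr.takeError();
    I = *NextOrErr; // already past any paired relocation that was consumed
  }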
llvm::RuntimeDyldImpl::ObjSectionToIDMap
std::map< SectionRef, unsigned > ObjSectionToIDMap
Definition: RuntimeDyldImpl.h:261
Shdr
Elf_Shdr Shdr
Definition: ELFObjHandler.cpp:78
llvm::RuntimeDyld::MemoryManager
Memory Management.
Definition: RuntimeDyld.h:92