LLVM 17.0.0git
RuntimeDyldELF.cpp
Go to the documentation of this file.
1//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Implementation of ELF support for the MC-JIT runtime dynamic linker.
10//
11//===----------------------------------------------------------------------===//
12
#include "RuntimeDyldELF.h"
#include "Targets/RuntimeDyldELFMips.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TargetParser/Triple.h"
24
25using namespace llvm;
26using namespace llvm::object;
27using namespace llvm::support::endian;
28
29#define DEBUG_TYPE "dyld"
30
31static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
32
33static void or32AArch64Imm(void *L, uint64_t Imm) {
34 or32le(L, (Imm & 0xFFF) << 10);
35}
36
37template <class T> static void write(bool isBE, void *P, T V) {
38 isBE ? write<T, support::big>(P, V) : write<T, support::little>(P, V);
39}
40
41static void write32AArch64Addr(void *L, uint64_t Imm) {
42 uint32_t ImmLo = (Imm & 0x3) << 29;
43 uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
44 uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
45 write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
46}
47
// Return the bits [Start, End] from Val shifted Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t Val, int Start, int End) {
  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
  return (Val >> Start) & Mask;
}
54
55namespace {
56
57template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
59
60 typedef typename ELFT::uint addr_type;
61
62 DyldELFObject(ELFObjectFile<ELFT> &&Obj);
63
64public:
67
68 void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
69
70 void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
71
72 // Methods for type inquiry through isa, cast and dyn_cast
73 static bool classof(const Binary *v) {
74 return (isa<ELFObjectFile<ELFT>>(v) &&
76 }
77 static bool classof(const ELFObjectFile<ELFT> *v) {
78 return v->isDyldType();
79 }
80};
81
82
83
84// The MemoryBuffer passed into this constructor is just a wrapper around the
85// actual memory. Ultimately, the Binary parent class will take ownership of
86// this MemoryBuffer object but not the underlying memory.
87template <class ELFT>
88DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
89 : ELFObjectFile<ELFT>(std::move(Obj)) {
90 this->isDyldELFObject = true;
91}
92
93template <class ELFT>
95DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
97 if (auto E = Obj.takeError())
98 return std::move(E);
99 std::unique_ptr<DyldELFObject<ELFT>> Ret(
100 new DyldELFObject<ELFT>(std::move(*Obj)));
101 return std::move(Ret);
102}
103
104template <class ELFT>
105void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
106 uint64_t Addr) {
107 DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
108 Elf_Shdr *shdr =
109 const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
110
111 // This assumes the address passed in matches the target address bitness
112 // The template-based type cast handles everything else.
113 shdr->sh_addr = static_cast<addr_type>(Addr);
114}
115
116template <class ELFT>
117void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
118 uint64_t Addr) {
119
120 Elf_Sym *sym = const_cast<Elf_Sym *>(
122
123 // This assumes the address passed in matches the target address bitness
124 // The template-based type cast handles everything else.
125 sym->st_value = static_cast<addr_type>(Addr);
126}
127
128class LoadedELFObjectInfo final
129 : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
130 RuntimeDyld::LoadedObjectInfo> {
131public:
132 LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
133 : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
134
136 getObjectForDebug(const ObjectFile &Obj) const override;
137};
138
139template <typename ELFT>
141createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
142 const LoadedELFObjectInfo &L) {
143 typedef typename ELFT::Shdr Elf_Shdr;
144 typedef typename ELFT::uint addr_type;
145
147 DyldELFObject<ELFT>::create(Buffer);
148 if (Error E = ObjOrErr.takeError())
149 return std::move(E);
150
151 std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
152
153 // Iterate over all sections in the object.
154 auto SI = SourceObject.section_begin();
155 for (const auto &Sec : Obj->sections()) {
156 Expected<StringRef> NameOrErr = Sec.getName();
157 if (!NameOrErr) {
158 consumeError(NameOrErr.takeError());
159 continue;
160 }
161
162 if (*NameOrErr != "") {
163 DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
164 Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
165 reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
166
167 if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
168 // This assumes that the address passed in matches the target address
169 // bitness. The template-based type cast handles everything else.
170 shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
171 }
172 }
173 ++SI;
174 }
175
176 return std::move(Obj);
177}
178
180createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
181 assert(Obj.isELF() && "Not an ELF object file.");
182
183 std::unique_ptr<MemoryBuffer> Buffer =
185
186 Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
187 handleAllErrors(DebugObj.takeError());
188 if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
189 DebugObj =
190 createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
191 else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
192 DebugObj =
193 createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
194 else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
195 DebugObj =
196 createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
197 else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
198 DebugObj =
199 createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
200 else
201 llvm_unreachable("Unexpected ELF format");
202
203 handleAllErrors(DebugObj.takeError());
204 return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
205}
206
208LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
209 return createELFDebugObject(Obj, *this);
210}
211
212} // anonymous namespace
213
214namespace llvm {
215
218 : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
220
222 for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
223 SID EHFrameSID = UnregisteredEHFrameSections[i];
224 uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
225 uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
226 size_t EHFrameSize = Sections[EHFrameSID].getSize();
227 MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
228 }
229 UnregisteredEHFrameSections.clear();
230}
231
232std::unique_ptr<RuntimeDyldELF>
236 switch (Arch) {
237 default:
238 return std::make_unique<RuntimeDyldELF>(MemMgr, Resolver);
239 case Triple::mips:
240 case Triple::mipsel:
241 case Triple::mips64:
242 case Triple::mips64el:
243 return std::make_unique<RuntimeDyldELFMips>(MemMgr, Resolver);
244 }
245}
246
247std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
249 if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
250 return std::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
251 else {
252 HasError = true;
253 raw_string_ostream ErrStream(ErrorStr);
254 logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
255 return nullptr;
256 }
257}
258
259void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
261 uint32_t Type, int64_t Addend,
262 uint64_t SymOffset) {
263 switch (Type) {
264 default:
265 report_fatal_error("Relocation type not implemented yet!");
266 break;
267 case ELF::R_X86_64_NONE:
268 break;
269 case ELF::R_X86_64_8: {
270 Value += Addend;
271 assert((int64_t)Value <= INT8_MAX && (int64_t)Value >= INT8_MIN);
272 uint8_t TruncatedAddr = (Value & 0xFF);
273 *Section.getAddressWithOffset(Offset) = TruncatedAddr;
274 LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
275 << format("%p\n", Section.getAddressWithOffset(Offset)));
276 break;
277 }
278 case ELF::R_X86_64_16: {
279 Value += Addend;
280 assert((int64_t)Value <= INT16_MAX && (int64_t)Value >= INT16_MIN);
281 uint16_t TruncatedAddr = (Value & 0xFFFF);
282 support::ulittle16_t::ref(Section.getAddressWithOffset(Offset)) =
283 TruncatedAddr;
284 LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
285 << format("%p\n", Section.getAddressWithOffset(Offset)));
286 break;
287 }
288 case ELF::R_X86_64_64: {
289 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
290 Value + Addend;
291 LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
292 << format("%p\n", Section.getAddressWithOffset(Offset)));
293 break;
294 }
295 case ELF::R_X86_64_32:
296 case ELF::R_X86_64_32S: {
297 Value += Addend;
298 assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
299 (Type == ELF::R_X86_64_32S &&
300 ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
301 uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
302 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
303 TruncatedAddr;
304 LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
305 << format("%p\n", Section.getAddressWithOffset(Offset)));
306 break;
307 }
308 case ELF::R_X86_64_PC8: {
309 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
310 int64_t RealOffset = Value + Addend - FinalAddress;
311 assert(isInt<8>(RealOffset));
312 int8_t TruncOffset = (RealOffset & 0xFF);
313 Section.getAddress()[Offset] = TruncOffset;
314 break;
315 }
316 case ELF::R_X86_64_PC32: {
317 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
318 int64_t RealOffset = Value + Addend - FinalAddress;
319 assert(isInt<32>(RealOffset));
320 int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
321 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
322 TruncOffset;
323 break;
324 }
325 case ELF::R_X86_64_PC64: {
326 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
327 int64_t RealOffset = Value + Addend - FinalAddress;
328 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
329 RealOffset;
330 LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
331 << format("%p\n", FinalAddress));
332 break;
333 }
334 case ELF::R_X86_64_GOTOFF64: {
335 // Compute Value - GOTBase.
336 uint64_t GOTBase = 0;
337 for (const auto &Section : Sections) {
338 if (Section.getName() == ".got") {
339 GOTBase = Section.getLoadAddressWithOffset(0);
340 break;
341 }
342 }
343 assert(GOTBase != 0 && "missing GOT");
344 int64_t GOTOffset = Value - GOTBase + Addend;
345 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
346 break;
347 }
348 case ELF::R_X86_64_DTPMOD64: {
349 // We only have one DSO, so the module id is always 1.
350 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 1;
351 break;
352 }
353 case ELF::R_X86_64_DTPOFF64:
354 case ELF::R_X86_64_TPOFF64: {
355 // DTPOFF64 should resolve to the offset in the TLS block, TPOFF64 to the
356 // offset in the *initial* TLS block. Since we are statically linking, all
357 // TLS blocks already exist in the initial block, so resolve both
358 // relocations equally.
359 support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
360 Value + Addend;
361 break;
362 }
363 case ELF::R_X86_64_DTPOFF32:
364 case ELF::R_X86_64_TPOFF32: {
365 // As for the (D)TPOFF64 relocations above, both DTPOFF32 and TPOFF32 can
366 // be resolved equally.
367 int64_t RealValue = Value + Addend;
368 assert(RealValue >= INT32_MIN && RealValue <= INT32_MAX);
369 int32_t TruncValue = RealValue;
370 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
371 TruncValue;
372 break;
373 }
374 }
375}
376
377void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
379 uint32_t Type, int32_t Addend) {
380 switch (Type) {
381 case ELF::R_386_32: {
382 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
383 Value + Addend;
384 break;
385 }
386 // Handle R_386_PLT32 like R_386_PC32 since it should be able to
387 // reach any 32 bit address.
388 case ELF::R_386_PLT32:
389 case ELF::R_386_PC32: {
390 uint32_t FinalAddress =
391 Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
392 uint32_t RealOffset = Value + Addend - FinalAddress;
393 support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
394 RealOffset;
395 break;
396 }
397 default:
398 // There are other relocation types, but it appears these are the
399 // only ones currently used by the LLVM ELF object writer
400 report_fatal_error("Relocation type not implemented yet!");
401 break;
402 }
403}
404
405void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
407 uint32_t Type, int64_t Addend) {
408 uint32_t *TargetPtr =
409 reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
410 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
411 // Data should use target endian. Code should always use little endian.
412 bool isBE = Arch == Triple::aarch64_be;
413
414 LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
415 << format("%llx", Section.getAddressWithOffset(Offset))
416 << " FinalAddress: 0x" << format("%llx", FinalAddress)
417 << " Value: 0x" << format("%llx", Value) << " Type: 0x"
418 << format("%x", Type) << " Addend: 0x"
419 << format("%llx", Addend) << "\n");
420
421 switch (Type) {
422 default:
423 report_fatal_error("Relocation type not implemented yet!");
424 break;
425 case ELF::R_AARCH64_NONE:
426 break;
427 case ELF::R_AARCH64_ABS16: {
428 uint64_t Result = Value + Addend;
429 assert(static_cast<int64_t>(Result) >= INT16_MIN && Result < UINT16_MAX);
430 write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
431 break;
432 }
433 case ELF::R_AARCH64_ABS32: {
434 uint64_t Result = Value + Addend;
435 assert(static_cast<int64_t>(Result) >= INT32_MIN && Result < UINT32_MAX);
436 write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
437 break;
438 }
439 case ELF::R_AARCH64_ABS64:
440 write(isBE, TargetPtr, Value + Addend);
441 break;
442 case ELF::R_AARCH64_PLT32: {
443 uint64_t Result = Value + Addend - FinalAddress;
444 assert(static_cast<int64_t>(Result) >= INT32_MIN &&
445 static_cast<int64_t>(Result) <= INT32_MAX);
446 write(isBE, TargetPtr, static_cast<uint32_t>(Result));
447 break;
448 }
449 case ELF::R_AARCH64_PREL16: {
450 uint64_t Result = Value + Addend - FinalAddress;
451 assert(static_cast<int64_t>(Result) >= INT16_MIN &&
452 static_cast<int64_t>(Result) <= UINT16_MAX);
453 write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
454 break;
455 }
456 case ELF::R_AARCH64_PREL32: {
457 uint64_t Result = Value + Addend - FinalAddress;
458 assert(static_cast<int64_t>(Result) >= INT32_MIN &&
459 static_cast<int64_t>(Result) <= UINT32_MAX);
460 write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
461 break;
462 }
463 case ELF::R_AARCH64_PREL64:
464 write(isBE, TargetPtr, Value + Addend - FinalAddress);
465 break;
466 case ELF::R_AARCH64_CONDBR19: {
467 uint64_t BranchImm = Value + Addend - FinalAddress;
468
469 assert(isInt<21>(BranchImm));
470 *TargetPtr &= 0xff00001fU;
471 // Immediate:20:2 goes in bits 23:5 of Bcc, CBZ, CBNZ
472 or32le(TargetPtr, (BranchImm & 0x001FFFFC) << 3);
473 break;
474 }
475 case ELF::R_AARCH64_TSTBR14: {
476 uint64_t BranchImm = Value + Addend - FinalAddress;
477
478 assert(isInt<16>(BranchImm));
479
480 *TargetPtr &= 0xfff8001fU;
481 // Immediate:15:2 goes in bits 18:5 of TBZ, TBNZ
482 or32le(TargetPtr, (BranchImm & 0x0000FFFC) << 3);
483 break;
484 }
485 case ELF::R_AARCH64_CALL26: // fallthrough
486 case ELF::R_AARCH64_JUMP26: {
487 // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
488 // calculation.
489 uint64_t BranchImm = Value + Addend - FinalAddress;
490
491 // "Check that -2^27 <= result < 2^27".
492 assert(isInt<28>(BranchImm));
493 or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
494 break;
495 }
496 case ELF::R_AARCH64_MOVW_UABS_G3:
497 or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
498 break;
499 case ELF::R_AARCH64_MOVW_UABS_G2_NC:
500 or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
501 break;
502 case ELF::R_AARCH64_MOVW_UABS_G1_NC:
503 or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
504 break;
505 case ELF::R_AARCH64_MOVW_UABS_G0_NC:
506 or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
507 break;
508 case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
509 // Operation: Page(S+A) - Page(P)
511 ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
512
513 // Check that -2^32 <= X < 2^32
514 assert(isInt<33>(Result) && "overflow check failed for relocation");
515
516 // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
517 // from bits 32:12 of X.
518 write32AArch64Addr(TargetPtr, Result >> 12);
519 break;
520 }
521 case ELF::R_AARCH64_ADD_ABS_LO12_NC:
522 // Operation: S + A
523 // Immediate goes in bits 21:10 of LD/ST instruction, taken
524 // from bits 11:0 of X
525 or32AArch64Imm(TargetPtr, Value + Addend);
526 break;
527 case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
528 // Operation: S + A
529 // Immediate goes in bits 21:10 of LD/ST instruction, taken
530 // from bits 11:0 of X
531 or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
532 break;
533 case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
534 // Operation: S + A
535 // Immediate goes in bits 21:10 of LD/ST instruction, taken
536 // from bits 11:1 of X
537 or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
538 break;
539 case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
540 // Operation: S + A
541 // Immediate goes in bits 21:10 of LD/ST instruction, taken
542 // from bits 11:2 of X
543 or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
544 break;
545 case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
546 // Operation: S + A
547 // Immediate goes in bits 21:10 of LD/ST instruction, taken
548 // from bits 11:3 of X
549 or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
550 break;
551 case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
552 // Operation: S + A
553 // Immediate goes in bits 21:10 of LD/ST instruction, taken
554 // from bits 11:4 of X
555 or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
556 break;
557 case ELF::R_AARCH64_LD_PREL_LO19: {
558 // Operation: S + A - P
559 uint64_t Result = Value + Addend - FinalAddress;
560
561 // "Check that -2^20 <= result < 2^20".
562 assert(isInt<21>(Result));
563
564 *TargetPtr &= 0xff00001fU;
565 // Immediate goes in bits 23:5 of LD imm instruction, taken
566 // from bits 20:2 of X
567 *TargetPtr |= ((Result & 0xffc) << (5 - 2));
568 break;
569 }
570 case ELF::R_AARCH64_ADR_PREL_LO21: {
571 // Operation: S + A - P
572 uint64_t Result = Value + Addend - FinalAddress;
573
574 // "Check that -2^20 <= result < 2^20".
575 assert(isInt<21>(Result));
576
577 *TargetPtr &= 0x9f00001fU;
578 // Immediate goes in bits 23:5, 30:29 of ADR imm instruction, taken
579 // from bits 20:0 of X
580 *TargetPtr |= ((Result & 0xffc) << (5 - 2));
581 *TargetPtr |= (Result & 0x3) << 29;
582 break;
583 }
584 }
585}
586
587void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
589 uint32_t Type, int32_t Addend) {
590 // TODO: Add Thumb relocations.
591 uint32_t *TargetPtr =
592 reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
593 uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
594 Value += Addend;
595
596 LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
597 << Section.getAddressWithOffset(Offset)
598 << " FinalAddress: " << format("%p", FinalAddress)
599 << " Value: " << format("%x", Value)
600 << " Type: " << format("%x", Type)
601 << " Addend: " << format("%x", Addend) << "\n");
602
603 switch (Type) {
604 default:
605 llvm_unreachable("Not implemented relocation type!");
606
607 case ELF::R_ARM_NONE:
608 break;
609 // Write a 31bit signed offset
610 case ELF::R_ARM_PREL31:
611 support::ulittle32_t::ref{TargetPtr} =
612 (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
613 ((Value - FinalAddress) & ~0x80000000);
614 break;
615 case ELF::R_ARM_TARGET1:
616 case ELF::R_ARM_ABS32:
617 support::ulittle32_t::ref{TargetPtr} = Value;
618 break;
619 // Write first 16 bit of 32 bit value to the mov instruction.
620 // Last 4 bit should be shifted.
621 case ELF::R_ARM_MOVW_ABS_NC:
622 case ELF::R_ARM_MOVT_ABS:
623 if (Type == ELF::R_ARM_MOVW_ABS_NC)
624 Value = Value & 0xFFFF;
625 else if (Type == ELF::R_ARM_MOVT_ABS)
626 Value = (Value >> 16) & 0xFFFF;
627 support::ulittle32_t::ref{TargetPtr} =
628 (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
629 (((Value >> 12) & 0xF) << 16);
630 break;
631 // Write 24 bit relative value to the branch instruction.
632 case ELF::R_ARM_PC24: // Fall through.
633 case ELF::R_ARM_CALL: // Fall through.
634 case ELF::R_ARM_JUMP24:
635 int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
636 RelValue = (RelValue & 0x03FFFFFC) >> 2;
637 assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
638 support::ulittle32_t::ref{TargetPtr} =
639 (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
640 break;
641 }
642}
643
644void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
645 if (Arch == Triple::UnknownArch ||
646 !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
647 IsMipsO32ABI = false;
648 IsMipsN32ABI = false;
649 IsMipsN64ABI = false;
650 return;
651 }
652 if (auto *E = dyn_cast<ELFObjectFileBase>(&Obj)) {
653 unsigned AbiVariant = E->getPlatformFlags();
654 IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
655 IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
656 }
657 IsMipsN64ABI = Obj.getFileFormatName().equals("elf64-mips");
658}
659
660// Return the .TOC. section and offset.
661Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
662 ObjSectionToIDMap &LocalSections,
663 RelocationValueRef &Rel) {
664 // Set a default SectionID in case we do not find a TOC section below.
665 // This may happen for references to TOC base base (sym@toc, .odp
666 // relocation) without a .toc directive. In this case just use the
667 // first section (which is usually the .odp) since the code won't
668 // reference the .toc base directly.
669 Rel.SymbolName = nullptr;
670 Rel.SectionID = 0;
671
672 // The TOC consists of sections .got, .toc, .tocbss, .plt in that
673 // order. The TOC starts where the first of these sections starts.
674 for (auto &Section : Obj.sections()) {
675 Expected<StringRef> NameOrErr = Section.getName();
676 if (!NameOrErr)
677 return NameOrErr.takeError();
678 StringRef SectionName = *NameOrErr;
679
680 if (SectionName == ".got"
681 || SectionName == ".toc"
682 || SectionName == ".tocbss"
683 || SectionName == ".plt") {
684 if (auto SectionIDOrErr =
685 findOrEmitSection(Obj, Section, false, LocalSections))
686 Rel.SectionID = *SectionIDOrErr;
687 else
688 return SectionIDOrErr.takeError();
689 break;
690 }
691 }
692
693 // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
694 // thus permitting a full 64 Kbytes segment.
695 Rel.Addend = 0x8000;
696
697 return Error::success();
698}
699
700// Returns the sections and offset associated with the ODP entry referenced
701// by Symbol.
702Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
703 ObjSectionToIDMap &LocalSections,
704 RelocationValueRef &Rel) {
705 // Get the ELF symbol value (st_value) to compare with Relocation offset in
706 // .opd entries
707 for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
708 si != se; ++si) {
709
710 Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
711 if (!RelSecOrErr)
713
714 section_iterator RelSecI = *RelSecOrErr;
715 if (RelSecI == Obj.section_end())
716 continue;
717
718 Expected<StringRef> NameOrErr = RelSecI->getName();
719 if (!NameOrErr)
720 return NameOrErr.takeError();
721 StringRef RelSectionName = *NameOrErr;
722
723 if (RelSectionName != ".opd")
724 continue;
725
726 for (elf_relocation_iterator i = si->relocation_begin(),
727 e = si->relocation_end();
728 i != e;) {
729 // The R_PPC64_ADDR64 relocation indicates the first field
730 // of a .opd entry
731 uint64_t TypeFunc = i->getType();
732 if (TypeFunc != ELF::R_PPC64_ADDR64) {
733 ++i;
734 continue;
735 }
736
737 uint64_t TargetSymbolOffset = i->getOffset();
738 symbol_iterator TargetSymbol = i->getSymbol();
739 int64_t Addend;
740 if (auto AddendOrErr = i->getAddend())
741 Addend = *AddendOrErr;
742 else
743 return AddendOrErr.takeError();
744
745 ++i;
746 if (i == e)
747 break;
748
749 // Just check if following relocation is a R_PPC64_TOC
750 uint64_t TypeTOC = i->getType();
751 if (TypeTOC != ELF::R_PPC64_TOC)
752 continue;
753
754 // Finally compares the Symbol value and the target symbol offset
755 // to check if this .opd entry refers to the symbol the relocation
756 // points to.
757 if (Rel.Addend != (int64_t)TargetSymbolOffset)
758 continue;
759
760 section_iterator TSI = Obj.section_end();
761 if (auto TSIOrErr = TargetSymbol->getSection())
762 TSI = *TSIOrErr;
763 else
764 return TSIOrErr.takeError();
765 assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
766
767 bool IsCode = TSI->isText();
768 if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
769 LocalSections))
770 Rel.SectionID = *SectionIDOrErr;
771 else
772 return SectionIDOrErr.takeError();
773 Rel.Addend = (intptr_t)Addend;
774 return Error::success();
775 }
776 }
777 llvm_unreachable("Attempting to get address of ODP entry!");
778}
779
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.

static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
786
// #hi(value): bits 31:16 of value.
static inline uint16_t applyPPChi(uint64_t value) {
  return (value >> 16) & 0xffff;
}
790
// #ha(value): bits 31:16 adjusted so that adding #lo(value) (sign-extended)
// reproduces value.
static inline uint16_t applyPPCha(uint64_t value) {
  return ((value + 0x8000) >> 16) & 0xffff;
}
794
// #higher(value): bits 47:32 of value.
static inline uint16_t applyPPChigher(uint64_t value) {
  return (value >> 32) & 0xffff;
}
798
// #highera(value): bits 47:32 with the #lo carry adjustment.
static inline uint16_t applyPPChighera(uint64_t value) {
  return ((value + 0x8000) >> 32) & 0xffff;
}
802
// #highest(value): bits 63:48 of value.
static inline uint16_t applyPPChighest(uint64_t value) {
  return (value >> 48) & 0xffff;
}
806
// #highesta(value): bits 63:48 with the #lo carry adjustment.
static inline uint16_t applyPPChighesta(uint64_t value) {
  return ((value + 0x8000) >> 48) & 0xffff;
}
810
811void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
813 uint32_t Type, int64_t Addend) {
814 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
815 switch (Type) {
816 default:
817 report_fatal_error("Relocation type not implemented yet!");
818 break;
819 case ELF::R_PPC_ADDR16_LO:
820 writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
821 break;
822 case ELF::R_PPC_ADDR16_HI:
823 writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
824 break;
825 case ELF::R_PPC_ADDR16_HA:
826 writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
827 break;
828 }
829}
830
831void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
833 uint32_t Type, int64_t Addend) {
834 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
835 switch (Type) {
836 default:
837 report_fatal_error("Relocation type not implemented yet!");
838 break;
839 case ELF::R_PPC64_ADDR16:
840 writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
841 break;
842 case ELF::R_PPC64_ADDR16_DS:
843 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
844 break;
845 case ELF::R_PPC64_ADDR16_LO:
846 writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
847 break;
848 case ELF::R_PPC64_ADDR16_LO_DS:
849 writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
850 break;
851 case ELF::R_PPC64_ADDR16_HI:
852 case ELF::R_PPC64_ADDR16_HIGH:
853 writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
854 break;
855 case ELF::R_PPC64_ADDR16_HA:
856 case ELF::R_PPC64_ADDR16_HIGHA:
857 writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
858 break;
859 case ELF::R_PPC64_ADDR16_HIGHER:
860 writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
861 break;
862 case ELF::R_PPC64_ADDR16_HIGHERA:
863 writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
864 break;
865 case ELF::R_PPC64_ADDR16_HIGHEST:
866 writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
867 break;
868 case ELF::R_PPC64_ADDR16_HIGHESTA:
869 writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
870 break;
871 case ELF::R_PPC64_ADDR14: {
872 assert(((Value + Addend) & 3) == 0);
873 // Preserve the AA/LK bits in the branch instruction
874 uint8_t aalk = *(LocalAddress + 3);
875 writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
876 } break;
877 case ELF::R_PPC64_REL16_LO: {
878 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
879 uint64_t Delta = Value - FinalAddress + Addend;
880 writeInt16BE(LocalAddress, applyPPClo(Delta));
881 } break;
882 case ELF::R_PPC64_REL16_HI: {
883 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
884 uint64_t Delta = Value - FinalAddress + Addend;
885 writeInt16BE(LocalAddress, applyPPChi(Delta));
886 } break;
887 case ELF::R_PPC64_REL16_HA: {
888 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
889 uint64_t Delta = Value - FinalAddress + Addend;
890 writeInt16BE(LocalAddress, applyPPCha(Delta));
891 } break;
892 case ELF::R_PPC64_ADDR32: {
893 int64_t Result = static_cast<int64_t>(Value + Addend);
894 if (SignExtend64<32>(Result) != Result)
895 llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
896 writeInt32BE(LocalAddress, Result);
897 } break;
898 case ELF::R_PPC64_REL24: {
899 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
900 int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
901 if (SignExtend64<26>(delta) != delta)
902 llvm_unreachable("Relocation R_PPC64_REL24 overflow");
903 // We preserve bits other than LI field, i.e. PO and AA/LK fields.
904 uint32_t Inst = readBytesUnaligned(LocalAddress, 4);
905 writeInt32BE(LocalAddress, (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
906 } break;
907 case ELF::R_PPC64_REL32: {
908 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
909 int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
910 if (SignExtend64<32>(delta) != delta)
911 llvm_unreachable("Relocation R_PPC64_REL32 overflow");
912 writeInt32BE(LocalAddress, delta);
913 } break;
914 case ELF::R_PPC64_REL64: {
915 uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
916 uint64_t Delta = Value - FinalAddress + Addend;
917 writeInt64BE(LocalAddress, Delta);
918 } break;
919 case ELF::R_PPC64_ADDR64:
920 writeInt64BE(LocalAddress, Value + Addend);
921 break;
922 }
923}
924
925void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
927 uint32_t Type, int64_t Addend) {
928 uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
929 switch (Type) {
930 default:
931 report_fatal_error("Relocation type not implemented yet!");
932 break;
933 case ELF::R_390_PC16DBL:
934 case ELF::R_390_PLT16DBL: {
935 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
936 assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
937 writeInt16BE(LocalAddress, Delta / 2);
938 break;
939 }
940 case ELF::R_390_PC32DBL:
941 case ELF::R_390_PLT32DBL: {
942 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
943 assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
944 writeInt32BE(LocalAddress, Delta / 2);
945 break;
946 }
947 case ELF::R_390_PC16: {
948 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
949 assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
950 writeInt16BE(LocalAddress, Delta);
951 break;
952 }
953 case ELF::R_390_PC32: {
954 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
955 assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
956 writeInt32BE(LocalAddress, Delta);
957 break;
958 }
959 case ELF::R_390_PC64: {
960 int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
961 writeInt64BE(LocalAddress, Delta);
962 break;
963 }
964 case ELF::R_390_8:
965 *LocalAddress = (uint8_t)(Value + Addend);
966 break;
967 case ELF::R_390_16:
968 writeInt16BE(LocalAddress, Value + Addend);
969 break;
970 case ELF::R_390_32:
971 writeInt32BE(LocalAddress, Value + Addend);
972 break;
973 case ELF::R_390_64:
974 writeInt64BE(LocalAddress, Value + Addend);
975 break;
976 }
977}
978
979void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
981 uint32_t Type, int64_t Addend) {
982 bool isBE = Arch == Triple::bpfeb;
983
984 switch (Type) {
985 default:
986 report_fatal_error("Relocation type not implemented yet!");
987 break;
988 case ELF::R_BPF_NONE:
989 case ELF::R_BPF_64_64:
990 case ELF::R_BPF_64_32:
991 case ELF::R_BPF_64_NODYLD32:
992 break;
993 case ELF::R_BPF_64_ABS64: {
994 write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
995 LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
996 << format("%p\n", Section.getAddressWithOffset(Offset)));
997 break;
998 }
999 case ELF::R_BPF_64_ABS32: {
1000 Value += Addend;
1001 assert(Value <= UINT32_MAX);
1002 write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
1003 LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
1004 << format("%p\n", Section.getAddressWithOffset(Offset)));
1005 break;
1006 }
1007 }
1008}
1009
1010// The target location for the relocation is described by RE.SectionID and
1011// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
1012// SectionEntry has three members describing its location.
1013// SectionEntry::Address is the address at which the section has been loaded
1014// into memory in the current (host) process. SectionEntry::LoadAddress is the
1015// address that the section will have in the target process.
1016// SectionEntry::ObjAddress is the address of the bits for this section in the
1017// original emitted object image (also in the current address space).
1018//
1019// Relocations will be applied as if the section were loaded at
1020// SectionEntry::LoadAddress, but they will be applied at an address based
1021// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
1022// Target memory contents if they are required for value calculations.
1023//
1024// The Value parameter here is the load address of the symbol for the
1025// relocation to be applied. For relocations which refer to symbols in the
1026// current object Value will be the LoadAddress of the section in which
1027// the symbol resides (RE.Addend provides additional information about the
1028// symbol location). For external symbols, Value will be the address of the
1029// symbol in the target address space.
1030void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
1031 uint64_t Value) {
1032 const SectionEntry &Section = Sections[RE.SectionID];
1033 return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
1034 RE.SymOffset, RE.SectionID);
1035}
1036
1037void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
1039 uint32_t Type, int64_t Addend,
1040 uint64_t SymOffset, SID SectionID) {
1041 switch (Arch) {
1042 case Triple::x86_64:
1043 resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
1044 break;
1045 case Triple::x86:
1046 resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
1047 (uint32_t)(Addend & 0xffffffffL));
1048 break;
1049 case Triple::aarch64:
1050 case Triple::aarch64_be:
1051 resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
1052 break;
1053 case Triple::arm: // Fall through.
1054 case Triple::armeb:
1055 case Triple::thumb:
1056 case Triple::thumbeb:
1057 resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
1058 (uint32_t)(Addend & 0xffffffffL));
1059 break;
1060 case Triple::ppc: // Fall through.
1061 case Triple::ppcle:
1062 resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
1063 break;
1064 case Triple::ppc64: // Fall through.
1065 case Triple::ppc64le:
1066 resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
1067 break;
1068 case Triple::systemz:
1069 resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
1070 break;
1071 case Triple::bpfel:
1072 case Triple::bpfeb:
1073 resolveBPFRelocation(Section, Offset, Value, Type, Addend);
1074 break;
1075 default:
1076 llvm_unreachable("Unsupported CPU type!");
1077 }
1078}
1079
1080void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const {
1081 return (void *)(Sections[SectionID].getObjAddress() + Offset);
1082}
1083
1084void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
1085 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
1086 if (Value.SymbolName)
1087 addRelocationForSymbol(RE, Value.SymbolName);
1088 else
1089 addRelocationForSection(RE, Value.SectionID);
1090}
1091
1092uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
1093 bool IsLocal) const {
1094 switch (RelType) {
1095 case ELF::R_MICROMIPS_GOT16:
1096 if (IsLocal)
1097 return ELF::R_MICROMIPS_LO16;
1098 break;
1099 case ELF::R_MICROMIPS_HI16:
1100 return ELF::R_MICROMIPS_LO16;
1101 case ELF::R_MIPS_GOT16:
1102 if (IsLocal)
1103 return ELF::R_MIPS_LO16;
1104 break;
1105 case ELF::R_MIPS_HI16:
1106 return ELF::R_MIPS_LO16;
1107 case ELF::R_MIPS_PCHI16:
1108 return ELF::R_MIPS_PCLO16;
1109 default:
1110 break;
1111 }
1112 return ELF::R_MIPS_NONE;
1113}
1114
1115// Sometimes we don't need to create thunk for a branch.
1116// This typically happens when branch target is located
1117// in the same object file. In such case target is either
1118// a weak symbol or symbol in a different executable section.
1119// This function checks if branch target is located in the
1120// same object file and if distance between source and target
1121// fits R_AARCH64_CALL26 relocation. If both conditions are
1122// met, it emits direct jump to the target and returns true.
1123// Otherwise false is returned and thunk is created.
1124bool RuntimeDyldELF::resolveAArch64ShortBranch(
1125 unsigned SectionID, relocation_iterator RelI,
1126 const RelocationValueRef &Value) {
1128 if (Value.SymbolName) {
1129 auto Loc = GlobalSymbolTable.find(Value.SymbolName);
1130
1131 // Don't create direct branch for external symbols.
1132 if (Loc == GlobalSymbolTable.end())
1133 return false;
1134
1135 const auto &SymInfo = Loc->second;
1136 Address =
1137 uint64_t(Sections[SymInfo.getSectionID()].getLoadAddressWithOffset(
1138 SymInfo.getOffset()));
1139 } else {
1140 Address = uint64_t(Sections[Value.SectionID].getLoadAddress());
1141 }
1142 uint64_t Offset = RelI->getOffset();
1143 uint64_t SourceAddress = Sections[SectionID].getLoadAddressWithOffset(Offset);
1144
1145 // R_AARCH64_CALL26 requires immediate to be in range -2^27 <= imm < 2^27
1146 // If distance between source and target is out of range then we should
1147 // create thunk.
1148 if (!isInt<28>(Address + Value.Addend - SourceAddress))
1149 return false;
1150
1151 resolveRelocation(Sections[SectionID], Offset, Address, RelI->getType(),
1152 Value.Addend);
1153
1154 return true;
1155}
1156
1157void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
1160 StubMap &Stubs) {
1161
1162 LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
1163 SectionEntry &Section = Sections[SectionID];
1164
1165 uint64_t Offset = RelI->getOffset();
1166 unsigned RelType = RelI->getType();
1167 // Look for an existing stub.
1168 StubMap::const_iterator i = Stubs.find(Value);
1169 if (i != Stubs.end()) {
1170 resolveRelocation(Section, Offset,
1171 (uint64_t)Section.getAddressWithOffset(i->second),
1172 RelType, 0);
1173 LLVM_DEBUG(dbgs() << " Stub function found\n");
1174 } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
1175 // Create a new stub function.
1176 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1177 Stubs[Value] = Section.getStubOffset();
1178 uint8_t *StubTargetAddr = createStubFunction(
1179 Section.getAddressWithOffset(Section.getStubOffset()));
1180
1181 RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
1182 ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
1183 RelocationEntry REmovk_g2(SectionID,
1184 StubTargetAddr - Section.getAddress() + 4,
1185 ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
1186 RelocationEntry REmovk_g1(SectionID,
1187 StubTargetAddr - Section.getAddress() + 8,
1188 ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
1189 RelocationEntry REmovk_g0(SectionID,
1190 StubTargetAddr - Section.getAddress() + 12,
1191 ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
1192
1193 if (Value.SymbolName) {
1194 addRelocationForSymbol(REmovz_g3, Value.SymbolName);
1195 addRelocationForSymbol(REmovk_g2, Value.SymbolName);
1196 addRelocationForSymbol(REmovk_g1, Value.SymbolName);
1197 addRelocationForSymbol(REmovk_g0, Value.SymbolName);
1198 } else {
1199 addRelocationForSection(REmovz_g3, Value.SectionID);
1200 addRelocationForSection(REmovk_g2, Value.SectionID);
1201 addRelocationForSection(REmovk_g1, Value.SectionID);
1202 addRelocationForSection(REmovk_g0, Value.SectionID);
1203 }
1204 resolveRelocation(Section, Offset,
1205 reinterpret_cast<uint64_t>(Section.getAddressWithOffset(
1206 Section.getStubOffset())),
1207 RelType, 0);
1208 Section.advanceStubOffset(getMaxStubSize());
1209 }
1210}
1211
1214 unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
1215 ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
1216 const auto &Obj = cast<ELFObjectFileBase>(O);
1217 uint64_t RelType = RelI->getType();
1218 int64_t Addend = 0;
1219 if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
1220 Addend = *AddendOrErr;
1221 else
1222 consumeError(AddendOrErr.takeError());
1223 elf_symbol_iterator Symbol = RelI->getSymbol();
1224
1225 // Obtain the symbol name which is referenced in the relocation
1226 StringRef TargetName;
1227 if (Symbol != Obj.symbol_end()) {
1228 if (auto TargetNameOrErr = Symbol->getName())
1229 TargetName = *TargetNameOrErr;
1230 else
1231 return TargetNameOrErr.takeError();
1232 }
1233 LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
1234 << " TargetName: " << TargetName << "\n");
1236 // First search for the symbol in the local symbol table
1238
1239 // Search for the symbol in the global symbol table
1241 if (Symbol != Obj.symbol_end()) {
1242 gsi = GlobalSymbolTable.find(TargetName.data());
1243 Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
1244 if (!SymTypeOrErr) {
1245 std::string Buf;
1247 logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
1248 report_fatal_error(Twine(OS.str()));
1249 }
1250 SymType = *SymTypeOrErr;
1251 }
1252 if (gsi != GlobalSymbolTable.end()) {
1253 const auto &SymInfo = gsi->second;
1254 Value.SectionID = SymInfo.getSectionID();
1255 Value.Offset = SymInfo.getOffset();
1256 Value.Addend = SymInfo.getOffset() + Addend;
1257 } else {
1258 switch (SymType) {
1259 case SymbolRef::ST_Debug: {
1260 // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
1261 // and can be changed by another developers. Maybe best way is add
1262 // a new symbol type ST_Section to SymbolRef and use it.
1263 auto SectionOrErr = Symbol->getSection();
1264 if (!SectionOrErr) {
1265 std::string Buf;
1267 logAllUnhandledErrors(SectionOrErr.takeError(), OS);
1268 report_fatal_error(Twine(OS.str()));
1269 }
1270 section_iterator si = *SectionOrErr;
1271 if (si == Obj.section_end())
1272 llvm_unreachable("Symbol section not found, bad object file format!");
1273 LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
1274 bool isCode = si->isText();
1275 if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
1276 ObjSectionToID))
1277 Value.SectionID = *SectionIDOrErr;
1278 else
1279 return SectionIDOrErr.takeError();
1280 Value.Addend = Addend;
1281 break;
1282 }
1283 case SymbolRef::ST_Data:
1286 case SymbolRef::ST_Unknown: {
1287 Value.SymbolName = TargetName.data();
1288 Value.Addend = Addend;
1289
1290 // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
1291 // will manifest here as a NULL symbol name.
1292 // We can set this as a valid (but empty) symbol name, and rely
1293 // on addRelocationForSymbol to handle this.
1294 if (!Value.SymbolName)
1295 Value.SymbolName = "";
1296 break;
1297 }
1298 default:
1299 llvm_unreachable("Unresolved symbol type!");
1300 break;
1301 }
1302 }
1303
1304 uint64_t Offset = RelI->getOffset();
1305
1306 LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
1307 << "\n");
1309 if ((RelType == ELF::R_AARCH64_CALL26 ||
1310 RelType == ELF::R_AARCH64_JUMP26) &&
1312 resolveAArch64Branch(SectionID, Value, RelI, Stubs);
1313 } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
1314 // Create new GOT entry or find existing one. If GOT entry is
1315 // to be created, then we also emit ABS64 relocation for it.
1316 uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
1317 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1318 ELF::R_AARCH64_ADR_PREL_PG_HI21);
1319
1320 } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
1321 uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
1322 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1323 ELF::R_AARCH64_LDST64_ABS_LO12_NC);
1324 } else {
1325 processSimpleRelocation(SectionID, Offset, RelType, Value);
1326 }
1327 } else if (Arch == Triple::arm) {
1328 if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
1329 RelType == ELF::R_ARM_JUMP24) {
1330 // This is an ARM branch relocation, need to use a stub function.
1331 LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
1332 SectionEntry &Section = Sections[SectionID];
1333
1334 // Look for an existing stub.
1335 StubMap::const_iterator i = Stubs.find(Value);
1336 if (i != Stubs.end()) {
1337 resolveRelocation(
1338 Section, Offset,
1339 reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)),
1340 RelType, 0);
1341 LLVM_DEBUG(dbgs() << " Stub function found\n");
1342 } else {
1343 // Create a new stub function.
1344 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1345 Stubs[Value] = Section.getStubOffset();
1346 uint8_t *StubTargetAddr = createStubFunction(
1347 Section.getAddressWithOffset(Section.getStubOffset()));
1348 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1349 ELF::R_ARM_ABS32, Value.Addend);
1350 if (Value.SymbolName)
1351 addRelocationForSymbol(RE, Value.SymbolName);
1352 else
1353 addRelocationForSection(RE, Value.SectionID);
1354
1355 resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
1356 Section.getAddressWithOffset(
1357 Section.getStubOffset())),
1358 RelType, 0);
1359 Section.advanceStubOffset(getMaxStubSize());
1360 }
1361 } else {
1362 uint32_t *Placeholder =
1363 reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
1364 if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
1365 RelType == ELF::R_ARM_ABS32) {
1366 Value.Addend += *Placeholder;
1367 } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
1368 // See ELF for ARM documentation
1369 Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
1370 }
1371 processSimpleRelocation(SectionID, Offset, RelType, Value);
1372 }
1373 } else if (IsMipsO32ABI) {
1374 uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
1375 computePlaceholderAddress(SectionID, Offset));
1376 uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
1377 if (RelType == ELF::R_MIPS_26) {
1378 // This is an Mips branch relocation, need to use a stub function.
1379 LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1380 SectionEntry &Section = Sections[SectionID];
1381
1382 // Extract the addend from the instruction.
1383 // We shift up by two since the Value will be down shifted again
1384 // when applying the relocation.
1385 uint32_t Addend = (Opcode & 0x03ffffff) << 2;
1386
1387 Value.Addend += Addend;
1388
1389 // Look up for existing stub.
1390 StubMap::const_iterator i = Stubs.find(Value);
1391 if (i != Stubs.end()) {
1392 RelocationEntry RE(SectionID, Offset, RelType, i->second);
1393 addRelocationForSection(RE, SectionID);
1394 LLVM_DEBUG(dbgs() << " Stub function found\n");
1395 } else {
1396 // Create a new stub function.
1397 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1398 Stubs[Value] = Section.getStubOffset();
1399
1400 unsigned AbiVariant = Obj.getPlatformFlags();
1401
1402 uint8_t *StubTargetAddr = createStubFunction(
1403 Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
1404
1405 // Creating Hi and Lo relocations for the filled stub instructions.
1406 RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1407 ELF::R_MIPS_HI16, Value.Addend);
1408 RelocationEntry RELo(SectionID,
1409 StubTargetAddr - Section.getAddress() + 4,
1410 ELF::R_MIPS_LO16, Value.Addend);
1411
1412 if (Value.SymbolName) {
1413 addRelocationForSymbol(REHi, Value.SymbolName);
1414 addRelocationForSymbol(RELo, Value.SymbolName);
1415 } else {
1416 addRelocationForSection(REHi, Value.SectionID);
1417 addRelocationForSection(RELo, Value.SectionID);
1418 }
1419
1420 RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1421 addRelocationForSection(RE, SectionID);
1422 Section.advanceStubOffset(getMaxStubSize());
1423 }
1424 } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
1425 int64_t Addend = (Opcode & 0x0000ffff) << 16;
1426 RelocationEntry RE(SectionID, Offset, RelType, Addend);
1427 PendingRelocs.push_back(std::make_pair(Value, RE));
1428 } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
1429 int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
1430 for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
1431 const RelocationValueRef &MatchingValue = I->first;
1432 RelocationEntry &Reloc = I->second;
1433 if (MatchingValue == Value &&
1434 RelType == getMatchingLoRelocation(Reloc.RelType) &&
1435 SectionID == Reloc.SectionID) {
1436 Reloc.Addend += Addend;
1437 if (Value.SymbolName)
1438 addRelocationForSymbol(Reloc, Value.SymbolName);
1439 else
1440 addRelocationForSection(Reloc, Value.SectionID);
1441 I = PendingRelocs.erase(I);
1442 } else
1443 ++I;
1444 }
1445 RelocationEntry RE(SectionID, Offset, RelType, Addend);
1446 if (Value.SymbolName)
1447 addRelocationForSymbol(RE, Value.SymbolName);
1448 else
1449 addRelocationForSection(RE, Value.SectionID);
1450 } else {
1451 if (RelType == ELF::R_MIPS_32)
1452 Value.Addend += Opcode;
1453 else if (RelType == ELF::R_MIPS_PC16)
1454 Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
1455 else if (RelType == ELF::R_MIPS_PC19_S2)
1456 Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
1457 else if (RelType == ELF::R_MIPS_PC21_S2)
1458 Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
1459 else if (RelType == ELF::R_MIPS_PC26_S2)
1460 Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
1461 processSimpleRelocation(SectionID, Offset, RelType, Value);
1462 }
1463 } else if (IsMipsN32ABI || IsMipsN64ABI) {
1464 uint32_t r_type = RelType & 0xff;
1465 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1466 if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
1467 || r_type == ELF::R_MIPS_GOT_DISP) {
1468 StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
1469 if (i != GOTSymbolOffsets.end())
1470 RE.SymOffset = i->second;
1471 else {
1472 RE.SymOffset = allocateGOTEntries(1);
1473 GOTSymbolOffsets[TargetName] = RE.SymOffset;
1474 }
1475 if (Value.SymbolName)
1476 addRelocationForSymbol(RE, Value.SymbolName);
1477 else
1478 addRelocationForSection(RE, Value.SectionID);
1479 } else if (RelType == ELF::R_MIPS_26) {
1480 // This is an Mips branch relocation, need to use a stub function.
1481 LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
1482 SectionEntry &Section = Sections[SectionID];
1483
1484 // Look up for existing stub.
1485 StubMap::const_iterator i = Stubs.find(Value);
1486 if (i != Stubs.end()) {
1487 RelocationEntry RE(SectionID, Offset, RelType, i->second);
1488 addRelocationForSection(RE, SectionID);
1489 LLVM_DEBUG(dbgs() << " Stub function found\n");
1490 } else {
1491 // Create a new stub function.
1492 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1493 Stubs[Value] = Section.getStubOffset();
1494
1495 unsigned AbiVariant = Obj.getPlatformFlags();
1496
1497 uint8_t *StubTargetAddr = createStubFunction(
1498 Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
1499
1500 if (IsMipsN32ABI) {
1501 // Creating Hi and Lo relocations for the filled stub instructions.
1502 RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
1503 ELF::R_MIPS_HI16, Value.Addend);
1504 RelocationEntry RELo(SectionID,
1505 StubTargetAddr - Section.getAddress() + 4,
1506 ELF::R_MIPS_LO16, Value.Addend);
1507 if (Value.SymbolName) {
1508 addRelocationForSymbol(REHi, Value.SymbolName);
1509 addRelocationForSymbol(RELo, Value.SymbolName);
1510 } else {
1511 addRelocationForSection(REHi, Value.SectionID);
1512 addRelocationForSection(RELo, Value.SectionID);
1513 }
1514 } else {
1515 // Creating Highest, Higher, Hi and Lo relocations for the filled stub
1516 // instructions.
1517 RelocationEntry REHighest(SectionID,
1518 StubTargetAddr - Section.getAddress(),
1519 ELF::R_MIPS_HIGHEST, Value.Addend);
1520 RelocationEntry REHigher(SectionID,
1521 StubTargetAddr - Section.getAddress() + 4,
1522 ELF::R_MIPS_HIGHER, Value.Addend);
1523 RelocationEntry REHi(SectionID,
1524 StubTargetAddr - Section.getAddress() + 12,
1525 ELF::R_MIPS_HI16, Value.Addend);
1526 RelocationEntry RELo(SectionID,
1527 StubTargetAddr - Section.getAddress() + 20,
1528 ELF::R_MIPS_LO16, Value.Addend);
1529 if (Value.SymbolName) {
1530 addRelocationForSymbol(REHighest, Value.SymbolName);
1531 addRelocationForSymbol(REHigher, Value.SymbolName);
1532 addRelocationForSymbol(REHi, Value.SymbolName);
1533 addRelocationForSymbol(RELo, Value.SymbolName);
1534 } else {
1535 addRelocationForSection(REHighest, Value.SectionID);
1536 addRelocationForSection(REHigher, Value.SectionID);
1537 addRelocationForSection(REHi, Value.SectionID);
1538 addRelocationForSection(RELo, Value.SectionID);
1539 }
1540 }
1541 RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
1542 addRelocationForSection(RE, SectionID);
1543 Section.advanceStubOffset(getMaxStubSize());
1544 }
1545 } else {
1546 processSimpleRelocation(SectionID, Offset, RelType, Value);
1547 }
1548
1549 } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
1550 if (RelType == ELF::R_PPC64_REL24) {
1551 // Determine ABI variant in use for this object.
1552 unsigned AbiVariant = Obj.getPlatformFlags();
1553 AbiVariant &= ELF::EF_PPC64_ABI;
1554 // A PPC branch relocation will need a stub function if the target is
1555 // an external symbol (either Value.SymbolName is set, or SymType is
1556 // Symbol::ST_Unknown) or if the target address is not within the
1557 // signed 24-bits branch address.
1558 SectionEntry &Section = Sections[SectionID];
1559 uint8_t *Target = Section.getAddressWithOffset(Offset);
1560 bool RangeOverflow = false;
1561 bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
1562 if (!IsExtern) {
1563 if (AbiVariant != 2) {
1564 // In the ELFv1 ABI, a function call may point to the .opd entry,
1565 // so the final symbol value is calculated based on the relocation
1566 // values in the .opd section.
1567 if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
1568 return std::move(Err);
1569 } else {
1570 // In the ELFv2 ABI, a function symbol may provide a local entry
1571 // point, which must be used for direct calls.
1572 if (Value.SectionID == SectionID){
1573 uint8_t SymOther = Symbol->getOther();
1574 Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
1575 }
1576 }
1577 uint8_t *RelocTarget =
1578 Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
1579 int64_t delta = static_cast<int64_t>(Target - RelocTarget);
1580 // If it is within 26-bits branch range, just set the branch target
1581 if (SignExtend64<26>(delta) != delta) {
1582 RangeOverflow = true;
1583 } else if ((AbiVariant != 2) ||
1584 (AbiVariant == 2 && Value.SectionID == SectionID)) {
1585 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1586 addRelocationForSection(RE, Value.SectionID);
1587 }
1588 }
1589 if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
1590 RangeOverflow) {
1591 // It is an external symbol (either Value.SymbolName is set, or
1592 // SymType is SymbolRef::ST_Unknown) or out of range.
1593 StubMap::const_iterator i = Stubs.find(Value);
1594 if (i != Stubs.end()) {
1595 // Symbol function stub already created, just relocate to it
1596 resolveRelocation(Section, Offset,
1597 reinterpret_cast<uint64_t>(
1598 Section.getAddressWithOffset(i->second)),
1599 RelType, 0);
1600 LLVM_DEBUG(dbgs() << " Stub function found\n");
1601 } else {
1602 // Create a new stub function.
1603 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1604 Stubs[Value] = Section.getStubOffset();
1605 uint8_t *StubTargetAddr = createStubFunction(
1606 Section.getAddressWithOffset(Section.getStubOffset()),
1607 AbiVariant);
1608 RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
1609 ELF::R_PPC64_ADDR64, Value.Addend);
1610
1611 // Generates the 64-bits address loads as exemplified in section
1612 // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
1613 // apply to the low part of the instructions, so we have to update
1614 // the offset according to the target endianness.
1615 uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
1617 StubRelocOffset += 2;
1618
1619 RelocationEntry REhst(SectionID, StubRelocOffset + 0,
1620 ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
1621 RelocationEntry REhr(SectionID, StubRelocOffset + 4,
1622 ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
1623 RelocationEntry REh(SectionID, StubRelocOffset + 12,
1624 ELF::R_PPC64_ADDR16_HI, Value.Addend);
1625 RelocationEntry REl(SectionID, StubRelocOffset + 16,
1626 ELF::R_PPC64_ADDR16_LO, Value.Addend);
1627
1628 if (Value.SymbolName) {
1629 addRelocationForSymbol(REhst, Value.SymbolName);
1630 addRelocationForSymbol(REhr, Value.SymbolName);
1631 addRelocationForSymbol(REh, Value.SymbolName);
1632 addRelocationForSymbol(REl, Value.SymbolName);
1633 } else {
1634 addRelocationForSection(REhst, Value.SectionID);
1635 addRelocationForSection(REhr, Value.SectionID);
1636 addRelocationForSection(REh, Value.SectionID);
1637 addRelocationForSection(REl, Value.SectionID);
1638 }
1639
1640 resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
1641 Section.getAddressWithOffset(
1642 Section.getStubOffset())),
1643 RelType, 0);
1644 Section.advanceStubOffset(getMaxStubSize());
1645 }
1646 if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
1647 // Restore the TOC for external calls
1648 if (AbiVariant == 2)
1649 writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
1650 else
1651 writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
1652 }
1653 }
1654 } else if (RelType == ELF::R_PPC64_TOC16 ||
1655 RelType == ELF::R_PPC64_TOC16_DS ||
1656 RelType == ELF::R_PPC64_TOC16_LO ||
1657 RelType == ELF::R_PPC64_TOC16_LO_DS ||
1658 RelType == ELF::R_PPC64_TOC16_HI ||
1659 RelType == ELF::R_PPC64_TOC16_HA) {
1660 // These relocations are supposed to subtract the TOC address from
1661 // the final value. This does not fit cleanly into the RuntimeDyld
1662 // scheme, since there may be *two* sections involved in determining
1663 // the relocation value (the section of the symbol referred to by the
1664 // relocation, and the TOC section associated with the current module).
1665 //
1666 // Fortunately, these relocations are currently only ever generated
1667 // referring to symbols that themselves reside in the TOC, which means
1668 // that the two sections are actually the same. Thus they cancel out
1669 // and we can immediately resolve the relocation right now.
1670 switch (RelType) {
1671 case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
1672 case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
1673 case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
1674 case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
1675 case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
1676 case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
1677 default: llvm_unreachable("Wrong relocation type.");
1678 }
1679
1680 RelocationValueRef TOCValue;
1681 if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
1682 return std::move(Err);
1683 if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
1684 llvm_unreachable("Unsupported TOC relocation.");
1685 Value.Addend -= TOCValue.Addend;
1686 resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
1687 } else {
1688 // There are two ways to refer to the TOC address directly: either
1689 // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
1690 // ignored), or via any relocation that refers to the magic ".TOC."
1691 // symbols (in which case the addend is respected).
1692 if (RelType == ELF::R_PPC64_TOC) {
1693 RelType = ELF::R_PPC64_ADDR64;
1694 if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
1695 return std::move(Err);
1696 } else if (TargetName == ".TOC.") {
1697 if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
1698 return std::move(Err);
1699 Value.Addend += Addend;
1700 }
1701
1702 RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
1703
1704 if (Value.SymbolName)
1705 addRelocationForSymbol(RE, Value.SymbolName);
1706 else
1707 addRelocationForSection(RE, Value.SectionID);
1708 }
1709 } else if (Arch == Triple::systemz &&
1710 (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
1711 // Create function stubs for both PLT and GOT references, regardless of
1712 // whether the GOT reference is to data or code. The stub contains the
1713 // full address of the symbol, as needed by GOT references, and the
1714 // executable part only adds an overhead of 8 bytes.
1715 //
1716 // We could try to conserve space by allocating the code and data
1717 // parts of the stub separately. However, as things stand, we allocate
1718 // a stub for every relocation, so using a GOT in JIT code should be
1719 // no less space efficient than using an explicit constant pool.
1720 LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
1721 SectionEntry &Section = Sections[SectionID];
1722
1723 // Look for an existing stub.
1724 StubMap::const_iterator i = Stubs.find(Value);
1725 uintptr_t StubAddress;
1726 if (i != Stubs.end()) {
1727 StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
1728 LLVM_DEBUG(dbgs() << " Stub function found\n");
1729 } else {
1730 // Create a new stub function.
1731 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1732
1733 uintptr_t BaseAddress = uintptr_t(Section.getAddress());
1734 StubAddress =
1735 alignTo(BaseAddress + Section.getStubOffset(), getStubAlignment());
1736 unsigned StubOffset = StubAddress - BaseAddress;
1737
1738 Stubs[Value] = StubOffset;
1739 createStubFunction((uint8_t *)StubAddress);
1740 RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
1741 Value.Offset);
1742 if (Value.SymbolName)
1743 addRelocationForSymbol(RE, Value.SymbolName);
1744 else
1745 addRelocationForSection(RE, Value.SectionID);
1746 Section.advanceStubOffset(getMaxStubSize());
1747 }
1748
1749 if (RelType == ELF::R_390_GOTENT)
1750 resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
1751 Addend);
1752 else
1753 resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
1754 } else if (Arch == Triple::x86_64) {
1755 if (RelType == ELF::R_X86_64_PLT32) {
1756 // The way the PLT relocations normally work is that the linker allocates
1757 // the
1758 // PLT and this relocation makes a PC-relative call into the PLT. The PLT
1759 // entry will then jump to an address provided by the GOT. On first call,
1760 // the
1761 // GOT address will point back into PLT code that resolves the symbol. After
1762 // the first call, the GOT entry points to the actual function.
1763 //
1764 // For local functions we're ignoring all of that here and just replacing
1765 // the PLT32 relocation type with PC32, which will translate the relocation
1766 // into a PC-relative call directly to the function. For external symbols we
1767 // can't be sure the function will be within 2^32 bytes of the call site, so
1768 // we need to create a stub, which calls into the GOT. This case is
1769 // equivalent to the usual PLT implementation except that we use the stub
1770 // mechanism in RuntimeDyld (which puts stubs at the end of the section)
1771 // rather than allocating a PLT section.
1772 if (Value.SymbolName && MemMgr.allowStubAllocation()) {
1773 // This is a call to an external function.
1774 // Look for an existing stub.
1775 SectionEntry *Section = &Sections[SectionID];
1776 StubMap::const_iterator i = Stubs.find(Value);
1777 uintptr_t StubAddress;
1778 if (i != Stubs.end()) {
1779 StubAddress = uintptr_t(Section->getAddress()) + i->second;
1780 LLVM_DEBUG(dbgs() << " Stub function found\n");
1781 } else {
1782 // Create a new stub function (equivalent to a PLT entry).
1783 LLVM_DEBUG(dbgs() << " Create a new stub function\n");
1784
1785 uintptr_t BaseAddress = uintptr_t(Section->getAddress());
1786 StubAddress = alignTo(BaseAddress + Section->getStubOffset(),
1787 getStubAlignment());
1788 unsigned StubOffset = StubAddress - BaseAddress;
1789 Stubs[Value] = StubOffset;
1790 createStubFunction((uint8_t *)StubAddress);
1791
1792 // Bump our stub offset counter
1793 Section->advanceStubOffset(getMaxStubSize());
1794
1795 // Allocate a GOT Entry
1796 uint64_t GOTOffset = allocateGOTEntries(1);
1797 // This potentially creates a new Section which potentially
1798 // invalidates the Section pointer, so reload it.
1799 Section = &Sections[SectionID];
1800
1801 // The load of the GOT address has an addend of -4
1802 resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4,
1803 ELF::R_X86_64_PC32);
1804
1805 // Fill in the value of the symbol we're targeting into the GOT
1807 computeGOTOffsetRE(GOTOffset, 0, ELF::R_X86_64_64),
1808 Value.SymbolName);
1809 }
1810
1811 // Make the target call a call into the stub table.
1812 resolveRelocation(*Section, Offset, StubAddress, ELF::R_X86_64_PC32,
1813 Addend);
1814 } else {
1816 computePlaceholderAddress(SectionID, Offset));
1817 processSimpleRelocation(SectionID, Offset, ELF::R_X86_64_PC32, Value);
1818 }
1819 } else if (RelType == ELF::R_X86_64_GOTPCREL ||
1820 RelType == ELF::R_X86_64_GOTPCRELX ||
1821 RelType == ELF::R_X86_64_REX_GOTPCRELX) {
1822 uint64_t GOTOffset = allocateGOTEntries(1);
1823 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1824 ELF::R_X86_64_PC32);
1825
1826 // Fill in the value of the symbol we're targeting into the GOT
1827 RelocationEntry RE =
1828 computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
1829 if (Value.SymbolName)
1830 addRelocationForSymbol(RE, Value.SymbolName);
1831 else
1832 addRelocationForSection(RE, Value.SectionID);
1833 } else if (RelType == ELF::R_X86_64_GOT64) {
1834 // Fill in a 64-bit GOT offset.
1835 uint64_t GOTOffset = allocateGOTEntries(1);
1836 resolveRelocation(Sections[SectionID], Offset, GOTOffset,
1837 ELF::R_X86_64_64, 0);
1838
1839 // Fill in the value of the symbol we're targeting into the GOT
1840 RelocationEntry RE =
1841 computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
1842 if (Value.SymbolName)
1843 addRelocationForSymbol(RE, Value.SymbolName);
1844 else
1845 addRelocationForSection(RE, Value.SectionID);
1846 } else if (RelType == ELF::R_X86_64_GOTPC32) {
1847 // Materialize the address of the base of the GOT relative to the PC.
1848 // This doesn't create a GOT entry, but it does mean we need a GOT
1849 // section.
1850 (void)allocateGOTEntries(0);
1851 resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC32);
1852 } else if (RelType == ELF::R_X86_64_GOTPC64) {
1853 (void)allocateGOTEntries(0);
1854 resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC64);
1855 } else if (RelType == ELF::R_X86_64_GOTOFF64) {
1856 // GOTOFF relocations ultimately require a section difference relocation.
1857 (void)allocateGOTEntries(0);
1858 processSimpleRelocation(SectionID, Offset, RelType, Value);
1859 } else if (RelType == ELF::R_X86_64_PC32) {
1860 Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
1861 processSimpleRelocation(SectionID, Offset, RelType, Value);
1862 } else if (RelType == ELF::R_X86_64_PC64) {
1863 Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
1864 processSimpleRelocation(SectionID, Offset, RelType, Value);
1865 } else if (RelType == ELF::R_X86_64_GOTTPOFF) {
1866 processX86_64GOTTPOFFRelocation(SectionID, Offset, Value, Addend);
1867 } else if (RelType == ELF::R_X86_64_TLSGD ||
1868 RelType == ELF::R_X86_64_TLSLD) {
1869 // The next relocation must be the relocation for __tls_get_addr.
1870 ++RelI;
1871 auto &GetAddrRelocation = *RelI;
1872 processX86_64TLSRelocation(SectionID, Offset, RelType, Value, Addend,
1873 GetAddrRelocation);
1874 } else {
1875 processSimpleRelocation(SectionID, Offset, RelType, Value);
1876 }
1877 } else {
1878 if (Arch == Triple::x86) {
1879 Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
1880 }
1881 processSimpleRelocation(SectionID, Offset, RelType, Value);
1882 }
1883 return ++RelI;
1884}
1885
1886void RuntimeDyldELF::processX86_64GOTTPOFFRelocation(unsigned SectionID,
1889 int64_t Addend) {
1890 // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
1891 // to replace the GOTTPOFF relocation with a TPOFF relocation. The spec
1892 // only mentions one optimization even though there are two different
1893 // code sequences for the Initial Exec TLS Model. We match the code to
1894 // find out which one was used.
1895
1896 // A possible TLS code sequence and its replacement
1897 struct CodeSequence {
1898 // The expected code sequence
1899 ArrayRef<uint8_t> ExpectedCodeSequence;
1900 // The negative offset of the GOTTPOFF relocation to the beginning of
1901 // the sequence
1902 uint64_t TLSSequenceOffset;
1903 // The new code sequence
1904 ArrayRef<uint8_t> NewCodeSequence;
1905 // The offset of the new TPOFF relocation
1906 uint64_t TpoffRelocationOffset;
1907 };
1908
1909 std::array<CodeSequence, 2> CodeSequences;
1910
1911 // Initial Exec Code Model Sequence
1912 {
1913 static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
1914 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
1915 0x00, // mov %fs:0, %rax
1916 0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // add x@gotpoff(%rip),
1917 // %rax
1918 };
1919 CodeSequences[0].ExpectedCodeSequence =
1920 ArrayRef<uint8_t>(ExpectedCodeSequenceList);
1921 CodeSequences[0].TLSSequenceOffset = 12;
1922
1923 static const std::initializer_list<uint8_t> NewCodeSequenceList = {
1924 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0, %rax
1925 0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax), %rax
1926 };
1927 CodeSequences[0].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
1928 CodeSequences[0].TpoffRelocationOffset = 12;
1929 }
1930
1931 // Initial Exec Code Model Sequence, II
1932 {
1933 static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
1934 0x48, 0x8b, 0x05, 0x00, 0x00, 0x00, 0x00, // mov x@gotpoff(%rip), %rax
1935 0x64, 0x48, 0x8b, 0x00, 0x00, 0x00, 0x00 // mov %fs:(%rax), %rax
1936 };
1937 CodeSequences[1].ExpectedCodeSequence =
1938 ArrayRef<uint8_t>(ExpectedCodeSequenceList);
1939 CodeSequences[1].TLSSequenceOffset = 3;
1940
1941 static const std::initializer_list<uint8_t> NewCodeSequenceList = {
1942 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00, // 6 byte nop
1943 0x64, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:x@tpoff, %rax
1944 };
1945 CodeSequences[1].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
1946 CodeSequences[1].TpoffRelocationOffset = 10;
1947 }
1948
1949 bool Resolved = false;
1950 auto &Section = Sections[SectionID];
1951 for (const auto &C : CodeSequences) {
1952 assert(C.ExpectedCodeSequence.size() == C.NewCodeSequence.size() &&
1953 "Old and new code sequences must have the same size");
1954
1955 if (Offset < C.TLSSequenceOffset ||
1956 (Offset - C.TLSSequenceOffset + C.NewCodeSequence.size()) >
1957 Section.getSize()) {
1958 // This can't be a matching sequence as it doesn't fit in the current
1959 // section
1960 continue;
1961 }
1962
1963 auto TLSSequenceStartOffset = Offset - C.TLSSequenceOffset;
1964 auto *TLSSequence = Section.getAddressWithOffset(TLSSequenceStartOffset);
1965 if (ArrayRef<uint8_t>(TLSSequence, C.ExpectedCodeSequence.size()) !=
1966 C.ExpectedCodeSequence) {
1967 continue;
1968 }
1969
1970 memcpy(TLSSequence, C.NewCodeSequence.data(), C.NewCodeSequence.size());
1971
1972 // The original GOTTPOFF relocation has an addend as it is PC relative,
1973 // so it needs to be corrected. The TPOFF32 relocation is used as an
1974 // absolute value (which is an offset from %fs:0), so remove the addend
1975 // again.
1976 RelocationEntry RE(SectionID,
1977 TLSSequenceStartOffset + C.TpoffRelocationOffset,
1978 ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
1979
1980 if (Value.SymbolName)
1981 addRelocationForSymbol(RE, Value.SymbolName);
1982 else
1983 addRelocationForSection(RE, Value.SectionID);
1984
1985 Resolved = true;
1986 break;
1987 }
1988
1989 if (!Resolved) {
1990 // The GOTTPOFF relocation was not used in one of the sequences
1991 // described in the spec, so we can't optimize it to a TPOFF
1992 // relocation.
1993 uint64_t GOTOffset = allocateGOTEntries(1);
1994 resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
1995 ELF::R_X86_64_PC32);
1996 RelocationEntry RE =
1997 computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_TPOFF64);
1998 if (Value.SymbolName)
1999 addRelocationForSymbol(RE, Value.SymbolName);
2000 else
2001 addRelocationForSection(RE, Value.SectionID);
2002 }
2003}
2004
2005void RuntimeDyldELF::processX86_64TLSRelocation(
2006 unsigned SectionID, uint64_t Offset, uint64_t RelType,
2007 RelocationValueRef Value, int64_t Addend,
2008 const RelocationRef &GetAddrRelocation) {
2009 // Since we are statically linking and have no additional DSOs, we can resolve
2010 // the relocation directly without using __tls_get_addr.
2011 // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
2012 // to replace it with the Local Exec relocation variant.
2013
2014 // Find out whether the code was compiled with the large or small memory
2015 // model. For this we look at the next relocation which is the relocation
2016 // for the __tls_get_addr function. If it's a 32 bit relocation, it's the
2017 // small code model, with a 64 bit relocation it's the large code model.
2018 bool IsSmallCodeModel;
2019 // Is the relocation for the __tls_get_addr a PC-relative GOT relocation?
2020 bool IsGOTPCRel = false;
2021
2022 switch (GetAddrRelocation.getType()) {
2023 case ELF::R_X86_64_GOTPCREL:
2024 case ELF::R_X86_64_REX_GOTPCRELX:
2025 case ELF::R_X86_64_GOTPCRELX:
2026 IsGOTPCRel = true;
2027 [[fallthrough]];
2028 case ELF::R_X86_64_PLT32:
2029 IsSmallCodeModel = true;
2030 break;
2031 case ELF::R_X86_64_PLTOFF64:
2032 IsSmallCodeModel = false;
2033 break;
2034 default:
2036 "invalid TLS relocations for General/Local Dynamic TLS Model: "
2037 "expected PLT or GOT relocation for __tls_get_addr function");
2038 }
2039
2040 // The negative offset to the start of the TLS code sequence relative to
2041 // the offset of the TLSGD/TLSLD relocation
2042 uint64_t TLSSequenceOffset;
2043 // The expected start of the code sequence
2044 ArrayRef<uint8_t> ExpectedCodeSequence;
2045 // The new TLS code sequence that will replace the existing code
2046 ArrayRef<uint8_t> NewCodeSequence;
2047
2048 if (RelType == ELF::R_X86_64_TLSGD) {
2049 // The offset of the new TPOFF32 relocation (offset starting from the
2050 // beginning of the whole TLS sequence)
2051 uint64_t TpoffRelocOffset;
2052
2053 if (IsSmallCodeModel) {
2054 if (!IsGOTPCRel) {
2055 static const std::initializer_list<uint8_t> CodeSequence = {
2056 0x66, // data16 (no-op prefix)
2057 0x48, 0x8d, 0x3d, 0x00, 0x00,
2058 0x00, 0x00, // lea <disp32>(%rip), %rdi
2059 0x66, 0x66, // two data16 prefixes
2060 0x48, // rex64 (no-op prefix)
2061 0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
2062 };
2063 ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2064 TLSSequenceOffset = 4;
2065 } else {
2066 // This code sequence is not described in the TLS spec but gcc
2067 // generates it sometimes.
2068 static const std::initializer_list<uint8_t> CodeSequence = {
2069 0x66, // data16 (no-op prefix)
2070 0x48, 0x8d, 0x3d, 0x00, 0x00,
2071 0x00, 0x00, // lea <disp32>(%rip), %rdi
2072 0x66, // data16 prefix (no-op prefix)
2073 0x48, // rex64 (no-op prefix)
2074 0xff, 0x15, 0x00, 0x00, 0x00,
2075 0x00 // call *__tls_get_addr@gotpcrel(%rip)
2076 };
2077 ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2078 TLSSequenceOffset = 4;
2079 }
2080
2081 // The replacement code for the small code model. It's the same for
2082 // both sequences.
2083 static const std::initializer_list<uint8_t> SmallSequence = {
2084 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
2085 0x00, // mov %fs:0, %rax
2086 0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),
2087 // %rax
2088 };
2089 NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
2090 TpoffRelocOffset = 12;
2091 } else {
2092 static const std::initializer_list<uint8_t> CodeSequence = {
2093 0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
2094 // %rdi
2095 0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2096 0x00, // movabs $__tls_get_addr@pltoff, %rax
2097 0x48, 0x01, 0xd8, // add %rbx, %rax
2098 0xff, 0xd0 // call *%rax
2099 };
2100 ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2101 TLSSequenceOffset = 3;
2102
2103 // The replacement code for the large code model
2104 static const std::initializer_list<uint8_t> LargeSequence = {
2105 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
2106 0x00, // mov %fs:0, %rax
2107 0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, // lea x@tpoff(%rax),
2108 // %rax
2109 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 // nopw 0x0(%rax,%rax,1)
2110 };
2111 NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
2112 TpoffRelocOffset = 12;
2113 }
2114
2115 // The TLSGD/TLSLD relocations are PC-relative, so they have an addend.
2116 // The new TPOFF32 relocations is used as an absolute offset from
2117 // %fs:0, so remove the TLSGD/TLSLD addend again.
2118 RelocationEntry RE(SectionID, Offset - TLSSequenceOffset + TpoffRelocOffset,
2119 ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
2120 if (Value.SymbolName)
2121 addRelocationForSymbol(RE, Value.SymbolName);
2122 else
2123 addRelocationForSection(RE, Value.SectionID);
2124 } else if (RelType == ELF::R_X86_64_TLSLD) {
2125 if (IsSmallCodeModel) {
2126 if (!IsGOTPCRel) {
2127 static const std::initializer_list<uint8_t> CodeSequence = {
2128 0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
2129 0x00, 0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
2130 };
2131 ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2132 TLSSequenceOffset = 3;
2133
2134 // The replacement code for the small code model
2135 static const std::initializer_list<uint8_t> SmallSequence = {
2136 0x66, 0x66, 0x66, // three data16 prefixes (no-op)
2137 0x64, 0x48, 0x8b, 0x04, 0x25,
2138 0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
2139 };
2140 NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
2141 } else {
2142 // This code sequence is not described in the TLS spec but gcc
2143 // generates it sometimes.
2144 static const std::initializer_list<uint8_t> CodeSequence = {
2145 0x48, 0x8d, 0x3d, 0x00,
2146 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
2147 0xff, 0x15, 0x00, 0x00,
2148 0x00, 0x00 // call
2149 // *__tls_get_addr@gotpcrel(%rip)
2150 };
2151 ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2152 TLSSequenceOffset = 3;
2153
2154 // The replacement is code is just like above but it needs to be
2155 // one byte longer.
2156 static const std::initializer_list<uint8_t> SmallSequence = {
2157 0x0f, 0x1f, 0x40, 0x00, // 4 byte nop
2158 0x64, 0x48, 0x8b, 0x04, 0x25,
2159 0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
2160 };
2161 NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
2162 }
2163 } else {
2164 // This is the same sequence as for the TLSGD sequence with the large
2165 // memory model above
2166 static const std::initializer_list<uint8_t> CodeSequence = {
2167 0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
2168 // %rdi
2169 0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2170 0x48, // movabs $__tls_get_addr@pltoff, %rax
2171 0x01, 0xd8, // add %rbx, %rax
2172 0xff, 0xd0 // call *%rax
2173 };
2174 ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
2175 TLSSequenceOffset = 3;
2176
2177 // The replacement code for the large code model
2178 static const std::initializer_list<uint8_t> LargeSequence = {
2179 0x66, 0x66, 0x66, // three data16 prefixes (no-op)
2180 0x66, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
2181 0x00, // 10 byte nop
2182 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
2183 };
2184 NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
2185 }
2186 } else {
2187 llvm_unreachable("both TLS relocations handled above");
2188 }
2189
2190 assert(ExpectedCodeSequence.size() == NewCodeSequence.size() &&
2191 "Old and new code sequences must have the same size");
2192
2193 auto &Section = Sections[SectionID];
2194 if (Offset < TLSSequenceOffset ||
2195 (Offset - TLSSequenceOffset + NewCodeSequence.size()) >
2196 Section.getSize()) {
2197 report_fatal_error("unexpected end of section in TLS sequence");
2198 }
2199
2200 auto *TLSSequence = Section.getAddressWithOffset(Offset - TLSSequenceOffset);
2201 if (ArrayRef<uint8_t>(TLSSequence, ExpectedCodeSequence.size()) !=
2202 ExpectedCodeSequence) {
2204 "invalid TLS sequence for Global/Local Dynamic TLS Model");
2205 }
2206
2207 memcpy(TLSSequence, NewCodeSequence.data(), NewCodeSequence.size());
2208}
2209
2211 // We don't use the GOT in all of these cases, but it's essentially free
2212 // to put them all here.
2213 size_t Result = 0;
2214 switch (Arch) {
2215 case Triple::x86_64:
2216 case Triple::aarch64:
2217 case Triple::aarch64_be:
2218 case Triple::ppc64:
2219 case Triple::ppc64le:
2220 case Triple::systemz:
2221 Result = sizeof(uint64_t);
2222 break;
2223 case Triple::x86:
2224 case Triple::arm:
2225 case Triple::thumb:
2226 Result = sizeof(uint32_t);
2227 break;
2228 case Triple::mips:
2229 case Triple::mipsel:
2230 case Triple::mips64:
2231 case Triple::mips64el:
2233 Result = sizeof(uint32_t);
2234 else if (IsMipsN64ABI)
2235 Result = sizeof(uint64_t);
2236 else
2237 llvm_unreachable("Mips ABI not handled");
2238 break;
2239 default:
2240 llvm_unreachable("Unsupported CPU type!");
2241 }
2242 return Result;
2243}
2244
2245uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
2246 if (GOTSectionID == 0) {
2247 GOTSectionID = Sections.size();
2248 // Reserve a section id. We'll allocate the section later
2249 // once we know the total size
2250 Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
2251 }
2252 uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
2253 CurrentGOTIndex += no;
2254 return StartOffset;
2255}
2256
2257uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
2258 unsigned GOTRelType) {
2259 auto E = GOTOffsetMap.insert({Value, 0});
2260 if (E.second) {
2261 uint64_t GOTOffset = allocateGOTEntries(1);
2262
2263 // Create relocation for newly created GOT entry
2264 RelocationEntry RE =
2265 computeGOTOffsetRE(GOTOffset, Value.Offset, GOTRelType);
2266 if (Value.SymbolName)
2267 addRelocationForSymbol(RE, Value.SymbolName);
2268 else
2269 addRelocationForSection(RE, Value.SectionID);
2270
2271 E.first->second = GOTOffset;
2272 }
2273
2274 return E.first->second;
2275}
2276
2277void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
2279 uint64_t GOTOffset,
2280 uint32_t Type) {
2281 // Fill in the relative address of the GOT Entry into the stub
2282 RelocationEntry GOTRE(SectionID, Offset, Type, GOTOffset);
2283 addRelocationForSection(GOTRE, GOTSectionID);
2284}
2285
// Build a RelocationEntry that writes the target's address (SymbolOffset,
// relative to the symbol's section) into the GOT slot at GOTOffset using
// relocation type Type. The entry targets the (possibly not yet allocated)
// GOT section.
RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}
2291
// Hook called for every newly discovered symbol. For indirect (ifunc)
// symbols, redirect the symbol table entry to a stub in a dedicated stub
// section so that callers go through the lazily-resolved ifunc mechanism;
// all other symbols are left untouched.
void RuntimeDyldELF::processNewSymbol(const SymbolRef &ObjSymbol, SymbolTableEntry& Symbol) {
  // This should never return an error as `processNewSymbol` wouldn't have been
  // called if getFlags() returned an error before.
  auto ObjSymbolFlags = cantFail(ObjSymbol.getFlags());

  if (ObjSymbolFlags & SymbolRef::SF_Indirect) {
    if (IFuncStubSectionID == 0) {
      // Create a dummy section for the ifunc stubs. It will be actually
      // allocated in finalizeLoad() below.
      IFuncStubSectionID = Sections.size();
      Sections.push_back(
          SectionEntry(".text.__llvm_IFuncStubs", nullptr, 0, 0, 0));
      // First 64B are reserved for the IFunc resolver
      IFuncStubOffset = 64;
    }

    IFuncStubs.push_back(IFuncStub{IFuncStubOffset, Symbol});
    // Modify the symbol so that it points to the ifunc stub instead of to the
    // resolver function.
    Symbol = SymbolTableEntry(IFuncStubSectionID, IFuncStubOffset,
                              Symbol.getFlags());
    IFuncStubOffset += getMaxIFuncStubSize();
  }
}
2316
2318 ObjSectionToIDMap &SectionMap) {
2319 if (IsMipsO32ABI)
2320 if (!PendingRelocs.empty())
2321 return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
2322
2323 // Create the IFunc stubs if necessary. This must be done before processing
2324 // the GOT entries, as the IFunc stubs may create some.
2325 if (IFuncStubSectionID != 0) {
2326 uint8_t *IFuncStubsAddr = MemMgr.allocateCodeSection(
2327 IFuncStubOffset, 1, IFuncStubSectionID, ".text.__llvm_IFuncStubs");
2328 if (!IFuncStubsAddr)
2329 return make_error<RuntimeDyldError>(
2330 "Unable to allocate memory for IFunc stubs!");
2331 Sections[IFuncStubSectionID] =
2332 SectionEntry(".text.__llvm_IFuncStubs", IFuncStubsAddr, IFuncStubOffset,
2333 IFuncStubOffset, 0);
2334
2335 createIFuncResolver(IFuncStubsAddr);
2336
2337 LLVM_DEBUG(dbgs() << "Creating IFunc stubs SectionID: "
2338 << IFuncStubSectionID << " Addr: "
2339 << Sections[IFuncStubSectionID].getAddress() << '\n');
2340 for (auto &IFuncStub : IFuncStubs) {
2341 auto &Symbol = IFuncStub.OriginalSymbol;
2342 LLVM_DEBUG(dbgs() << "\tSectionID: " << Symbol.getSectionID()
2343 << " Offset: " << format("%p", Symbol.getOffset())
2344 << " IFuncStubOffset: "
2345 << format("%p\n", IFuncStub.StubOffset));
2346 createIFuncStub(IFuncStubSectionID, 0, IFuncStub.StubOffset,
2347 Symbol.getSectionID(), Symbol.getOffset());
2348 }
2349
2350 IFuncStubSectionID = 0;
2351 IFuncStubOffset = 0;
2352 IFuncStubs.clear();
2353 }
2354
2355 // If necessary, allocate the global offset table
2356 if (GOTSectionID != 0) {
2357 // Allocate memory for the section
2358 size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
2359 uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
2360 GOTSectionID, ".got", false);
2361 if (!Addr)
2362 return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
2363
2364 Sections[GOTSectionID] =
2365 SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
2366
2367 // For now, initialize all GOT entries to zero. We'll fill them in as
2368 // needed when GOT-based relocations are applied.
2369 memset(Addr, 0, TotalSize);
2370 if (IsMipsN32ABI || IsMipsN64ABI) {
2371 // To correctly resolve Mips GOT relocations, we need a mapping from
2372 // object's sections to GOTs.
2373 for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
2374 SI != SE; ++SI) {
2375 if (SI->relocation_begin() != SI->relocation_end()) {
2376 Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
2377 if (!RelSecOrErr)
2378 return make_error<RuntimeDyldError>(
2379 toString(RelSecOrErr.takeError()));
2380
2381 section_iterator RelocatedSection = *RelSecOrErr;
2382 ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
2383 assert(i != SectionMap.end());
2384 SectionToGOTMap[i->second] = GOTSectionID;
2385 }
2386 }
2387 GOTSymbolOffsets.clear();
2388 }
2389 }
2390
2391 // Look for and record the EH frame section.
2392 ObjSectionToIDMap::iterator i, e;
2393 for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
2394 const SectionRef &Section = i->first;
2395
2397 Expected<StringRef> NameOrErr = Section.getName();
2398 if (NameOrErr)
2399 Name = *NameOrErr;
2400 else
2401 consumeError(NameOrErr.takeError());
2402
2403 if (Name == ".eh_frame") {
2404 UnregisteredEHFrameSections.push_back(i->second);
2405 break;
2406 }
2407 }
2408
2409 GOTSectionID = 0;
2410 CurrentGOTIndex = 0;
2411
2412 return Error::success();
2413}
2414
2416 return Obj.isELF();
2417}
2418
2419void RuntimeDyldELF::createIFuncResolver(uint8_t *Addr) const {
2420 if (Arch == Triple::x86_64) {
2421 // The adddres of the GOT1 entry is in %r11, the GOT2 entry is in %r11+8
2422 // (see createIFuncStub() for details)
2423 // The following code first saves all registers that contain the original
2424 // function arguments as those registers are not saved by the resolver
2425 // function. %r11 is saved as well so that the GOT2 entry can be updated
2426 // afterwards. Then it calls the actual IFunc resolver function whose
2427 // address is stored in GOT2. After the resolver function returns, all
2428 // saved registers are restored and the return value is written to GOT1.
2429 // Finally, jump to the now resolved function.
2430 // clang-format off
2431 const uint8_t StubCode[] = {
2432 0x57, // push %rdi
2433 0x56, // push %rsi
2434 0x52, // push %rdx
2435 0x51, // push %rcx
2436 0x41, 0x50, // push %r8
2437 0x41, 0x51, // push %r9
2438 0x41, 0x53, // push %r11
2439 0x41, 0xff, 0x53, 0x08, // call *0x8(%r11)
2440 0x41, 0x5b, // pop %r11
2441 0x41, 0x59, // pop %r9
2442 0x41, 0x58, // pop %r8
2443 0x59, // pop %rcx
2444 0x5a, // pop %rdx
2445 0x5e, // pop %rsi
2446 0x5f, // pop %rdi
2447 0x49, 0x89, 0x03, // mov %rax,(%r11)
2448 0xff, 0xe0 // jmp *%rax
2449 };
2450 // clang-format on
2451 static_assert(sizeof(StubCode) <= 64,
2452 "maximum size of the IFunc resolver is 64B");
2453 memcpy(Addr, StubCode, sizeof(StubCode));
2454 } else {
2456 "IFunc resolver is not supported for target architecture");
2457 }
2458}
2459
// Emit one per-symbol ifunc stub at IFuncStubOffset inside the stub section,
// wiring it (via two adjacent GOT entries plus relocations) to the shared
// resolver at IFuncResolverOffset and to the actual ifunc resolver function
// at (IFuncSectionID, IFuncOffset). Only implemented for x86-64.
void RuntimeDyldELF::createIFuncStub(unsigned IFuncStubSectionID,
                                     uint64_t IFuncResolverOffset,
                                     uint64_t IFuncStubOffset,
                                     unsigned IFuncSectionID,
                                     uint64_t IFuncOffset) {
  auto &IFuncStubSection = Sections[IFuncStubSectionID];
  auto *Addr = IFuncStubSection.getAddressWithOffset(IFuncStubOffset);

  if (Arch == Triple::x86_64) {
    // The first instruction loads a PC-relative address into %r11 which is a
    // GOT entry for this stub. This initially contains the address to the
    // IFunc resolver. We can use %r11 here as it's caller saved but not used
    // to pass any arguments. In fact, x86_64 ABI even suggests using %r11 for
    // code in the PLT. The IFunc resolver will use %r11 to update the GOT
    // entry.
    //
    // The next instruction just jumps to the address contained in the GOT
    // entry. As mentioned above, we do this two-step jump by first setting
    // %r11 so that the IFunc resolver has access to it.
    //
    // The IFunc resolver of course also needs to know the actual address of
    // the actual IFunc resolver function. This will be stored in a GOT entry
    // right next to the first one for this stub. So, the IFunc resolver will
    // be able to call it with %r11+8.
    //
    // In total, two adjacent GOT entries (+relocation) and one additional
    // relocation are required:
    // GOT1: Address of the IFunc resolver.
    // GOT2: Address of the IFunc resolver function.
    // IFuncStubOffset+3: 32-bit PC-relative address of GOT1.
    uint64_t GOT1 = allocateGOTEntries(2);
    uint64_t GOT2 = GOT1 + getGOTEntrySize();

    // Relocations filling in the two GOT entries.
    RelocationEntry RE1(GOTSectionID, GOT1, ELF::R_X86_64_64,
                        IFuncResolverOffset, {});
    addRelocationForSection(RE1, IFuncStubSectionID);
    RelocationEntry RE2(GOTSectionID, GOT2, ELF::R_X86_64_64, IFuncOffset, {});
    addRelocationForSection(RE2, IFuncSectionID);

    const uint8_t StubCode[] = {
        0x4c, 0x8d, 0x1d, 0x00, 0x00, 0x00, 0x00, // leaq 0x0(%rip),%r11
        0x41, 0xff, 0x23                          // jmpq *(%r11)
    };
    assert(sizeof(StubCode) <= getMaxIFuncStubSize() &&
           "IFunc stub size must not exceed getMaxIFuncStubSize()");
    memcpy(Addr, StubCode, sizeof(StubCode));

    // The PC-relative value starts 4 bytes from the end of the leaq
    // instruction, so the addend is -4.
    resolveGOTOffsetRelocation(IFuncStubSectionID, IFuncStubOffset + 3,
                               GOT1 - 4, ELF::R_X86_64_PC32);
  } else {
    report_fatal_error("IFunc stub is not supported for target architecture");
  }
}
2515
2516unsigned RuntimeDyldELF::getMaxIFuncStubSize() const {
2517 if (Arch == Triple::x86_64) {
2518 return 10;
2519 }
2520 return 0;
2521}
2522
2523bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
2524 unsigned RelTy = R.getType();
2526 return RelTy == ELF::R_AARCH64_ADR_GOT_PAGE ||
2527 RelTy == ELF::R_AARCH64_LD64_GOT_LO12_NC;
2528
2529 if (Arch == Triple::x86_64)
2530 return RelTy == ELF::R_X86_64_GOTPCREL ||
2531 RelTy == ELF::R_X86_64_GOTPCRELX ||
2532 RelTy == ELF::R_X86_64_GOT64 ||
2533 RelTy == ELF::R_X86_64_REX_GOTPCRELX;
2534 return false;
2535}
2536
2537bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
2538 if (Arch != Triple::x86_64)
2539 return true; // Conservative answer
2540
2541 switch (R.getType()) {
2542 default:
2543 return true; // Conservative answer
2544
2545
2546 case ELF::R_X86_64_GOTPCREL:
2547 case ELF::R_X86_64_GOTPCRELX:
2548 case ELF::R_X86_64_REX_GOTPCRELX:
2549 case ELF::R_X86_64_GOTPC64:
2550 case ELF::R_X86_64_GOT64:
2551 case ELF::R_X86_64_GOTOFF64:
2552 case ELF::R_X86_64_PC32:
2553 case ELF::R_X86_64_PC64:
2554 case ELF::R_X86_64_64:
2555 // We know that these reloation types won't need a stub function. This list
2556 // can be extended as needed.
2557 return false;
2558 }
2559}
2560
2561} // namespace llvm
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Given that RA is a live value
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Addr
std::string Name
#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
Definition: ELFTypes.h:104
#define I(x, y, z)
Definition: MD5.cpp:58
#define P(N)
static void or32le(void *P, int32_t V)
static void or32AArch64Imm(void *L, uint64_t Imm)
static void write(bool isBE, void *P, T V)
static uint64_t getBits(uint64_t Val, int Start, int End)
static void write32AArch64Addr(void *L, uint64_t Imm)
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
const T * data() const
Definition: ArrayRef.h:160
Lightweight error class with error context and mandatory checking.
Definition: Error.h:156
static ErrorSuccess success()
Create a success value.
Definition: Error.h:330
Tagged union holding either a T or a Error.
Definition: Error.h:470
Error takeError()
Take ownership of the stored error.
Definition: Error.h:597
Symbol resolution interface.
Definition: JITSymbol.h:371
static std::unique_ptr< MemoryBuffer > getMemBufferCopy(StringRef InputData, const Twine &BufferName="")
Open the specified memory range as a MemoryBuffer, copying the contents and taking ownership of it.
RelocationEntry - used to represent relocations internally in the dynamic linker.
uint32_t RelType
RelType - relocation type.
uint64_t Offset
Offset - offset into the section.
int64_t Addend
Addend - the relocation addend encoded in the instruction itself.
unsigned SectionID
SectionID - the section this relocation points to.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition: Record.h:2148
void registerEHFrames() override
size_t getGOTEntrySize() override
~RuntimeDyldELF() override
static std::unique_ptr< RuntimeDyldELF > create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver)
Error finalizeLoad(const ObjectFile &Obj, ObjSectionToIDMap &SectionMap) override
DenseMap< SID, SID > SectionToGOTMap
bool isCompatibleFile(const object::ObjectFile &Obj) const override
std::unique_ptr< RuntimeDyld::LoadedObjectInfo > loadObject(const object::ObjectFile &O) override
RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver)
Expected< relocation_iterator > processRelocationRef(unsigned SectionID, relocation_iterator RelI, const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) override
Parses one or more object file relocations (some object files use relocation pairs) and stores it to ...
std::map< SectionRef, unsigned > ObjSectionToIDMap
void writeInt32BE(uint8_t *Addr, uint32_t Value)
void writeInt64BE(uint8_t *Addr, uint64_t Value)
std::map< RelocationValueRef, uintptr_t > StubMap
void writeInt16BE(uint8_t *Addr, uint16_t Value)
void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName)
RuntimeDyld::MemoryManager & MemMgr
void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID)
Expected< unsigned > findOrEmitSection(const ObjectFile &Obj, const SectionRef &Section, bool IsCode, ObjSectionToIDMap &LocalSections)
Find Section in LocalSections.
Triple::ArchType Arch
uint8_t * createStubFunction(uint8_t *Addr, unsigned AbiVariant=0)
Emits long jump instruction to Addr.
uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const
Endian-aware read Read the least significant Size bytes from Src.
RTDyldSymbolTable GlobalSymbolTable
Expected< ObjSectionToIDMap > loadObjectImpl(const object::ObjectFile &Obj)
virtual uint8_t * allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool IsReadOnly)=0
Allocate a memory block of (at least) the given size suitable for data.
virtual uint8_t * allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName)=0
Allocate a memory block of (at least) the given size suitable for executable code.
virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size)=0
Register the EH frames with the runtime so that c++ exceptions work.
virtual bool allowStubAllocation() const
Override to return false to tell LLVM no stub space will be needed.
Definition: RuntimeDyld.h:148
SectionEntry - represents a section emitted into memory by the dynamic linker.
size_t size() const
Definition: SmallVector.h:91
void push_back(const T &Elt)
Definition: SmallVector.h:416
iterator end()
Definition: StringMap.h:204
iterator find(StringRef Key)
Definition: StringMap.h:217
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
bool equals(StringRef RHS) const
equals - Check for string equality, this is more efficient than compare() when the relative ordering ...
Definition: StringRef.h:164
const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:131
Symbol info for RuntimeDyld.
Target - Wrapper for Target specific information.
@ UnknownArch
Definition: Triple.h:47
@ aarch64_be
Definition: Triple.h:52
@ mips64el
Definition: Triple.h:67
static StringRef getArchTypePrefix(ArchType Kind)
Get the "prefix" canonical name for the Kind architecture.
Definition: Triple.cpp:92
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
Expected< uint32_t > getFlags() const
Get symbol flags (bitwise OR of SymbolRef::Flags)
Definition: SymbolicFile.h:206
DataRefImpl getRawDataRefImpl() const
Definition: SymbolicFile.h:210
StringRef getData() const
Definition: Binary.cpp:39
bool isLittleEndian() const
Definition: Binary.h:152
StringRef getFileName() const
Definition: Binary.cpp:41
bool isELF() const
Definition: Binary.h:122
virtual unsigned getPlatformFlags() const =0
Returns platform-specific object flags, if any.
static bool classof(const Binary *v)
Expected< const Elf_Sym * > getSymbol(DataRefImpl Sym) const
static Expected< ELFObjectFile< ELFT > > create(MemoryBufferRef Object, bool InitContent=true)
Expected< int64_t > getAddend() const
This class is the base class for all object file types.
Definition: ObjectFile.h:228
virtual section_iterator section_end() const =0
virtual uint8_t getBytesInAddress() const =0
The number of bytes used to represent an address in this object file format.
section_iterator_range sections() const
Definition: ObjectFile.h:327
virtual StringRef getFileFormatName() const =0
virtual section_iterator section_begin() const =0
This is a value type class that represents a single relocation in the list of relocations in the obje...
Definition: ObjectFile.h:51
uint64_t getType() const
Definition: ObjectFile.h:571
This is a value type class that represents a single section in the list of sections in the object fil...
Definition: ObjectFile.h:80
DataRefImpl getRawDataRefImpl() const
Definition: ObjectFile.h:541
Expected< StringRef > getName() const
Definition: ObjectFile.h:460
This is a value type class that represents a single symbol in the list of symbols in the object file.
Definition: ObjectFile.h:167
Expected< section_iterator > getSection() const
Get section this symbol is defined in reference to.
Definition: ObjectFile.h:423
virtual basic_symbol_iterator symbol_end() const =0
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:642
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ EF_PPC64_ABI
Definition: ELF.h:403
static int64_t decodePPC64LocalEntryOffset(unsigned Other)
Definition: ELF.h:411
@ EF_MIPS_ABI_O32
Definition: ELF.h:519
@ EF_MIPS_ABI2
Definition: ELF.h:511
std::optional< const char * > toString(const std::optional< DWARFFormValue > &V)
Take an optional DWARFFormValue and try to extract a string value from it.
@ Resolved
Queried, materialization begun.
void write32le(void *P, uint32_t V)
Definition: Endian.h:416
uint32_t read32le(const void *P)
Definition: Endian.h:381
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:406
void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner={})
Log all errors (if any) in E to OS.
Definition: Error.cpp:63
Error write(MCStreamer &Out, ArrayRef< std::string > Inputs)
Definition: DWP.cpp:551
static uint16_t applyPPChighera(uint64_t value)
static uint16_t applyPPChi(uint64_t value)
void handleAllErrors(Error E, HandlerTs &&... Handlers)
Behaves the same as handleErrors, except that by contract all errors must be handled by the given han...
Definition: Error.h:966
static uint16_t applyPPChighesta(uint64_t value)
static uint16_t applyPPChighest(uint64_t value)
static uint16_t applyPPCha(uint64_t value)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition: Casting.h:549
static uint16_t applyPPClo(uint64_t value)
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
Definition: Format.h:124
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:745
static uint16_t applyPPChigher(uint64_t value)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition: Alignment.h:155
static void or32le(void *P, int32_t V)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1909
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:566
void consumeError(Error Err)
Consume a Error without doing anything.
Definition: Error.h:1043
static void write32AArch64Addr(void *L, uint64_t Imm)
Definition: RuntimeDyldELF.cpp:41
SymInfo contains information about symbol: it's address and section index which is -1LL for absolute ...