AArch64AsmBackend.cpp
//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
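  // Illustrative note: the Offset/Size columns above give the bit position and
  // width of each fixup's field within the 32-bit instruction word; applyFixup
  // shifts the adjusted value left by TargetOffset before OR-ing it into the
  // encoded bytes. For example, fixup_aarch64_add_imm12 lands in bits [21:10]
  // of the ADD/SUB (immediate) encoding.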

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
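// Illustrative example: ADR/ADRP split their 21-bit immediate into immlo
// (instruction bits 30:29) and immhi (bits 23:5). For Value = 0x12345,
// lo2 = 0x1 and hi19 = 0x48d1, so AdrImmBits returns (0x48d1 << 5) | (0x1 << 29).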

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    }
    else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    }
    else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}
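// Worked example (illustrative): for fixup_aarch64_pcrel_branch26, a resolved
// PC-relative Value of 0x2000 passes the isInt<28> and alignment checks and is
// encoded as (0x2000 >> 2) & 0x3ffffff = 0x800, i.e. the imm26 field counts
// 32-bit words rather than bytes.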

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y)  .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
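// Illustrative use (assumed syntax): an explicit relocation requested from
// assembly, e.g.
//   .reloc ., R_AARCH64_NONE, sym
// is looked up here by name; the returned kind is offset by
// FirstLiteralRelocationKind, so applyFixup() skips it and the object writer
// emits the relocation literally.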

/// getFixupKindContainereSizeInBytes - The number of bytes of the
/// container involved in big endian, or 0 if the item is little endian.
unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}
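// Note: even on big-endian AArch64 targets, instruction words are stored
// little-endian; only data fixups (FK_Data_*) live in byte-swapped containers,
// which is why the instruction fixup kinds above report a container size of 0.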

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
    if (SymLoc == AArch64AuthMCExpr::VK_AUTH ||
        SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) {
      assert(Value == 0);
      const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}
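// Note: the byte sequence 0x1f 0x20 0x03 0xd5 is the little-endian encoding of
// 0xd503201f, the AArch64 NOP (HINT #0) instruction.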

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //   there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Fixup.getTargetKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23-bits of
  /// the compact unwind encoding is the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};
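// Layout sketch (inferred from the values above): the unwind mode occupies
// bits 27:24 of the 32-bit encoding, the callee-saved register pairs occupy
// the low bits listed, and for UNWIND_ARM64_MODE_FRAMELESS the 16-byte-scaled
// stack size sits in bits 23:12 (see encodeStackAdjustment() below).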

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }
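  // Example: a frameless function with a 96-byte stack reserves 96 / 16 = 6
  // units, encoded as 6 << 12 = 0x6000 in the stack-size field.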

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    int CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
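  // Rough example of the common case: a prologue that establishes FP/LR and
  // then saves x19/x20 yields UNWIND_ARM64_MODE_FRAME |
  // UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x04000001; any CFI directive this
  // walker does not recognize falls back to UNWIND_ARM64_MODE_DWARF.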
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

}

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};
}

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}