LLVM 22.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Mangler.h"
49#include "llvm/IR/Module.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCExpr.h"
53#include "llvm/MC/MCInst.h"
57#include "llvm/MC/MCStreamer.h"
58#include "llvm/MC/MCSymbol.h"
59#include "llvm/MC/MCValue.h"
69#include <cassert>
70#include <cstdint>
71#include <map>
72#include <memory>
73
74using namespace llvm;
75
78 "aarch64-ptrauth-auth-checks", cl::Hidden,
79 cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
80 clEnumValN(Poison, "poison", "poison on failure"),
81 clEnumValN(Trap, "trap", "trap on failure")),
82 cl::desc("Check pointer authentication auth/resign failures"),
84
85#define DEBUG_TYPE "asm-printer"
86
87namespace {
88
/// AArch64 implementation of AsmPrinter: lowers MachineInstrs to MCInsts and
/// drives emission of AArch64 assembly/object output, including the
/// target-specific pseudo expansions handled in this file (pointer
/// authentication, HWASan checks, KCFI, XRay sleds, stackmaps, jump tables).
class AArch64AsmPrinter : public AsmPrinter {
  AArch64MCInstLower MCInstLowering;
  FaultMaps FM;
  // Current function's subtarget; also temporarily repointed by
  // emitHwasanMemaccessSymbols while emitting module-level helpers.
  const AArch64Subtarget *STI;
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
#ifndef NDEBUG
  unsigned InstsEmitted;
#endif
  // Set in emitStartOfAsmFile when the module requests COFF import call
  // optimization via the "import-call-optimization" module flag.
  bool EnableImportCallOptimization = false;
  // NOTE(review): the declared type of this member is missing from this
  // capture of the file (presumably a section -> imported-call list map);
  // confirm against upstream.
      SectionToImportedFunctionCalls;
  unsigned PAuthIFuncNextUniqueID = 1;

public:
  static char ID;

  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer), ID),
        MCInstLowering(OutContext, *this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;

  const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                         ArrayRef<unsigned> JumpTableIndices) override;
  // NOTE(review): the tuple return type below is truncated in this capture of
  // the file.
  std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
      getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
                               const MCSymbol *BranchLabel) const override;

  void emitFunctionEntryLabel() override;

  void emitXXStructor(const DataLayout &DL, const Constant *CV) override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerHardenedBRJumpTable(const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
  void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);

  // Key for the lazily created HWASan check helper symbols:
  // (register, short-granules?, access-info, fixed-shadow?, shadow-offset).
  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
      HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  // Emit the sequence for BRA/BLRA (authenticate + branch/call).
  void emitPtrauthBranch(const MachineInstr *MI);

  // NOTE(review): one or more parameters of this declaration are missing from
  // this capture of the file.
  void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
                                          Register ScratchReg,
                                          const MCSymbol *OnFailure = nullptr);

  // Check authenticated LR before tail calling.
  void emitPtrauthTailCallHardening(const MachineInstr *TC);

  // Emit the sequence for AUT or AUTPAC.
  void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
                             uint64_t AUTDisc,
                             const MachineOperand *AUTAddrDisc,
                             Register Scratch,
                             std::optional<AArch64PACKey::ID> PACKey,
                             uint64_t PACDisc, Register PACAddrDisc, Value *DS);

  // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
  // if no instruction should be emitted because the deactivation symbol is
  // defined in the current module so this function emitted a NOP instead.
  bool emitDeactivationSymbolRelocation(Value *DS);

  // Emit the sequence for PAC.
  void emitPtrauthSign(const MachineInstr *MI);

  // Emit the sequence to compute the discriminator.
  //
  // The returned register is either unmodified AddrDisc or ScratchReg.
  //
  // If the expanded pseudo is allowed to clobber AddrDisc register, setting
  // MayUseAddrAsScratch may save one MOV instruction, provided the address
  // is already in x16/x17 (i.e. return x16/x17 which is the *modified* AddrDisc
  // register at the same time) or the OS doesn't make it safer to use x16/x17
  // (see AArch64Subtarget::isX16X17Safer()):
  //
  //   mov   x17, x16
  //   movk  x17, #1234, lsl #48
  //   ; x16 is not used anymore
  //
  // can be replaced by
  //
  //   movk  x16, #1234, lsl #48
  Register emitPtrauthDiscriminator(uint16_t Disc, Register AddrDisc,
                                    Register ScratchReg,
                                    bool MayUseAddrAsScratch = false);

  // Emit the sequence for LOADauthptrstatic
  void LowerLOADauthptrstatic(const MachineInstr &MI);

  // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
  // adrp-add followed by PAC sign)
  void LowerMOVaddrPAC(const MachineInstr &MI);

  // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
  // and authenticate it with, if FPAC bit is not set, check+trap sequence after
  // authenticating)
  void LowerLOADgotAUTH(const MachineInstr &MI);

  void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
  void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
                   bool DSOLocal, const MCSubtargetInfo &STI);

  const MCExpr *emitPAuthRelocationAsIRelative(
      const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
      bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);

  // Emit Build Attributes
  void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
                      uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);

  // Emit expansion of Compare-and-branch pseudo instructions
  void emitCBPseudoExpansion(const MachineInstr *MI);

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
  // Convenience overload targeting this printer's own output streamer.
  void EmitToStreamer(const MCInst &Inst) {
    EmitToStreamer(*OutStreamer, Inst);
  }

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): an AU.addRequired<...>() line appears to be missing from
    // this capture of the file.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Pick up optional profile information if the analyses are available.
    if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
      PSI = &PSIW->getPSI();
    if (auto *SDPIW =
            getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
      SDPI = &SDPIW->getStaticDataProfileInfo();

    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      // Emit a COFF symbol definition record for the function.
      bool Local = MF.getFunction().hasLocalLinkage();
      // NOTE(review): the initializers computing the storage class ('Scl')
      // and 'Type' are missing from this capture of the file.
      int Type =

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

  const MCExpr *lowerConstant(const Constant *CV,
                              const Constant *BaseCV = nullptr,
                              uint64_t Offset = 0) override;

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;
  void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  // Small helpers that emit a single MOV/MOVZ/MOVK instruction.
  void emitMovXReg(Register Dest, Register Src);
  void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
  void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);
  void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Maps an LOH-annotated instruction to the label emitted for it.
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }

  const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
    assert(STI);
    return STI;
  }
  void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
                              MCSymbol *LazyPointer) override;
  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
                                    MCSymbol *LazyPointer) override;

  /// Checks if this instruction is part of a sequence that is eligible for
  /// import call optimization and, if so, records it to be emitted in the
  /// import call section.
  void recordIfImportCall(const MachineInstr *BranchInst);
};
349
350} // end anonymous namespace
351
void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();

  if (TT.isOSBinFormatCOFF()) {
    emitCOFFFeatureSymbol(M);
    emitCOFFReplaceableFunctionData(M);

    // Module flag set when the build requests COFF import call optimization.
    if (M.getModuleFlag("import-call-optimization"))
      EnableImportCallOptimization = true;
  }

  // Everything below (build attributes, .note.gnu.property) is ELF-only.
  if (!TT.isOSBinFormatELF())
    return;

  // For emitting build attributes and .note.gnu.property section
  auto *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Assemble feature flags that may require creation of build attributes and a
  // note section.
  unsigned BAFlags = 0;
  unsigned GNUFlags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement"))) {
    if (!BTE->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
      // NOTE(review): the matching "GNUFlags |= ..." update is missing from
      // this capture of the file.
    }
  }

  if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("guarded-control-stack"))) {
    if (!GCS->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
      // NOTE(review): the matching "GNUFlags |= ..." update is missing from
      // this capture of the file.
    }
  }

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address"))) {
    if (!Sign->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
      // NOTE(review): the matching "GNUFlags |= ..." update is missing from
      // this capture of the file.
    }
  }

  // A value of -1 means "module flag absent"; emitAttributes normalizes it.
  uint64_t PAuthABIPlatform = -1;
  if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
    PAuthABIPlatform = PAP->getZExtValue();
  }

  uint64_t PAuthABIVersion = -1;
  if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
    PAuthABIVersion = PAV->getZExtValue();
  }

  // Emit AArch64 Build Attributes
  emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
  // Emit a .note.gnu.property section with the flags.
  TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
}
414
415void AArch64AsmPrinter::emitFunctionHeaderComment() {
416 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
417 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
418 if (OutlinerString != std::nullopt)
419 OutStreamer->getCommentOS() << ' ' << OutlinerString;
420}
421
422void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
423{
424 const Function &F = MF->getFunction();
425 if (F.hasFnAttribute("patchable-function-entry")) {
426 unsigned Num;
427 if (F.getFnAttribute("patchable-function-entry")
428 .getValueAsString()
429 .getAsInteger(10, Num))
430 return;
431 emitNops(Num);
432 return;
433 }
434
435 emitSled(MI, SledKind::FUNCTION_ENTER);
436}
437
// Emit the XRay sled for a patchable function return.
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  emitSled(MI, SledKind::FUNCTION_EXIT);
}
441
// Emit the XRay sled for a patchable tail call.
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  emitSled(MI, SledKind::TAIL_CALL);
}
445
446void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
447 static const int8_t NoopsInSledCount = 7;
448 // We want to emit the following pattern:
449 //
450 // .Lxray_sled_N:
451 // ALIGN
452 // B #32
453 // ; 7 NOP instructions (28 bytes)
454 // .tmpN
455 //
456 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
457 // over the full 32 bytes (8 instructions) with the following pattern:
458 //
459 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
460 // LDR W17, #12 ; W17 := function ID
461 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
462 // BLR X16 ; call the tracing trampoline
463 // ;DATA: 32 bits of function ID
464 // ;DATA: lower 32 bits of the address of the trampoline
465 // ;DATA: higher 32 bits of the address of the trampoline
466 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
467 //
468 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
469 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
470 OutStreamer->emitLabel(CurSled);
471 auto Target = OutContext.createTempSymbol();
472
473 // Emit "B #32" instruction, which jumps over the next 28 bytes.
474 // The operand has to be the number of 4-byte instructions to jump over,
475 // including the current instruction.
476 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
477
478 for (int8_t I = 0; I < NoopsInSledCount; I++)
479 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
480
481 OutStreamer->emitLabel(Target);
482 recordSled(CurSled, MI, Kind, 2);
483}
484
// Emit the AArch64 build-attributes subsections describing the PAuth ABI and
// the BTI/PAC/GCS feature bits.
void AArch64AsmPrinter::emitAttributes(unsigned Flags,
                                       uint64_t PAuthABIPlatform,
                                       uint64_t PAuthABIVersion,
                                       AArch64TargetStreamer *TS) {

  // -1 encodes "module flag absent"; normalize to 0 so the subsection is
  // skipped below when neither value was provided.
  PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
  PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

  if (PAuthABIPlatform || PAuthABIVersion) {
    // NOTE(review): the calls emitting the PAuth ABI subsection and its two
    // attributes are partially missing from this capture of the file.
        AArch64BuildAttributes::SubsectionOptional::REQUIRED,
        AArch64BuildAttributes::SubsectionType::ULEB128);
                      PAuthABIPlatform, "");
        "");
  }

  // NOTE(review): the initializer expressions extracting the BTI/PAC/GCS bits
  // from 'Flags' are missing from this capture of the file.
  unsigned BTIValue =
  unsigned PACValue =
  unsigned GCSValue =

  if (BTIValue || PACValue || GCSValue) {
    // NOTE(review): the calls emitting the feature-and-bits subsection and
    // its attributes are partially missing from this capture of the file.
        AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
        AArch64BuildAttributes::SubsectionType::ULEB128);
  }
}
533
534// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
535// (built-in functions __xray_customevent/__xray_typedevent).
536//
537// .Lxray_event_sled_N:
538// b 1f
539// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
540// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
541// bl __xray_CustomEvent or __xray_TypedEvent
542// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
543// 1:
544//
545// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
546//
547// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
548// After patching, b .+N will become a nop.
549void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
550 bool Typed) {
551 auto &O = *OutStreamer;
552 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
553 O.emitLabel(CurSled);
554 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
555 auto *Sym = MCSymbolRefExpr::create(
556 OutContext.getOrCreateSymbol(
557 Twine(MachO ? "_" : "") +
558 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
559 OutContext);
560 if (Typed) {
561 O.AddComment("Begin XRay typed event");
562 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
563 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
564 .addReg(AArch64::SP)
565 .addReg(AArch64::X0)
566 .addReg(AArch64::X1)
567 .addReg(AArch64::SP)
568 .addImm(-4));
569 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
570 .addReg(AArch64::X2)
571 .addReg(AArch64::SP)
572 .addImm(2));
573 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
574 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
575 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
576 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
577 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
578 .addReg(AArch64::X2)
579 .addReg(AArch64::SP)
580 .addImm(2));
581 O.AddComment("End XRay typed event");
582 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
583 .addReg(AArch64::SP)
584 .addReg(AArch64::X0)
585 .addReg(AArch64::X1)
586 .addReg(AArch64::SP)
587 .addImm(4));
588
589 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
590 } else {
591 O.AddComment("Begin XRay custom event");
592 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
593 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
594 .addReg(AArch64::SP)
595 .addReg(AArch64::X0)
596 .addReg(AArch64::X1)
597 .addReg(AArch64::SP)
598 .addImm(-2));
599 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
600 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
601 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
602 O.AddComment("End XRay custom event");
603 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
604 .addReg(AArch64::SP)
605 .addReg(AArch64::X0)
606 .addReg(AArch64::X1)
607 .addReg(AArch64::SP)
608 .addImm(2));
609
610 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
611 }
612}
613
614void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
615 Register AddrReg = MI.getOperand(0).getReg();
616 assert(std::next(MI.getIterator())->isCall() &&
617 "KCFI_CHECK not followed by a call instruction");
618 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
619 "KCFI_CHECK call target doesn't match call operand");
620
621 // Default to using the intra-procedure-call temporary registers for
622 // comparing the hashes.
623 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
624 if (AddrReg == AArch64::XZR) {
625 // Checking XZR makes no sense. Instead of emitting a load, zero
626 // ScratchRegs[0] and use it for the ESR AddrIndex below.
627 AddrReg = getXRegFromWReg(ScratchRegs[0]);
628 emitMovXReg(AddrReg, AArch64::XZR);
629 } else {
630 // If one of the scratch registers is used for the call target (e.g.
631 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
632 // temporary register instead (in this case, AArch64::W9) as the check
633 // is immediately followed by the call instruction.
634 for (auto &Reg : ScratchRegs) {
635 if (Reg == getWRegFromXReg(AddrReg)) {
636 Reg = AArch64::W9;
637 break;
638 }
639 }
640 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
641 "Invalid scratch registers for KCFI_CHECK");
642
643 // Adjust the offset for patchable-function-prefix. This assumes that
644 // patchable-function-prefix is the same for all functions.
645 int64_t PrefixNops = 0;
646 (void)MI.getMF()
647 ->getFunction()
648 .getFnAttribute("patchable-function-prefix")
649 .getValueAsString()
650 .getAsInteger(10, PrefixNops);
651
652 // Load the target function type hash.
653 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
654 .addReg(ScratchRegs[0])
655 .addReg(AddrReg)
656 .addImm(-(PrefixNops * 4 + 4)));
657 }
658
659 // Load the expected type hash.
660 const int64_t Type = MI.getOperand(1).getImm();
661 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
662 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
663
664 // Compare the hashes and trap if there's a mismatch.
665 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
666 .addReg(AArch64::WZR)
667 .addReg(ScratchRegs[0])
668 .addReg(ScratchRegs[1])
669 .addImm(0));
670
671 MCSymbol *Pass = OutContext.createTempSymbol();
672 EmitToStreamer(*OutStreamer,
673 MCInstBuilder(AArch64::Bcc)
674 .addImm(AArch64CC::EQ)
675 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
676
677 // The base ESR is 0x8000 and the register information is encoded in bits
678 // 0-9 as follows:
679 // - 0-4: n, where the register Xn contains the target address
680 // - 5-9: m, where the register Wm contains the expected type hash
681 // Where n, m are in [0, 30].
682 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
683 unsigned AddrIndex;
684 switch (AddrReg) {
685 default:
686 AddrIndex = AddrReg - AArch64::X0;
687 break;
688 case AArch64::FP:
689 AddrIndex = 29;
690 break;
691 case AArch64::LR:
692 AddrIndex = 30;
693 break;
694 }
695
696 assert(AddrIndex < 31 && TypeIndex < 31);
697
698 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
699 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
700 OutStreamer->emitLabel(Pass);
701}
702
703void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
704 Register Reg = MI.getOperand(0).getReg();
705
706 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
707 // statically known to be zero. However, conceivably, the HWASan pass may
708 // encounter a "cannot currently statically prove to be null" pointer (and is
709 // therefore unable to omit the intrinsic) that later optimization passes
710 // convert into a statically known-null pointer.
711 if (Reg == AArch64::XZR)
712 return;
713
714 bool IsShort =
715 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
716 (MI.getOpcode() ==
717 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
718 uint32_t AccessInfo = MI.getOperand(1).getImm();
719 bool IsFixedShadow =
720 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
721 (MI.getOpcode() ==
722 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
723 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
724
725 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
726 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
727 if (!Sym) {
728 // FIXME: Make this work on non-ELF.
729 if (!TM.getTargetTriple().isOSBinFormatELF())
730 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
731
732 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
733 utostr(AccessInfo);
734 if (IsFixedShadow)
735 SymName += "_fixed_" + utostr(FixedShadowOffset);
736 if (IsShort)
737 SymName += "_short_v2";
738 Sym = OutContext.getOrCreateSymbol(SymName);
739 }
740
741 EmitToStreamer(*OutStreamer,
742 MCInstBuilder(AArch64::BL)
743 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
744}
745
// Emit the bodies of the out-of-line tag-check routines referenced by
// LowerHWASAN_CHECK_MEMACCESS, one weak/hidden function per unique
// HwasanMemaccessTuple collected during the module.
void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  std::unique_ptr<MCSubtargetInfo> STI(
      TM.getTarget().createMCSubtargetInfo(TT, "", ""));
  assert(STI && "Unable to create subtarget info");
  // Temporarily install a module-level subtarget; restored to null at the end.
  this->STI = static_cast<const AArch64Subtarget *>(&*STI);

  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    bool IsFixedShadow = std::get<3>(P.first);
    uint64_t FixedShadowOffset = std::get<4>(P.first);
    // Short-granule checks report through the v2 runtime entry point.
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    // Decode the packed access-info immediate.
    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    // NOTE(review): the section-flags argument line of getELFSection is
    // missing from this capture of the file.
    OutStreamer->switchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        /*IsComdat=*/true));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    // X16 := shadow index derived from the tagged pointer.
    EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
                       .addImm(4)
                       .addImm(55));

    if (IsFixedShadow) {
      // Aarch64 makes it difficult to embed large constants in the code.
      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
      // left-shift option in the MOV instruction. Combined with the 16-bit
      // immediate, this is enough to represent any offset up to 2**48.
      emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    } else {
      // Non-fixed shadow: the base lives in X20 (short granules) or X9.
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(IsShort ? AArch64::X20 : AArch64::X9)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    }

    // Compare the loaded shadow tag with the pointer tag.
    // NOTE(review): the final shifted-operand argument of this SUBSXrs is
    // missing from this capture of the file.
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    // NOTE(review): part of the addExpr argument of this Bcc is missing from
    // this capture of the file.
    EmitToStreamer(MCInstBuilder(AArch64::Bcc)
                       .addImm(AArch64CC::NE)
                       HandleMismatchOrPartialSym, OutContext)));
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    // Fast path: tags matched, return to the caller.
    EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      // X17 := pointer tag (top byte); a match-all tag skips reporting.
      EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(56)
                         .addImm(63));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
                         .addReg(AArch64::XZR)
                         .addReg(AArch64::X17)
                         .addImm(MatchAllTag)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
    }

    if (IsShort) {
      // Shadow values above 15 cannot be short granules: real mismatch.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addImm(15)
                         .addImm(0));
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // X17 := offset of the last accessed byte within its 16-byte granule.
      EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      if (Size != 1)
        EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                           .addReg(AArch64::X17)
                           .addReg(AArch64::X17)
                           .addImm(Size - 1)
                           .addImm(0));
      // Access extending past the short granule's valid length: mismatch.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::W17)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // Re-check against the real tag stored in the granule's last byte.
      EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
                         .addReg(AArch64::X16)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X16)
                         .addImm(0));
      // NOTE(review): the final shifted-operand argument of this SUBSXrs is
      // missing from this capture of the file.
      EmitToStreamer(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    // Confirmed mismatch: spill X0/X1 plus a frame record, then hand off to
    // the runtime reporting function.
    EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
                       .addReg(AArch64::SP)
                       .addReg(AArch64::X0)
                       .addReg(AArch64::X1)
                       .addReg(AArch64::SP)
                       .addImm(-32));
    EmitToStreamer(MCInstBuilder(AArch64::STPXi)
                       .addReg(AArch64::FP)
                       .addReg(AArch64::LR)
                       .addReg(AArch64::SP)
                       .addImm(29));

    if (Reg != AArch64::X0)
      emitMovXReg(AArch64::X0, Reg);
    emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
    } else {
      // Intentionally load the GOT entry and branch to it, rather than possibly
      // late binding the function, which may clobber the registers before we
      // have a chance to save them.
      // NOTE(review): the MCSpecifierExpr kind arguments of the two calls
      // below are missing from this capture of the file.
      EmitToStreamer(MCInstBuilder(AArch64::ADRP)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
    }
  }
  this->STI = nullptr;
}
950
// Emit one 8-byte authenticated (signed) pointer stub:
//   sym$auth_ptr$key$disc:
//     .quad <signed pointer expression>
static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
                                     MCSymbol *StubLabel,
                                     const MCExpr *StubAuthPtrRef) {
  // sym$auth_ptr$key$disc:
  OutStreamer.emitLabel(StubLabel);
  OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
}
958
/// Module epilogue: emits HWASan memaccess helper symbols, authenticated
/// pointer stubs (Mach-O and ELF), the pauth ELF GOT symbol-type fixup,
/// and the COFF import-call-optimization section.
void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
  emitHwasanMemaccessSymbols(M);

  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatMachO()) {
    // Output authenticated pointers as indirect symbols, if we have any.
    MachineModuleInfoMachO &MMIMacho =
        MMI->getObjFileInfo<MachineModuleInfoMachO>();

    auto Stubs = MMIMacho.getAuthGVStubList();

    if (!Stubs.empty()) {
      // Switch to the "__auth_ptr" section.
      // NOTE(review): the trailing arguments of this getMachOSection call
      // (section kind) appear truncated in this listing — verify against the
      // upstream source.
      OutStreamer->switchSection(
          OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
      emitAlignment(Align(8));

      // One label + 8-byte signed pointer per stub.
      for (const auto &Stub : Stubs)
        emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);

      OutStreamer->addBlankLine();
    }

    // Funny Darwin hack: This flag tells the linker that no global symbols
    // contain code that falls through to other global symbols (e.g. the obvious
    // implementation of multiple entry points). If this doesn't occur, the
    // linker can safely perform dead code stripping. Since LLVM never
    // generates code that does this, it is always safe to set.
    OutStreamer->emitSubsectionsViaSymbols();
  }

  if (TT.isOSBinFormatELF()) {
    // Output authenticated pointers as indirect symbols, if we have any.
    MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();

    auto Stubs = MMIELF.getAuthGVStubList();

    if (!Stubs.empty()) {
      const TargetLoweringObjectFile &TLOF = getObjFileLowering();
      OutStreamer->switchSection(TLOF.getDataSection());
      emitAlignment(Align(8));

      for (const auto &Stub : Stubs)
        emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);

      OutStreamer->addBlankLine();
    }

    // With signed ELF GOT enabled, the linker looks at the symbol type to
    // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
    // for functions not defined in the module have STT_NOTYPE type by default.
    // This makes linker to emit signing schema with DA key (instead of IA) for
    // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
    // all function symbols used in the module to have STT_FUNC type. See
    // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
    const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
        M.getModuleFlag("ptrauth-elf-got"));
    if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
      for (const GlobalValue &GV : M.global_values())
        if (!GV.use_empty() && isa<Function>(GV) &&
            !GV.getName().starts_with("llvm."))
          // NOTE(review): the symbol-attribute argument (an STT_FUNC marker)
          // appears truncated in this listing.
          OutStreamer->emitSymbolAttribute(getSymbol(&GV),
  }

  // Emit stack and fault map information.
  // NOTE(review): the stack-map / fault-map emission calls that belong under
  // this comment appear to be missing from this listing.

  // If import call optimization is enabled, emit the appropriate section.
  // We do this whether or not we recorded any import calls.
  if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
    OutStreamer->switchSection(getObjFileLowering().getImportCallSection());

    // Section always starts with some magic.
    constexpr char ImpCallMagic[12] = "Imp_Call_V1";
    OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});

    // Layout of this section is:
    // Per section that contains calls to imported functions:
    //  uint32_t SectionSize: Size in bytes for information in this section.
    //  uint32_t Section Number
    // Per call to imported function in section:
    //  uint32_t Kind: the kind of imported function.
    //  uint32_t BranchOffset: the offset of the branch instruction in its
    //                         parent section.
    //  uint32_t TargetSymbolId: the symbol id of the called function.
    for (auto &[Section, CallsToImportedFuncs] :
         SectionToImportedFunctionCalls) {
      unsigned SectionSize =
          sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
      OutStreamer->emitInt32(SectionSize);
      OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
      for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
        // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
        OutStreamer->emitInt32(0x13);
        OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
        OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
      }
    }
  }
}
1061
/// Emit the Linker Optimization Hints (.loh directives) recorded for the
/// current function, mapping each recorded MachineInstr to the MCSymbol
/// label planted for it during instruction emission.
// NOTE(review): the declaration of MCArgs (the symbol buffer used below)
// appears to be missing from this listing.
void AArch64AsmPrinter::emitLOHs() {

  for (const auto &D : AArch64FI->getLOHContainer()) {
    for (const MachineInstr *MI : D.getArgs()) {
      // Every instruction referenced by an LOH must already have a label.
      MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
      assert(LabelIt != LOHInstToLabel.end() &&
             "Label hasn't been inserted for LOH related instruction");
      MCArgs.push_back(LabelIt->second);
    }
    OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
    // Reuse the argument buffer for the next directive.
    MCArgs.clear();
  }
}
1076
1077void AArch64AsmPrinter::emitFunctionBodyEnd() {
1078 if (!AArch64FI->getLOHRelated().empty())
1079 emitLOHs();
1080}
1081
1082/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1083MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1084 // Darwin uses a linker-private symbol name for constant-pools (to
1085 // avoid addends on the relocation?), ELF has no such concept and
1086 // uses a normal private symbol.
1087 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1088 return OutContext.getOrCreateSymbol(
1089 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1090 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1091
1092 return AsmPrinter::GetCPISymbol(CPID);
1093}
1094
/// Print a single machine operand for inline-asm / debug-comment output.
// NOTE(review): the `case MachineOperand::...:` labels of this switch (and
// some statements inside the register case) appear to be missing from this
// listing; the bodies below correspond to the register, immediate, symbol,
// and block-address cases respectively.
void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
                                     raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);
  switch (MO.getType()) {
  default:
    llvm_unreachable("<unknown operand type>");
    // Register operand.
    Register Reg = MO.getReg();
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    break;
  }
    // Immediate operand: print the raw value.
    O << MO.getImm();
    break;
  }
    // Global/external symbol operand.
    PrintSymbolOperand(MO, O);
    break;
  }
    // Block address: print the corresponding MC symbol.
    MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
    Sym->print(O, MAI);
    break;
  }
  }
}
1123
/// Print a GPR operand for inline asm under a size modifier:
/// 'w' = 32-bit W register, 'x' = 64-bit X register, 't' = x8-tuple form.
/// Returns true on an unknown modifier (error), false on success.
// NOTE(review): the register-conversion statements inside each case and the
// final register-name print appear to be missing from this listing.
bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
                                          raw_ostream &O) {
  Register Reg = MO.getReg();
  switch (Mode) {
  default:
    return true; // Unknown mode.
  case 'w':
    break;
  case 'x':
    break;
  case 't':
    break;
  }

  return false;
}
1144
1145// Prints the register in MO using class RC using the offset in the
1146// new register class. This should not be used for cross class
1147// printing.
1148bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1149 const TargetRegisterClass *RC,
1150 unsigned AltName, raw_ostream &O) {
1151 assert(MO.isReg() && "Should only get here with a register!");
1152 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1153 Register Reg = MO.getReg();
1154 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1155 if (!RI->regsOverlap(RegToPrint, Reg))
1156 return true;
1157 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1158 return false;
1159}
1160
/// Lower an inline-asm operand, honoring AArch64 operand modifiers
/// ('w'/'x' for GPR width, 'b'/'h'/'s'/'d'/'q'/'z' for FP/SVE classes).
/// Returns true on error (unknown modifier), false on success.
bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                                        const char *ExtraCode, raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);

  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
    return false;

  // Does this asm operand have a single letter operand modifier?
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      return true; // Unknown modifier.
    case 'w': // Print W register
    case 'x': // Print X register
      if (MO.isReg())
        return printAsmMRegister(MO, ExtraCode[0], O);
      if (MO.isImm() && MO.getImm() == 0) {
        // Immediate zero maps to the zero register of the requested width.
        unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
        // NOTE(review): the statement printing `Reg` appears to be missing
        // from this listing.
        return false;
      }
      printOperand(MI, OpNum, O);
      return false;
    case 'b': // Print B register.
    case 'h': // Print H register.
    case 's': // Print S register.
    case 'd': // Print D register.
    case 'q': // Print Q register.
    case 'z': // Print Z register.
      if (MO.isReg()) {
        // Map the modifier letter to the corresponding register class.
        const TargetRegisterClass *RC;
        switch (ExtraCode[0]) {
        case 'b':
          RC = &AArch64::FPR8RegClass;
          break;
        case 'h':
          RC = &AArch64::FPR16RegClass;
          break;
        case 's':
          RC = &AArch64::FPR32RegClass;
          break;
        case 'd':
          RC = &AArch64::FPR64RegClass;
          break;
        case 'q':
          RC = &AArch64::FPR128RegClass;
          break;
        case 'z':
          RC = &AArch64::ZPRRegClass;
          break;
        default:
          return true;
        }
        return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
      }
      printOperand(MI, OpNum, O);
      return false;
    }
  }

  // According to ARM, we should emit x and v registers unless we have a
  // modifier.
  if (MO.isReg()) {
    Register Reg = MO.getReg();

    // If this is a w or x register, print an x register.
    if (AArch64::GPR32allRegClass.contains(Reg) ||
        AArch64::GPR64allRegClass.contains(Reg))
      return printAsmMRegister(MO, 'x', O);

    // If this is an x register tuple, print an x register.
    if (AArch64::GPR64x8ClassRegClass.contains(Reg))
      return printAsmMRegister(MO, 't', O);

    unsigned AltName = AArch64::NoRegAltName;
    const TargetRegisterClass *RegClass;
    if (AArch64::ZPRRegClass.contains(Reg)) {
      RegClass = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(Reg)) {
      RegClass = &AArch64::PPRRegClass;
    } else if (AArch64::PNRRegClass.contains(Reg)) {
      RegClass = &AArch64::PNRRegClass;
    } else {
      RegClass = &AArch64::FPR128RegClass;
      AltName = AArch64::vreg;
    }

    // If this is a b, h, s, d, or q register, print it as a v register.
    return printAsmRegInClass(MO, RegClass, AltName, O);
  }

  printOperand(MI, OpNum, O);
  return false;
}
1259
1260bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1261 unsigned OpNum,
1262 const char *ExtraCode,
1263 raw_ostream &O) {
1264 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1265 return true; // Unknown modifier.
1266
1267 const MachineOperand &MO = MI->getOperand(OpNum);
1268 assert(MO.isReg() && "unexpected inline asm memory operand");
1269 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1270 return false;
1271}
1272
1273void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1274 raw_ostream &OS) {
1275 unsigned NOps = MI->getNumOperands();
1276 assert(NOps == 4);
1277 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1278 // cast away const; DIetc do not take const operands for some reason.
1279 OS << MI->getDebugVariable()->getName();
1280 OS << " <- ";
1281 // Frame address. Currently handles register +- offset only.
1282 assert(MI->isIndirectDebugValue());
1283 OS << '[';
1284 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1285 if (I != 0)
1286 OS << ", ";
1287 printOperand(MI, I, OS);
1288 }
1289 OS << ']';
1290 OS << "+";
1291 printOperand(MI, NOps - 2, OS);
1292}
1293
/// Emit the requested jump tables into a read-only section. Entries are
/// label differences from a per-table base symbol, optionally compressed
/// (shifted right by 2) into byte/half-word entries.
void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                                          ArrayRef<unsigned> JumpTableIndices) {
  // Fast return if there is nothing to emit to avoid creating empty sections.
  if (JumpTableIndices.empty())
    return;
  const TargetLoweringObjectFile &TLOF = getObjFileLowering();
  const auto &F = MF->getFunction();
  // NOTE(review): the declaration of `JT` (the jump-table list from MJTI)
  // appears to be missing from this listing.

  MCSection *ReadOnlySec = nullptr;
  if (TM.Options.EnableStaticDataPartitioning) {
    // Partitioned data: section choice may depend on the table itself.
    ReadOnlySec =
        TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
  } else {
    ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
  }
  OutStreamer->switchSection(ReadOnlySec);

  auto AFI = MF->getInfo<AArch64FunctionInfo>();
  for (unsigned JTI : JumpTableIndices) {
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;

    // If this jump table was deleted, ignore it.
    if (JTBBs.empty()) continue;

    unsigned Size = AFI->getJumpTableEntrySize(JTI);
    emitAlignment(Align(Size));
    OutStreamer->emitLabel(GetJTISymbol(JTI));

    const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
    const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);

    for (auto *JTBB : JTBBs) {
      const MCExpr *Value =
          MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);

      // Each entry is:
      //     .byte/.hword (LBB - Lbase)>>2
      // or plain:
      //     .word LBB - Lbase
      Value = MCBinaryExpr::createSub(Value, Base, OutContext);
      if (Size != 4)
        // NOTE(review): the shift-right expression construction wrapping the
        // operands below appears truncated in this listing.
            Value, MCConstantExpr::create(2, OutContext), OutContext);

      OutStreamer->emitValue(Value, Size);
    }
  }
}
1343
/// Describe a jump table for CodeView debug info: returns the base symbol,
/// base offset, branch label, and the CodeView entry-size kind matching the
/// table's entry width.
// NOTE(review): the return-type continuation and the declaration of
// `EntrySize` (a codeview::JumpTableEntrySize) appear to be missing from
// this listing.
std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
                                            const MachineInstr *BranchInstr,
                                            const MCSymbol *BranchLabel) const {
  const auto AFI = MF->getInfo<AArch64FunctionInfo>();
  const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
  switch (AFI->getJumpTableEntrySize(JTI)) {
  case 1:
    // Byte entries hold (target - base) >> 2.
    EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
    break;
  case 2:
    EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
    break;
  case 4:
    // Word entries hold the plain difference.
    EntrySize = codeview::JumpTableEntrySize::Int32;
    break;
  default:
    llvm_unreachable("Unexpected jump table entry size");
  }
  return std::make_tuple(Base, 0, BranchLabel, EntrySize);
}
1367
/// Emit the function's entry label, plus: a .variant_pcs directive for
/// vector/SVE calling conventions on ELF, and ARM64EC mangled-name aliases
/// on Windows.
void AArch64AsmPrinter::emitFunctionEntryLabel() {
  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatELF() &&
      (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
       MF->getFunction().getCallingConv() ==
           CallingConv::AArch64_SVE_VectorCall ||
       MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
    auto *TS =
        static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
    // Mark this symbol as using a variant procedure-call standard.
    TS->emitDirectiveVariantPCS(CurrentFnSym);
  }

  // NOTE(review): the call emitting the entry label itself (base-class
  // AsmPrinter hook) appears to be missing from this listing.

  if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
    // For ARM64EC targets, a function definition's name is mangled differently
    // from the normal symbol, emit required aliases here.
    auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
      OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
      OutStreamer->emitAssignment(
          Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
    };

    // Look up an optional symbol name stored in function metadata.
    auto getSymbolFromMetadata = [&](StringRef Name) {
      MCSymbol *Sym = nullptr;
      if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
        StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
        Sym = MMI->getContext().getOrCreateSymbol(NameStr);
      }
      return Sym;
    };

    SmallVector<MDNode *> UnmangledNames;
    MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
    for (MDNode *Node : UnmangledNames) {
      StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
      MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
      if (std::optional<std::string> MangledName =
              getArm64ECMangledFunctionName(UnmangledSym->getName())) {
        MCSymbol *ECMangledSym =
            MMI->getContext().getOrCreateSymbol(*MangledName);
        emitFunctionAlias(UnmangledSym, ECMangledSym);
      }
    }
    if (MCSymbol *ECMangledSym =
            getSymbolFromMetadata("arm64ec_ecmangled_name"))
      emitFunctionAlias(ECMangledSym, CurrentFnSym);
  }
}
1417
/// Emit one ctors/dtors list entry, validating that any signed pointer uses
/// only the special address-discriminator value 'ptr inttoptr (i64 1 to ptr)'.
void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
                                       const Constant *CV) {
  if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
    if (CPA->hasAddressDiscriminator() &&
        !CPA->hasSpecialAddressDiscriminator(
        // NOTE(review): the discriminator constant argument and the
        // surrounding fatal-error call appear truncated in this listing.
        "unexpected address discrimination value for ctors/dtors entry, only "
        "'ptr inttoptr (i64 1 to ptr)' is allowed");
  // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
  // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
  // actual address discrimination value and only checks
  // hasAddressDiscriminator(), so it's OK to leave special address
  // discrimination value here.
  // NOTE(review): the fall-through to the base-class emission appears to be
  // missing from this listing.
}
1434
/// Emit a global alias; for ARM64EC patchable functions marked with
/// "arm64ec_exp_name" metadata, emit an "EXP+"-style weak alias pair
/// instead of a normal alias.
void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
                                        const GlobalAlias &GA) {
  if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
    // Global aliases must point to a definition, but unmangled patchable
    // symbols are special and need to point to an undefined symbol with "EXP+"
    // prefix. Such undefined symbol is resolved by the linker by creating
    // x86 thunk that jumps back to the actual EC target.
    if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
      StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
      MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
      MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());

      // Declare the EXP+ symbol as an external function-typed COFF symbol.
      OutStreamer->beginCOFFSymbolDef(ExpSym);
      OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
      // NOTE(review): the complex-type shift operand of this call appears
      // truncated in this listing.
      OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
      OutStreamer->endCOFFSymbolDef();

      // Declare the alias itself, weak, and point it at the EXP+ symbol.
      OutStreamer->beginCOFFSymbolDef(Sym);
      OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
      // NOTE(review): likewise truncated here.
      OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
      OutStreamer->endCOFFSymbolDef();
      OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
      OutStreamer->emitAssignment(
          Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
      return;
    }
  }
  // NOTE(review): the base-class fall-through for ordinary aliases appears
  // to be missing from this listing.
}
1466
1467/// Small jump tables contain an unsigned byte or half, representing the offset
1468/// from the lowest-addressed possible destination to the desired basic
1469/// block. Since all instructions are 4-byte aligned, this is further compressed
1470/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1471/// materialize the correct destination we need:
1472///
1473/// adr xDest, .LBB0_0
1474/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1475/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1476void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1477 const llvm::MachineInstr &MI) {
1478 Register DestReg = MI.getOperand(0).getReg();
1479 Register ScratchReg = MI.getOperand(1).getReg();
1480 Register ScratchRegW =
1481 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1482 Register TableReg = MI.getOperand(2).getReg();
1483 Register EntryReg = MI.getOperand(3).getReg();
1484 int JTIdx = MI.getOperand(4).getIndex();
1485 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1486
1487 // This has to be first because the compression pass based its reachability
1488 // calculations on the start of the JumpTableDest instruction.
1489 auto Label =
1490 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1491
1492 // If we don't already have a symbol to use as the base, use the ADR
1493 // instruction itself.
1494 if (!Label) {
1495 Label = MF->getContext().createTempSymbol();
1496 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1497 OutStreamer.emitLabel(Label);
1498 }
1499
1500 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1501 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1502 .addReg(DestReg)
1503 .addExpr(LabelExpr));
1504
1505 // Load the number of instruction-steps to offset from the label.
1506 unsigned LdrOpcode;
1507 switch (Size) {
1508 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1509 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1510 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1511 default:
1512 llvm_unreachable("Unknown jump table size");
1513 }
1514
1515 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1516 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1517 .addReg(TableReg)
1518 .addReg(EntryReg)
1519 .addImm(0)
1520 .addImm(Size == 1 ? 0 : 1));
1521
1522 // Add to the already materialized base label address, multiplying by 4 if
1523 // compressed.
1524 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1525 .addReg(DestReg)
1526 .addReg(DestReg)
1527 .addReg(ScratchReg)
1528 .addImm(Size == 4 ? 0 : 2));
1529}
1530
/// Lower a hardened jump-table dispatch pseudo: bounds-check the index in
/// x16 (clamping out-of-range indices to entry 0), load the 32-bit relative
/// entry, and branch. Uses only x16/x17 so no other registers are clobbered.
void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  assert(MJTI && "Can't lower jump-table dispatch without JTI");

  const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
  assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");

  // Emit:
  //     mov x17, #<size of table>     ; depending on table size, with MOVKs
  //     cmp x16, x17                  ; or #imm if table size fits in 12-bit
  //     csel x16, x16, xzr, ls        ; check for index overflow
  //
  //     adrp x17, Ltable@PAGE         ; materialize table address
  //     add x17, Ltable@PAGEOFF
  //     ldrsw x16, [x17, x16, lsl #2] ; load table entry
  //
  //   Lanchor:
  //     adr x17, Lanchor              ; compute target address
  //     add x16, x17, x16
  //     br x16                        ; branch to target

  MachineOperand JTOp = MI.getOperand(0);

  unsigned JTI = JTOp.getIndex();
  assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
         "unsupported compressed jump table");

  const uint64_t NumTableEntries = JTs[JTI].MBBs.size();

  // cmp only supports a 12-bit immediate. If we need more, materialize the
  // immediate, using x17 as a scratch register.
  uint64_t MaxTableEntry = NumTableEntries - 1;
  if (isUInt<12>(MaxTableEntry)) {
    // cmp x16, #MaxTableEntry (SUBS into XZR).
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addImm(MaxTableEntry)
                                     .addImm(0));
  } else {
    emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
    // It's sad that we have to manually materialize instructions, but we can't
    // trivially reuse the main pseudo expansion logic.
    // A MOVK sequence is easy enough to generate and handles the general case.
    for (int Offset = 16; Offset < 64; Offset += 16) {
      if ((MaxTableEntry >> Offset) == 0)
        break;
      emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
               Offset);
    }
    // cmp x16, x17 (SUBS into XZR).
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X17)
                                     .addImm(0));
  }

  // This picks entry #0 on failure.
  // We might want to trap instead.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::XZR)
                                   .addImm(AArch64CC::LS));

  // Prepare the @PAGE/@PAGEOFF low/high operands.
  MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
  MCOperand JTMCHi, JTMCLo;

  JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
  JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
  MCInstLowering.lowerOperand(JTMOLo, JTMCLo);

  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X17)
                                   .addOperand(JTMCLo)
                                   .addImm(0));

  // ldrsw x16, [x17, x16, lsl #2]: sign-extended 32-bit table entry.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0)
                                   .addImm(1));

  // Entries are relative to the anchor label emitted right here.
  MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
  const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
  AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);

  OutStreamer->emitLabel(AdrLabel);
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
}
1639
1640void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1641 const llvm::MachineInstr &MI) {
1642 unsigned Opcode = MI.getOpcode();
1643 assert(STI->hasMOPS());
1644 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1645
1646 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1647 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1648 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1649 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1650 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1651 if (Opcode == AArch64::MOPSMemorySetPseudo)
1652 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1653 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1654 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1655 llvm_unreachable("Unhandled memory operation pseudo");
1656 }();
1657 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1658 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1659
1660 for (auto Op : Ops) {
1661 int i = 0;
1662 auto MCIB = MCInstBuilder(Op);
1663 // Destination registers
1664 MCIB.addReg(MI.getOperand(i++).getReg());
1665 MCIB.addReg(MI.getOperand(i++).getReg());
1666 if (!IsSet)
1667 MCIB.addReg(MI.getOperand(i++).getReg());
1668 // Input registers
1669 MCIB.addReg(MI.getOperand(i++).getReg());
1670 MCIB.addReg(MI.getOperand(i++).getReg());
1671 MCIB.addReg(MI.getOperand(i++).getReg());
1672
1673 EmitToStreamer(OutStreamer, MCIB);
1674 }
1675}
1676
/// Lower a STACKMAP: record the map at a fresh label, then emit NOP shadow
/// bytes, trimming the shadow by real instructions that follow in the block.
void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                                      const MachineInstr &MI) {
  unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();

  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);

  SM.recordStackMap(*MILabel, MI);
  assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");

  // Scan ahead to trim the shadow.
  const MachineBasicBlock &MBB = *MI.getParent();
  // NOTE(review): the declaration of the iterator `MII` (starting at MI)
  // appears to be missing from this listing.
  ++MII;
  while (NumNOPBytes > 0) {
    // Stop trimming at block end, calls, debug values, or other patch points.
    if (MII == MBB.end() || MII->isCall() ||
        MII->getOpcode() == AArch64::DBG_VALUE ||
        MII->getOpcode() == TargetOpcode::PATCHPOINT ||
        MII->getOpcode() == TargetOpcode::STACKMAP)
      break;
    ++MII;
    NumNOPBytes -= 4;
  }

  // Emit nops.
  for (unsigned i = 0; i < NumNOPBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
}
1706
1707// Lower a patchpoint of the form:
1708// [<def>], <id>, <numBytes>, <target>, <numArgs>
1709void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1710 const MachineInstr &MI) {
1711 auto &Ctx = OutStreamer.getContext();
1712 MCSymbol *MILabel = Ctx.createTempSymbol();
1713 OutStreamer.emitLabel(MILabel);
1714 SM.recordPatchPoint(*MILabel, MI);
1715
1716 PatchPointOpers Opers(&MI);
1717
1718 int64_t CallTarget = Opers.getCallTarget().getImm();
1719 unsigned EncodedBytes = 0;
1720 if (CallTarget) {
1721 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1722 "High 16 bits of call target should be zero.");
1723 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1724 EncodedBytes = 16;
1725 // Materialize the jump address:
1726 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1727 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1728 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1729 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1730 }
1731 // Emit padding.
1732 unsigned NumBytes = Opers.getNumPatchBytes();
1733 assert(NumBytes >= EncodedBytes &&
1734 "Patchpoint can't request size less than the length of a call.");
1735 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1736 "Invalid number of NOP bytes requested!");
1737 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1738 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1739}
1740
/// Lower a STATEPOINT: either emit the requested NOP patch area, or lower
/// the call target to a BL/BLR; then record the statepoint at a label
/// placed after the call.
void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    for (unsigned i = 0; i < PatchBytes; i += 4)
      EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
  } else {
    // Lower call target and choose correct opcode
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    // NOTE(review): the `case MachineOperand::...:` labels of this switch
    // appear to be missing from this listing; the bodies below correspond
    // to symbol, immediate, and register targets respectively.
    switch (CallTarget.getType()) {
      MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
      CallOpcode = AArch64::BL;
      break;
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = AArch64::BL;
      break;
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = AArch64::BLR;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    EmitToStreamer(OutStreamer,
                   MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
  }

  // The statepoint label is recorded after the call sequence.
  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);
  SM.recordStatepoint(*MILabel, MI);
}
1781
/// Lower a FAULTING_OP pseudo: record the faulting label and handler in the
/// fault map, then re-emit the wrapped instruction with its real opcode.
void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
  // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
  //                  <opcode>, <operands>

  Register DefRegister = FaultingMI.getOperand(0).getReg();
  // NOTE(review): the left-hand side of this initialization (the declaration
  // of `FK`, a FaultMaps::FaultKind) appears to be missing from this listing.
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *FaultingLabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(FaultingLabel);

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);

  // Rebuild the real instruction from the pseudo's trailing operands.
  MCInst MI;
  MI.setOpcode(Opcode);

  if (DefRegister != (Register)0)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (const MachineOperand &MO :
       llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
    MCOperand Dest;
    lowerOperand(MO, Dest);
    MI.addOperand(Dest);
  }

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  EmitToStreamer(MI);
}
1816
1817void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1818 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1819 .addReg(Dest)
1820 .addReg(AArch64::XZR)
1821 .addReg(Src)
1822 .addImm(0));
1823}
1824
1825void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1826 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1827 EmitToStreamer(*OutStreamer,
1828 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1829 .addReg(Dest)
1830 .addImm(Imm)
1831 .addImm(Shift));
1832}
1833
1834void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1835 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1836 EmitToStreamer(*OutStreamer,
1837 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1838 .addReg(Dest)
1839 .addReg(Dest)
1840 .addImm(Imm)
1841 .addImm(Shift));
1842}
1843
/// Zero an FP register. Prefers MOVI-based zeroing (zero-cycle on cores that
/// support it, widened to the D or Q form as required); otherwise falls back
/// to an FMOV from WZR/XZR.
void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
  Register DestReg = MI.getOperand(0).getReg();
  if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
    if (STI->hasZeroCycleZeroingFPR64()) {
      // Convert H/S register to corresponding D register
      const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
      if (AArch64::FPR16RegClass.contains(DestReg))
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
                                           &AArch64::FPR64RegClass);
      else if (AArch64::FPR32RegClass.contains(DestReg))
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
                                           &AArch64::FPR64RegClass);
      else
        assert(AArch64::FPR64RegClass.contains(DestReg));

      // movi d0, #0
      MCInst MOVI;
      MOVI.setOpcode(AArch64::MOVID);
      MOVI.addOperand(MCOperand::createReg(DestReg));
      // NOTE(review): the immediate-zero operand of MOVI appears to be
      // missing from this listing.
      EmitToStreamer(*OutStreamer, MOVI);
    } else if (STI->hasZeroCycleZeroingFPR128()) {
      // Convert H/S/D register to corresponding Q register
      const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
      if (AArch64::FPR16RegClass.contains(DestReg)) {
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
                                           &AArch64::FPR128RegClass);
      } else if (AArch64::FPR32RegClass.contains(DestReg)) {
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
                                           &AArch64::FPR128RegClass);
      } else {
        assert(AArch64::FPR64RegClass.contains(DestReg));
        DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
                                           &AArch64::FPR128RegClass);
      }

      // movi v0.2d, #0
      MCInst MOVI;
      MOVI.setOpcode(AArch64::MOVIv2d_ns);
      MOVI.addOperand(MCOperand::createReg(DestReg));
      // NOTE(review): the immediate-zero operand of MOVI appears to be
      // missing from this listing.
      EmitToStreamer(*OutStreamer, MOVI);
    } else {
      emitFMov0AsFMov(MI, DestReg);
    }
  } else {
    emitFMov0AsFMov(MI, DestReg);
  }
}
1891
1892void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1893 Register DestReg) {
1894 MCInst FMov;
1895 switch (MI.getOpcode()) {
1896 default:
1897 llvm_unreachable("Unexpected opcode");
1898 case AArch64::FMOVH0:
1899 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1900 if (!STI->hasFullFP16())
1901 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1902 FMov.addOperand(MCOperand::createReg(DestReg));
1903 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1904 break;
1905 case AArch64::FMOVS0:
1906 FMov.setOpcode(AArch64::FMOVWSr);
1907 FMov.addOperand(MCOperand::createReg(DestReg));
1908 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1909 break;
1910 case AArch64::FMOVD0:
1911 FMov.setOpcode(AArch64::FMOVXDr);
1912 FMov.addOperand(MCOperand::createReg(DestReg));
1913 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1914 break;
1915 }
1916 EmitToStreamer(*OutStreamer, FMov);
1917}
1918
1919Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint16_t Disc,
1920 Register AddrDisc,
1921 Register ScratchReg,
1922 bool MayUseAddrAsScratch) {
1923 assert(ScratchReg == AArch64::X16 || ScratchReg == AArch64::X17 ||
1924 !STI->isX16X17Safer());
1925 // So far we've used NoRegister in pseudos. Now we need real encodings.
1926 if (AddrDisc == AArch64::NoRegister)
1927 AddrDisc = AArch64::XZR;
1928
1929 // If there is no constant discriminator, there's no blend involved:
1930 // just use the address discriminator register as-is (XZR or not).
1931 if (!Disc)
1932 return AddrDisc;
1933
1934 // If there's only a constant discriminator, MOV it into the scratch register.
1935 if (AddrDisc == AArch64::XZR) {
1936 emitMOVZ(ScratchReg, Disc, 0);
1937 return ScratchReg;
1938 }
1939
1940 // If there are both, emit a blend into the scratch register.
1941
1942 // Check if we can save one MOV instruction.
1943 assert(MayUseAddrAsScratch || ScratchReg != AddrDisc);
1944 bool AddrDiscIsSafe = AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17 ||
1945 !STI->isX16X17Safer();
1946 if (MayUseAddrAsScratch && AddrDiscIsSafe)
1947 ScratchReg = AddrDisc;
1948 else
1949 emitMovXReg(ScratchReg, AddrDisc);
1950
1951 emitMOVK(ScratchReg, Disc, 48);
1952 return ScratchReg;
1953}
1954
1955/// Emit a code sequence to check an authenticated pointer value.
1956///
1957/// This function emits a sequence of instructions that checks if TestedReg was
1958/// authenticated successfully. On success, execution continues at the next
1959/// instruction after the sequence.
1960///
1961/// The action performed on failure depends on the OnFailure argument:
1962/// * if OnFailure is not nullptr, control is transferred to that label after
1963/// clearing the PAC field
1964/// * otherwise, BRK instruction is emitted to generate an error
1965void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
1966 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
1967 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
1968 // Insert a sequence to check if authentication of TestedReg succeeded,
1969 // such as:
1970 //
1971 // - checked and clearing:
1972 // ; x16 is TestedReg, x17 is ScratchReg
1973 // mov x17, x16
1974 // xpaci x17
1975 // cmp x16, x17
1976 // b.eq Lsuccess
1977 // mov x16, x17
1978 // b Lend
1979 // Lsuccess:
1980 // ; skipped if authentication failed
1981 // Lend:
1982 // ...
1983 //
1984 // - checked and trapping:
1985 // mov x17, x16
1986 // xpaci x17
1987 // cmp x16, x17
1988 // b.eq Lsuccess
1989 // brk #<0xc470 + aut key>
1990 // Lsuccess:
1991 // ...
1992 //
1993 // See the documentation on AuthCheckMethod enumeration constants for
1994 // the specific code sequences that can be used to perform the check.
1996
1997 if (Method == AuthCheckMethod::None)
1998 return;
1999 if (Method == AuthCheckMethod::DummyLoad) {
2000 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
2001 .addReg(getWRegFromXReg(ScratchReg))
2002 .addReg(TestedReg)
2003 .addImm(0));
2004 assert(!OnFailure && "DummyLoad always traps on error");
2005 return;
2006 }
2007
2008 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2009 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2010 // mov Xscratch, Xtested
2011 emitMovXReg(ScratchReg, TestedReg);
2012
2013 if (Method == AuthCheckMethod::XPAC) {
2014 // xpac(i|d) Xscratch
2015 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2016 EmitToStreamer(
2017 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2018 } else {
2019 // xpaclri
2020
2021 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2022 assert(TestedReg == AArch64::LR &&
2023 "XPACHint mode is only compatible with checking the LR register");
2025 "XPACHint mode is only compatible with I-keys");
2026 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2027 }
2028
2029 // cmp Xtested, Xscratch
2030 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2031 .addReg(AArch64::XZR)
2032 .addReg(TestedReg)
2033 .addReg(ScratchReg)
2034 .addImm(0));
2035
2036 // b.eq Lsuccess
2037 EmitToStreamer(
2038 MCInstBuilder(AArch64::Bcc)
2039 .addImm(AArch64CC::EQ)
2040 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2041 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2042 // eor Xscratch, Xtested, Xtested, lsl #1
2043 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2044 .addReg(ScratchReg)
2045 .addReg(TestedReg)
2046 .addReg(TestedReg)
2047 .addImm(1));
2048 // tbz Xscratch, #62, Lsuccess
2049 EmitToStreamer(
2050 MCInstBuilder(AArch64::TBZX)
2051 .addReg(ScratchReg)
2052 .addImm(62)
2053 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2054 } else {
2055 llvm_unreachable("Unsupported check method");
2056 }
2057
2058 if (!OnFailure) {
2059 // Trapping sequences do a 'brk'.
2060 // brk #<0xc470 + aut key>
2061 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2062 } else {
2063 // Non-trapping checked sequences return the stripped result in TestedReg,
2064 // skipping over success-only code (such as re-signing the pointer) by
2065 // jumping to OnFailure label.
2066 // Note that this can introduce an authentication oracle (such as based on
2067 // the high bits of the re-signed value).
2068
2069 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2070 // instead of ScratchReg, thus eliminating one `mov` instruction.
2071 // Both XPAC and XPACHint can be further optimized by not using a
2072 // conditional branch jumping over an unconditional one.
2073
2074 switch (Method) {
2075 case AuthCheckMethod::XPACHint:
2076 // LR is already XPAC-ed at this point.
2077 break;
2078 case AuthCheckMethod::XPAC:
2079 // mov Xtested, Xscratch
2080 emitMovXReg(TestedReg, ScratchReg);
2081 break;
2082 default:
2083 // If Xtested was not XPAC-ed so far, emit XPAC here.
2084 // xpac(i|d) Xtested
2085 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2086 EmitToStreamer(
2087 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2088 }
2089
2090 // b Lend
2091 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2092 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2093 }
2094
2095 // If the auth check succeeds, we can continue.
2096 // Lsuccess:
2097 OutStreamer->emitLabel(SuccessSym);
2098}
2099
2100// With Pointer Authentication, it may be needed to explicitly check the
2101// authenticated value in LR before performing a tail call.
2102// Otherwise, the callee may re-sign the invalid return address,
2103// introducing a signing oracle.
2104void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2105 if (!AArch64FI->shouldSignReturnAddress(*MF))
2106 return;
2107
2108 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2109 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2110 return;
2111
2112 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2113 Register ScratchReg =
2114 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2115 assert(!TC->readsRegister(ScratchReg, TRI) &&
2116 "Neither x16 nor x17 is available as a scratch register");
2119 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2120 LRCheckMethod);
2121}
2122
2123bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2124 if (!DS)
2125 return false;
2126
2127 if (isa<GlobalAlias>(DS)) {
2128 // Just emit the nop directly.
2129 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2130 return true;
2131 }
2132 MCSymbol *Dot = OutContext.createTempSymbol();
2133 OutStreamer->emitLabel(Dot);
2134 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2135
2136 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2137 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2138 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2139 SMLoc());
2140 return false;
2141}
2142
2143void AArch64AsmPrinter::emitPtrauthAuthResign(
2144 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2145 const MachineOperand *AUTAddrDisc, Register Scratch,
2146 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2147 Register PACAddrDisc, Value *DS) {
2148 const bool IsAUTPAC = PACKey.has_value();
2149
2150 // We expand AUT/AUTPAC into a sequence of the form
2151 //
2152 // ; authenticate x16
2153 // ; check pointer in x16
2154 // Lsuccess:
2155 // ; sign x16 (if AUTPAC)
2156 // Lend: ; if not trapping on failure
2157 //
2158 // with the checking sequence chosen depending on whether/how we should check
2159 // the pointer and whether we should trap on failure.
2160
2161 // By default, auth/resign sequences check for auth failures.
2162 bool ShouldCheck = true;
2163 // In the checked sequence, we only trap if explicitly requested.
2164 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2165
2166 // On an FPAC CPU, you get traps whether you want them or not: there's
2167 // no point in emitting checks or traps.
2168 if (STI->hasFPAC())
2169 ShouldCheck = ShouldTrap = false;
2170
2171 // However, command-line flags can override this, for experimentation.
2172 switch (PtrauthAuthChecks) {
2174 break;
2176 ShouldCheck = ShouldTrap = false;
2177 break;
2179 ShouldCheck = true;
2180 ShouldTrap = false;
2181 break;
2183 ShouldCheck = ShouldTrap = true;
2184 break;
2185 }
2186
2187 // Compute aut discriminator
2188 assert(isUInt<16>(AUTDisc));
2189 Register AUTDiscReg = emitPtrauthDiscriminator(
2190 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2191 bool AUTZero = AUTDiscReg == AArch64::XZR;
2192 unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero);
2193
2194 if (!emitDeactivationSymbolRelocation(DS)) {
2195 // autiza x16 ; if AUTZero
2196 // autia x16, x17 ; if !AUTZero
2197 MCInst AUTInst;
2198 AUTInst.setOpcode(AUTOpc);
2199 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2200 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2201 if (!AUTZero)
2202 AUTInst.addOperand(MCOperand::createReg(AUTDiscReg));
2203 EmitToStreamer(*OutStreamer, AUTInst);
2204 }
2205
2206 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2207 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2208 return;
2209
2210 MCSymbol *EndSym = nullptr;
2211
2212 if (ShouldCheck) {
2213 if (IsAUTPAC && !ShouldTrap)
2214 EndSym = createTempSymbol("resign_end_");
2215
2216 emitPtrauthCheckAuthenticatedValue(
2217 AUTVal, Scratch, AUTKey, AArch64PAuth::AuthCheckMethod::XPAC, EndSym);
2218 }
2219
2220 // We already emitted unchecked and checked-but-non-trapping AUTs.
2221 // That left us with trapping AUTs, and AUTPACs.
2222 // Trapping AUTs don't need PAC: we're done.
2223 if (!IsAUTPAC)
2224 return;
2225
2226 // Compute pac discriminator
2227 assert(isUInt<16>(PACDisc));
2228 Register PACDiscReg =
2229 emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2230 bool PACZero = PACDiscReg == AArch64::XZR;
2231 unsigned PACOpc = getPACOpcodeForKey(*PACKey, PACZero);
2232
2233 // pacizb x16 ; if PACZero
2234 // pacib x16, x17 ; if !PACZero
2235 MCInst PACInst;
2236 PACInst.setOpcode(PACOpc);
2237 PACInst.addOperand(MCOperand::createReg(AUTVal));
2238 PACInst.addOperand(MCOperand::createReg(AUTVal));
2239 if (!PACZero)
2240 PACInst.addOperand(MCOperand::createReg(PACDiscReg));
2241 EmitToStreamer(*OutStreamer, PACInst);
2242
2243 // Lend:
2244 if (EndSym)
2245 OutStreamer->emitLabel(EndSym);
2246}
2247
2248void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2249 Register Val = MI->getOperand(1).getReg();
2250 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2251 uint64_t Disc = MI->getOperand(3).getImm();
2252 Register AddrDisc = MI->getOperand(4).getReg();
2253 bool AddrDiscKilled = MI->getOperand(4).isKill();
2254
2255 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2256 // register is available.
2257 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2258 assert(ScratchReg != AddrDisc &&
2259 "Neither X16 nor X17 is available as a scratch register");
2260
2261 // Compute pac discriminator
2262 assert(isUInt<16>(Disc));
2263 Register DiscReg = emitPtrauthDiscriminator(
2264 Disc, AddrDisc, ScratchReg, /*MayUseAddrAsScratch=*/AddrDiscKilled);
2265 bool IsZeroDisc = DiscReg == AArch64::XZR;
2266 unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc);
2267
2268 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2269 return;
2270
2271 // paciza x16 ; if IsZeroDisc
2272 // pacia x16, x17 ; if !IsZeroDisc
2273 MCInst PACInst;
2274 PACInst.setOpcode(Opc);
2275 PACInst.addOperand(MCOperand::createReg(Val));
2276 PACInst.addOperand(MCOperand::createReg(Val));
2277 if (!IsZeroDisc)
2278 PACInst.addOperand(MCOperand::createReg(DiscReg));
2279 EmitToStreamer(*OutStreamer, PACInst);
2280}
2281
2282void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2283 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2284 unsigned BrTarget = MI->getOperand(0).getReg();
2285
2286 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2288 "Invalid auth call key");
2289
2290 uint64_t Disc = MI->getOperand(2).getImm();
2291 assert(isUInt<16>(Disc));
2292
2293 unsigned AddrDisc = MI->getOperand(3).getReg();
2294
2295 // Make sure AddrDisc is solely used to compute the discriminator.
2296 // While hardly meaningful, it is still possible to describe an authentication
2297 // of a pointer against its own value (instead of storage address) with
2298 // intrinsics, so use report_fatal_error instead of assert.
2299 if (BrTarget == AddrDisc)
2300 report_fatal_error("Branch target is signed with its own value");
2301
2302 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2303 // fact that x16 and x17 are described as clobbered by the MI instruction and
2304 // AddrDisc is not used as any other input.
2305 //
2306 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2307 // either x16 or x17, meaning the returned register is always among the
2308 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2309 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2310 // among x16 and x17 to prevent clobbering unexpected registers.
2311 //
2312 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2313 // declared as clobbering x16/x17.
2314 //
2315 // FIXME: Make use of `killed` flags and register masks instead.
2316 bool AddrDiscIsImplicitDef =
2317 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2318 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2319 AddrDiscIsImplicitDef);
2320 bool IsZeroDisc = DiscReg == AArch64::XZR;
2321
2322 unsigned Opc;
2323 if (IsCall) {
2324 if (Key == AArch64PACKey::IA)
2325 Opc = IsZeroDisc ? AArch64::BLRAAZ : AArch64::BLRAA;
2326 else
2327 Opc = IsZeroDisc ? AArch64::BLRABZ : AArch64::BLRAB;
2328 } else {
2329 if (Key == AArch64PACKey::IA)
2330 Opc = IsZeroDisc ? AArch64::BRAAZ : AArch64::BRAA;
2331 else
2332 Opc = IsZeroDisc ? AArch64::BRABZ : AArch64::BRAB;
2333 }
2334
2335 MCInst BRInst;
2336 BRInst.setOpcode(Opc);
2337 BRInst.addOperand(MCOperand::createReg(BrTarget));
2338 if (!IsZeroDisc)
2339 BRInst.addOperand(MCOperand::createReg(DiscReg));
2340 EmitToStreamer(*OutStreamer, BRInst);
2341}
2342
/// Emit code adding the signed constant \p Addend to register \p Reg in
/// place.  Small offsets are folded directly into ADD/SUB immediates; wider
/// ones are first materialized into the scratch register \p Tmp.  Emits
/// nothing when \p Addend is zero.
void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
                                   MCRegister Tmp) {
  if (Addend != 0) {
    // Split into magnitude and sign: the sign picks ADD vs SUB (or MOVZ vs
    // MOVN below); the magnitude supplies the immediate chunks.
    const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
    const bool IsNeg = Addend < 0;
    if (isUInt<24>(AbsOffset)) {
      // Up to two ADD/SUB immediates: the low 12-bit chunk, then a second
      // chunk shifted left by 12.  All-zero chunks are skipped.
      for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
           BitPos += 12) {
        EmitToStreamer(
            MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
                .addReg(Reg)
                .addReg(Reg)
                .addImm((AbsOffset >> BitPos) & 0xfff)
                .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
      }
    } else {
      // Wide addend: build the two's-complement value in Tmp starting from
      // MOVZ (positive) or MOVN (negative, which writes ~imm), patch in
      // further 16-bit chunks with MOVK, then add the register.
      const uint64_t UAddend = Addend;
      EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
                         .addReg(Tmp)
                         .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
                         .addImm(/*shift=*/0));
      // A MOVK at BitPos is needed unless every 16-bit chunk from BitPos
      // upward is already implied by the first instruction: all-zero for
      // MOVZ, all-ones for MOVN.
      auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
        assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
        uint64_t Shifted = UAddend >> BitPos;
        if (!IsNeg)
          return Shifted != 0;
        for (int I = 0; I != 64 - BitPos; I += 16)
          if (((Shifted >> I) & 0xffff) != 0xffff)
            return true;
        return false;
      };
      for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
        emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);

      EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
                         .addReg(Reg)
                         .addReg(Reg)
                         .addReg(Tmp)
                         .addImm(/*shift=*/0));
    }
  }
}
2385
2386void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
2387 MCRegister Tmp, bool DSOLocal,
2388 const MCSubtargetInfo &STI) {
2389 MCValue Val;
2390 if (!Expr->evaluateAsRelocatable(Val, nullptr))
2391 report_fatal_error("emitAddress could not evaluate");
2392 if (DSOLocal) {
2393 EmitToStreamer(
2394 MCInstBuilder(AArch64::ADRP)
2395 .addReg(Reg)
2397 OutStreamer->getContext())));
2398 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2399 .addReg(Reg)
2400 .addReg(Reg)
2401 .addExpr(MCSpecifierExpr::create(
2402 Expr, AArch64::S_LO12, OutStreamer->getContext()))
2403 .addImm(0));
2404 } else {
2405 auto *SymRef =
2406 MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
2407 EmitToStreamer(
2408 MCInstBuilder(AArch64::ADRP)
2409 .addReg(Reg)
2411 OutStreamer->getContext())));
2412 EmitToStreamer(
2413 MCInstBuilder(AArch64::LDRXui)
2414 .addReg(Reg)
2415 .addReg(Reg)
2417 OutStreamer->getContext())));
2418 emitAddImm(Reg, Val.getConstant(), Tmp);
2419 }
2420}
2421
2423 const MCExpr *Target,
2424 const MCExpr *DSExpr) {
2425 // No released version of glibc supports PAuth relocations.
2426 if (TT.isOSGlibc() || TT.isMusl())
2427 return false;
2428
2429 // We emit PAuth constants as IRELATIVE relocations in cases where the
2430 // constant cannot be represented as a PAuth relocation:
2431 // 1) There is a deactivation symbol.
2432 // 2) The signed value is not a symbol.
2433 return !DSExpr && !isa<MCConstantExpr>(Target);
2434}
2435
2437 // IFUNCs are ELF-only.
2438 if (!TT.isOSBinFormatELF())
2439 return false;
2440
2441 // musl doesn't support IFUNCs.
2442 if (TT.isMusl())
2443 return false;
2444
2445 return true;
2446}
2447
2448// Emit an ifunc resolver that returns a signed pointer to the specified target,
2449// and return a FUNCINIT reference to the resolver. In the linked binary, this
2450// function becomes the target of an IRELATIVE relocation. This resolver is used
2451// to relocate signed pointers in global variable initializers in special cases
2452// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2453//
2454// Example (signed null pointer, not address discriminated):
2455//
2456// .8byte .Lpauth_ifunc0
2457// .pushsection .text.startup,"ax",@progbits
2458// .Lpauth_ifunc0:
2459// mov x0, #0
2460// mov x1, #12345
2461// b __emupac_pacda
2462//
2463// Example (signed null pointer, address discriminated):
2464//
2465// .Ltmp:
2466// .8byte .Lpauth_ifunc0
2467// .pushsection .text.startup,"ax",@progbits
2468// .Lpauth_ifunc0:
2469// mov x0, #0
2470// adrp x1, .Ltmp
2471// add x1, x1, :lo12:.Ltmp
2472// b __emupac_pacda
2473// .popsection
2474//
2475// Example (signed pointer to symbol, not address discriminated):
2476//
2477// .Ltmp:
2478// .8byte .Lpauth_ifunc0
2479// .pushsection .text.startup,"ax",@progbits
2480// .Lpauth_ifunc0:
2481// adrp x0, symbol
2482// add x0, x0, :lo12:symbol
2483// mov x1, #12345
2484// b __emupac_pacda
2485// .popsection
2486//
2487// Example (signed null pointer, not address discriminated, with deactivation
2488// symbol ds):
2489//
2490// .8byte .Lpauth_ifunc0
2491// .pushsection .text.startup,"ax",@progbits
2492// .Lpauth_ifunc0:
2493// mov x0, #0
2494// mov x1, #12345
2495// .reloc ., R_AARCH64_PATCHINST, ds
2496// b __emupac_pacda
2497// ret
2498// .popsection
2499const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2500 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
2501 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2502 const Triple &TT = TM.getTargetTriple();
2503
2504 // We only emit an IRELATIVE relocation if the target supports IRELATIVE and
2505 // does not support the kind of PAuth relocation that we are trying to emit.
2506 if (targetSupportsPAuthRelocation(TT, Target, DSExpr) ||
2508 return nullptr;
2509
2510 // For now, only the DA key is supported.
2511 if (KeyID != AArch64PACKey::DA)
2512 return nullptr;
2513
2514 std::unique_ptr<MCSubtargetInfo> STI(
2515 TM.getTarget().createMCSubtargetInfo(TT, "", ""));
2516 assert(STI && "Unable to create subtarget info");
2517 this->STI = static_cast<const AArch64Subtarget *>(&*STI);
2518
2519 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2520 OutStreamer->emitLabel(Place);
2521 OutStreamer->pushSection();
2522
2523 const MCSymbolELF *Group =
2524 static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
2525 ->getGroup();
2527 if (Group)
2529 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
2530 ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
2531 Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));
2532
2533 MCSymbol *IRelativeSym =
2534 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2535 OutStreamer->emitLabel(IRelativeSym);
2536 if (isa<MCConstantExpr>(Target)) {
2537 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2538 .addReg(AArch64::X0)
2539 .addExpr(Target)
2540 .addImm(0),
2541 *STI);
2542 } else {
2543 emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, *STI);
2544 }
2545 if (HasAddressDiversity) {
2546 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2547 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2548 MCConstantExpr::create(Disc, OutStreamer->getContext()),
2549 OutStreamer->getContext());
2550 emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
2551 *STI);
2552 } else {
2553 if (!isUInt<16>(Disc)) {
2554 OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
2555 Twine(Disc) +
2556 "' out of range [0, 0xFFFF]");
2557 }
2558 emitMOVZ(AArch64::X1, Disc, 0);
2559 }
2560
2561 if (DSExpr) {
2562 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2563 OutStreamer->emitLabel(PrePACInst);
2564
2565 auto *PrePACInstExpr =
2566 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2567 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2568 DSExpr, SMLoc());
2569 }
2570
2571 // We don't know the subtarget because this is being emitted for a global
2572 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2573 // always call the EmuPAC runtime, which will end up using the PAC instruction
2574 // if the target supports PAC.
2575 MCSymbol *EmuPAC =
2576 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2577 const MCSymbolRefExpr *EmuPACRef =
2578 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2579 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2580 *STI);
2581
2582 // We need a RET despite the above tail call because the deactivation symbol
2583 // may replace the tail call with a NOP.
2584 if (DSExpr)
2585 OutStreamer->emitInstruction(
2586 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
2587 OutStreamer->popSection();
2588
2589 return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
2590 OutStreamer->getContext());
2591}
2592
2593const MCExpr *
2594AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2595 MCContext &Ctx = OutContext;
2596
2597 // Figure out the base symbol and the addend, if any.
2598 APInt Offset(64, 0);
2599 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2600 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2601
2602 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2603
2604 const MCExpr *Sym;
2605 if (BaseGVB) {
2606 // If there is an addend, turn that into the appropriate MCExpr.
2607 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2608 if (Offset.sgt(0))
2610 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2611 else if (Offset.slt(0))
2613 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2614 } else {
2615 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2616 }
2617
2618 const MCExpr *DSExpr = nullptr;
2619 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
2620 if (isa<GlobalAlias>(DS))
2621 return Sym;
2622 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2623 }
2624
2625 uint64_t KeyID = CPA.getKey()->getZExtValue();
2626 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2627 // AArch64AuthMCExpr::printImpl, so fail fast.
2628 if (KeyID > AArch64PACKey::LAST) {
2629 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2630 "' out of range [0, " +
2631 Twine((unsigned)AArch64PACKey::LAST) + "]");
2632 KeyID = 0;
2633 }
2634
2635 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2636
2637 // Check if we need to represent this with an IRELATIVE and emit it if so.
2638 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2639 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2640 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2641 return IFuncSym;
2642
2643 if (!isUInt<16>(Disc)) {
2644 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2645 "' out of range [0, 0xFFFF]");
2646 Disc = 0;
2647 }
2648
2649 if (DSExpr)
2650 report_fatal_error("deactivation symbols unsupported in constant "
2651 "expressions on this target");
2652
2653 // Finally build the complete @AUTH expr.
2654 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2655 CPA.hasAddressDiscriminator(), Ctx);
2656}
2657
2658void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2659 unsigned DstReg = MI.getOperand(0).getReg();
2660 const MachineOperand &GAOp = MI.getOperand(1);
2661 const uint64_t KeyC = MI.getOperand(2).getImm();
2662 assert(KeyC <= AArch64PACKey::LAST &&
2663 "key is out of range [0, AArch64PACKey::LAST]");
2664 const auto Key = (AArch64PACKey::ID)KeyC;
2665 const uint64_t Disc = MI.getOperand(3).getImm();
2666 assert(isUInt<16>(Disc) &&
2667 "constant discriminator is out of range [0, 0xffff]");
2668
2669 // Emit instruction sequence like the following:
2670 // ADRP x16, symbol$auth_ptr$key$disc
2671 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2672 //
2673 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2674 // to symbol.
2675 MCSymbol *AuthPtrStubSym;
2676 if (TM.getTargetTriple().isOSBinFormatELF()) {
2677 const auto &TLOF =
2678 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2679
2680 assert(GAOp.getOffset() == 0 &&
2681 "non-zero offset for $auth_ptr$ stub slots is not supported");
2682 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2683 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2684 } else {
2685 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2686 "LOADauthptrstatic is implemented only for MachO/ELF");
2687
2688 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2689 getObjFileLowering());
2690
2691 assert(GAOp.getOffset() == 0 &&
2692 "non-zero offset for $auth_ptr$ stub slots is not supported");
2693 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2694 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2695 }
2696
2697 MachineOperand StubMOHi =
2699 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2700 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2701 MCOperand StubMCHi, StubMCLo;
2702
2703 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2704 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2705
2706 EmitToStreamer(
2707 *OutStreamer,
2708 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2709
2710 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2711 .addReg(DstReg)
2712 .addReg(DstReg)
2713 .addOperand(StubMCLo));
2714}
2715
2716void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2717 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2718 const bool IsELFSignedGOT = MI.getParent()
2719 ->getParent()
2720 ->getInfo<AArch64FunctionInfo>()
2721 ->hasELFSignedGOT();
2722 MachineOperand GAOp = MI.getOperand(0);
2723 const uint64_t KeyC = MI.getOperand(1).getImm();
2724 assert(KeyC <= AArch64PACKey::LAST &&
2725 "key is out of range [0, AArch64PACKey::LAST]");
2726 const auto Key = (AArch64PACKey::ID)KeyC;
2727 const unsigned AddrDisc = MI.getOperand(2).getReg();
2728 const uint64_t Disc = MI.getOperand(3).getImm();
2729 assert(isUInt<16>(Disc) &&
2730 "constant discriminator is out of range [0, 0xffff]");
2731
2732 const int64_t Offset = GAOp.getOffset();
2733 GAOp.setOffset(0);
2734
2735 // Emit:
2736 // target materialization:
2737 // - via GOT:
2738 // - unsigned GOT:
2739 // adrp x16, :got:target
2740 // ldr x16, [x16, :got_lo12:target]
2741 // add offset to x16 if offset != 0
2742 // - ELF signed GOT:
2743 // adrp x17, :got:target
2744 // add x17, x17, :got_auth_lo12:target
2745 // ldr x16, [x17]
2746 // aut{i|d}a x16, x17
2747 // check+trap sequence (if no FPAC)
2748 // add offset to x16 if offset != 0
2749 //
2750 // - direct:
2751 // adrp x16, target
2752 // add x16, x16, :lo12:target
2753 // add offset to x16 if offset != 0
2754 //
2755 // add offset to x16:
2756 // - abs(offset) fits 24 bits:
2757 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2758 // - abs(offset) does not fit 24 bits:
2759 // - offset < 0:
2760 // movn+movk sequence filling x17 register with the offset (up to 4
2761 // instructions)
2762 // add x16, x16, x17
2763 // - offset > 0:
2764 // movz+movk sequence filling x17 register with the offset (up to 4
2765 // instructions)
2766 // add x16, x16, x17
2767 //
2768 // signing:
2769 // - 0 discriminator:
2770 // paciza x16
2771 // - Non-0 discriminator, no address discriminator:
2772 // mov x17, #Disc
2773 // pacia x16, x17
2774 // - address discriminator (with potentially folded immediate discriminator):
2775 // pacia x16, xAddrDisc
2776
2777 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2778 MCOperand GAMCHi, GAMCLo;
2779
2780 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2781 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2782 if (IsGOTLoad) {
2783 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2784 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2785 }
2786
2787 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2788 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2789
2790 EmitToStreamer(
2791 MCInstBuilder(AArch64::ADRP)
2792 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2793 .addOperand(GAMCHi));
2794
2795 if (IsGOTLoad) {
2796 if (IsELFSignedGOT) {
2797 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2798 .addReg(AArch64::X17)
2799 .addReg(AArch64::X17)
2800 .addOperand(GAMCLo)
2801 .addImm(0));
2802
2803 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2804 .addReg(AArch64::X16)
2805 .addReg(AArch64::X17)
2806 .addImm(0));
2807
2808 assert(GAOp.isGlobal());
2809 assert(GAOp.getGlobal()->getValueType() != nullptr);
2810 unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
2811 ? AArch64::AUTIA
2812 : AArch64::AUTDA;
2813
2814 EmitToStreamer(MCInstBuilder(AuthOpcode)
2815 .addReg(AArch64::X16)
2816 .addReg(AArch64::X16)
2817 .addReg(AArch64::X17));
2818
2819 if (!STI->hasFPAC()) {
2820 auto AuthKey = (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA
2822
2823 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2824 AArch64PAuth::AuthCheckMethod::XPAC);
2825 }
2826 } else {
2827 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2828 .addReg(AArch64::X16)
2829 .addReg(AArch64::X16)
2830 .addOperand(GAMCLo));
2831 }
2832 } else {
2833 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2834 .addReg(AArch64::X16)
2835 .addReg(AArch64::X16)
2836 .addOperand(GAMCLo)
2837 .addImm(0));
2838 }
2839
2840 emitAddImm(AArch64::X16, Offset, AArch64::X17);
2841 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2842
2843 auto MIB = MCInstBuilder(getPACOpcodeForKey(Key, DiscReg == AArch64::XZR))
2844 .addReg(AArch64::X16)
2845 .addReg(AArch64::X16);
2846 if (DiscReg != AArch64::XZR)
2847 MIB.addReg(DiscReg);
2848 EmitToStreamer(MIB);
2849}
2850
// Lower the LOADgotAUTH pseudo: load a pointer from its (signed) GOT slot and
// authenticate it, using the GOT slot address in x17 as the address
// discriminator. With FEAT_FPAC the authentication faults on failure and the
// result can go straight to DstReg; without it, the value is authenticated in
// x16, explicitly checked, and only then moved to DstReg.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is within ADR range, no ADRP needed.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Small code model: materialize the slot address with ADRP+ADD so that
    // x17 holds the full slot address (needed as the discriminator below).
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // An undefined extern-weak symbol yields a null GOT entry; skip
  // authentication in that case (authenticating null would trap/poison).
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);
  // I-key for function pointers, D-key for data, matching the GOT signing.
  unsigned AuthOpcode = GAMO.getGlobal()->getValueType()->isFunctionTy()
                            ? AArch64::AUTIA
                            : AArch64::AUTDA;
  EmitToStreamer(MCInstBuilder(AuthOpcode)
                     .addReg(AuthResultReg)
                     .addReg(AuthResultReg)
                     .addReg(AArch64::X17));

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // No FEAT_FPAC: explicitly verify the authenticated value, then move the
    // checked result into the destination register.
    auto AuthKey =
        (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA : AArch64PACKey::DA);

    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2923
2924const MCExpr *
2925AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2926 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2927 const Function &Fn = *BA.getFunction();
2928
2929 if (std::optional<uint16_t> BADisc =
2930 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2931 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2932 /*HasAddressDiversity=*/false, OutContext);
2933
2934 return BAE;
2935}
2936
2937void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2938 bool IsImm = false;
2939 unsigned Width = 0;
2940
2941 switch (MI->getOpcode()) {
2942 default:
2943 llvm_unreachable("This is not a CB pseudo instruction");
2944 case AArch64::CBBAssertExt:
2945 IsImm = false;
2946 Width = 8;
2947 break;
2948 case AArch64::CBHAssertExt:
2949 IsImm = false;
2950 Width = 16;
2951 break;
2952 case AArch64::CBWPrr:
2953 Width = 32;
2954 break;
2955 case AArch64::CBXPrr:
2956 Width = 64;
2957 break;
2958 case AArch64::CBWPri:
2959 IsImm = true;
2960 Width = 32;
2961 break;
2962 case AArch64::CBXPri:
2963 IsImm = true;
2964 Width = 64;
2965 break;
2966 }
2967
2969 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2970 bool NeedsRegSwap = false;
2971 bool NeedsImmDec = false;
2972 bool NeedsImmInc = false;
2973
2974#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
2975 (IsImm \
2976 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
2977 : (Width == 8 \
2978 ? AArch64::CBB##RegCond##Wrr \
2979 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
2980 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
2981 : AArch64::CB##RegCond##Xrr))))
2982 unsigned MCOpC;
2983
2984 // Decide if we need to either swap register operands or increment/decrement
2985 // immediate operands
2986 switch (CC) {
2987 default:
2988 llvm_unreachable("Invalid CB condition code");
2989 case AArch64CC::EQ:
2990 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
2991 break;
2992 case AArch64CC::NE:
2993 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
2994 break;
2995 case AArch64CC::HS:
2996 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
2997 NeedsImmDec = IsImm;
2998 break;
2999 case AArch64CC::LO:
3000 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
3001 NeedsRegSwap = !IsImm;
3002 break;
3003 case AArch64CC::HI:
3004 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
3005 break;
3006 case AArch64CC::LS:
3007 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
3008 NeedsRegSwap = !IsImm;
3009 NeedsImmInc = IsImm;
3010 break;
3011 case AArch64CC::GE:
3012 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
3013 NeedsImmDec = IsImm;
3014 break;
3015 case AArch64CC::LT:
3016 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
3017 NeedsRegSwap = !IsImm;
3018 break;
3019 case AArch64CC::GT:
3020 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3021 break;
3022 case AArch64CC::LE:
3023 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3024 NeedsRegSwap = !IsImm;
3025 NeedsImmInc = IsImm;
3026 break;
3027 }
3028#undef GET_CB_OPC
3029
3030 MCInst Inst;
3031 Inst.setOpcode(MCOpC);
3032
3033 MCOperand Lhs, Rhs, Trgt;
3034 lowerOperand(MI->getOperand(1), Lhs);
3035 lowerOperand(MI->getOperand(2), Rhs);
3036 lowerOperand(MI->getOperand(3), Trgt);
3037
3038 // Now swap, increment or decrement
3039 if (NeedsRegSwap) {
3040 assert(Lhs.isReg() && "Expected register operand for CB");
3041 assert(Rhs.isReg() && "Expected register operand for CB");
3042 Inst.addOperand(Rhs);
3043 Inst.addOperand(Lhs);
3044 } else if (NeedsImmDec) {
3045 Rhs.setImm(Rhs.getImm() - 1);
3046 Inst.addOperand(Lhs);
3047 Inst.addOperand(Rhs);
3048 } else if (NeedsImmInc) {
3049 Rhs.setImm(Rhs.getImm() + 1);
3050 Inst.addOperand(Lhs);
3051 Inst.addOperand(Rhs);
3052 } else {
3053 Inst.addOperand(Lhs);
3054 Inst.addOperand(Rhs);
3055 }
3056
3057 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3058 "CB immediate operand out-of-bounds");
3059
3060 Inst.addOperand(Trgt);
3061 EmitToStreamer(*OutStreamer, Inst);
3062}
3063
3064// Simple pseudo-instructions have their lowering (with expansion to real
3065// instructions) auto-generated.
3066#include "AArch64GenMCPseudoLowering.inc"
3067
// Emit one MCInst through the given streamer, using the current subtarget's
// feature set. In asserts builds, also count emitted instructions so
// emitInstruction() can verify expansions stay within the MI's declared size.
void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
  S.emitInstruction(Inst, *STI);
#ifndef NDEBUG
  ++InstsEmitted;
#endif
}
3074
3075void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3076 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3077
3078#ifndef NDEBUG
3079 InstsEmitted = 0;
3080 auto CheckMISize = make_scope_exit([&]() {
3081 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3082 });
3083#endif
3084
3085 // Do any auto-generated pseudo lowerings.
3086 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3087 EmitToStreamer(*OutStreamer, OutInst);
3088 return;
3089 }
3090
3091 if (MI->getOpcode() == AArch64::ADRP) {
3092 for (auto &Opd : MI->operands()) {
3093 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3094 "swift_async_extendedFramePointerFlags") {
3095 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3096 }
3097 }
3098 }
3099
3100 if (AArch64FI->getLOHRelated().count(MI)) {
3101 // Generate a label for LOH related instruction
3102 MCSymbol *LOHLabel = createTempSymbol("loh");
3103 // Associate the instruction with the label
3104 LOHInstToLabel[MI] = LOHLabel;
3105 OutStreamer->emitLabel(LOHLabel);
3106 }
3107
3108 AArch64TargetStreamer *TS =
3109 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3110 // Do any manual lowerings.
3111 switch (MI->getOpcode()) {
3112 default:
3114 "Unhandled tail call instruction");
3115 break;
3116 case AArch64::HINT: {
3117 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3118 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3119 // non-empty. If MI is the initial BTI, place the
3120 // __patchable_function_entries label after BTI.
3121 if (CurrentPatchableFunctionEntrySym &&
3122 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3123 MI == &MF->front().front()) {
3124 int64_t Imm = MI->getOperand(0).getImm();
3125 if ((Imm & 32) && (Imm & 6)) {
3126 MCInst Inst;
3127 MCInstLowering.Lower(MI, Inst);
3128 EmitToStreamer(*OutStreamer, Inst);
3129 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3130 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3131 return;
3132 }
3133 }
3134 break;
3135 }
3136 case AArch64::MOVMCSym: {
3137 Register DestReg = MI->getOperand(0).getReg();
3138 const MachineOperand &MO_Sym = MI->getOperand(1);
3139 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3140 MCOperand Hi_MCSym, Lo_MCSym;
3141
3142 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3143 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3144
3145 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3146 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3147
3148 MCInst MovZ;
3149 MovZ.setOpcode(AArch64::MOVZXi);
3150 MovZ.addOperand(MCOperand::createReg(DestReg));
3151 MovZ.addOperand(Hi_MCSym);
3153 EmitToStreamer(*OutStreamer, MovZ);
3154
3155 MCInst MovK;
3156 MovK.setOpcode(AArch64::MOVKXi);
3157 MovK.addOperand(MCOperand::createReg(DestReg));
3158 MovK.addOperand(MCOperand::createReg(DestReg));
3159 MovK.addOperand(Lo_MCSym);
3161 EmitToStreamer(*OutStreamer, MovK);
3162 return;
3163 }
3164 case AArch64::MOVIv2d_ns:
3165 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3166 // as movi is more efficient across all cores. Newer cores can eliminate
3167 // fmovs early and there is no difference with movi, but this not true for
3168 // all implementations.
3169 //
3170 // The floating-point version doesn't quite work in rare cases on older
3171 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3172 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3173 MI->getOperand(1).getImm() == 0) {
3174 MCInst TmpInst;
3175 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3176 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3177 TmpInst.addOperand(MCOperand::createImm(0));
3178 EmitToStreamer(*OutStreamer, TmpInst);
3179 return;
3180 }
3181 break;
3182
3183 case AArch64::DBG_VALUE:
3184 case AArch64::DBG_VALUE_LIST:
3185 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3186 SmallString<128> TmpStr;
3187 raw_svector_ostream OS(TmpStr);
3188 PrintDebugValueComment(MI, OS);
3189 OutStreamer->emitRawText(StringRef(OS.str()));
3190 }
3191 return;
3192
3193 case AArch64::EMITBKEY: {
3194 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3195 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3196 ExceptionHandlingType != ExceptionHandling::ARM)
3197 return;
3198
3199 if (getFunctionCFISectionType(*MF) == CFISection::None)
3200 return;
3201
3202 OutStreamer->emitCFIBKeyFrame();
3203 return;
3204 }
3205
3206 case AArch64::EMITMTETAGGED: {
3207 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3208 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3209 ExceptionHandlingType != ExceptionHandling::ARM)
3210 return;
3211
3212 if (getFunctionCFISectionType(*MF) != CFISection::None)
3213 OutStreamer->emitCFIMTETaggedFrame();
3214 return;
3215 }
3216
3217 case AArch64::AUTx16x17:
3218 emitPtrauthAuthResign(
3219 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3220 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3221 std::nullopt, 0, 0, MI->getDeactivationSymbol());
3222 return;
3223
3224 case AArch64::AUTxMxN:
3225 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
3226 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3227 MI->getOperand(4).getImm(), &MI->getOperand(5),
3228 MI->getOperand(1).getReg(), std::nullopt, 0, 0,
3229 MI->getDeactivationSymbol());
3230 return;
3231
3232 case AArch64::AUTPAC:
3233 emitPtrauthAuthResign(
3234 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3235 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3236 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3237 MI->getOperand(4).getImm(), MI->getOperand(5).getReg(),
3238 MI->getDeactivationSymbol());
3239 return;
3240
3241 case AArch64::PAC:
3242 emitPtrauthSign(MI);
3243 return;
3244
3245 case AArch64::LOADauthptrstatic:
3246 LowerLOADauthptrstatic(*MI);
3247 return;
3248
3249 case AArch64::LOADgotPAC:
3250 case AArch64::MOVaddrPAC:
3251 LowerMOVaddrPAC(*MI);
3252 return;
3253
3254 case AArch64::LOADgotAUTH:
3255 LowerLOADgotAUTH(*MI);
3256 return;
3257
3258 case AArch64::BRA:
3259 case AArch64::BLRA:
3260 emitPtrauthBranch(MI);
3261 return;
3262
3263 // Tail calls use pseudo instructions so they have the proper code-gen
3264 // attributes (isCall, isReturn, etc.). We lower them to the real
3265 // instruction here.
3266 case AArch64::AUTH_TCRETURN:
3267 case AArch64::AUTH_TCRETURN_BTI: {
3268 Register Callee = MI->getOperand(0).getReg();
3269 const uint64_t Key = MI->getOperand(2).getImm();
3271 "Invalid auth key for tail-call return");
3272
3273 const uint64_t Disc = MI->getOperand(3).getImm();
3274 assert(isUInt<16>(Disc) && "Integer discriminator is too wide");
3275
3276 Register AddrDisc = MI->getOperand(4).getReg();
3277
3278 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3279
3280 emitPtrauthTailCallHardening(MI);
3281
3282 // See the comments in emitPtrauthBranch.
3283 if (Callee == AddrDisc)
3284 report_fatal_error("Call target is signed with its own value");
3285
3286 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3287 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3288 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3289 // restriction manually not to clobber an unexpected register.
3290 bool AddrDiscIsImplicitDef =
3291 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3292 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3293 AddrDiscIsImplicitDef);
3294
3295 const bool IsZero = DiscReg == AArch64::XZR;
3296 const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ},
3297 {AArch64::BRAB, AArch64::BRABZ}};
3298
3299 MCInst TmpInst;
3300 TmpInst.setOpcode(Opcodes[Key][IsZero]);
3301 TmpInst.addOperand(MCOperand::createReg(Callee));
3302 if (!IsZero)
3303 TmpInst.addOperand(MCOperand::createReg(DiscReg));
3304 EmitToStreamer(*OutStreamer, TmpInst);
3305 return;
3306 }
3307
3308 case AArch64::TCRETURNri:
3309 case AArch64::TCRETURNrix16x17:
3310 case AArch64::TCRETURNrix17:
3311 case AArch64::TCRETURNrinotx16:
3312 case AArch64::TCRETURNriALL: {
3313 emitPtrauthTailCallHardening(MI);
3314
3315 recordIfImportCall(MI);
3316 MCInst TmpInst;
3317 TmpInst.setOpcode(AArch64::BR);
3318 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3319 EmitToStreamer(*OutStreamer, TmpInst);
3320 return;
3321 }
3322 case AArch64::TCRETURNdi: {
3323 emitPtrauthTailCallHardening(MI);
3324
3325 MCOperand Dest;
3326 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3327 recordIfImportCall(MI);
3328 MCInst TmpInst;
3329 TmpInst.setOpcode(AArch64::B);
3330 TmpInst.addOperand(Dest);
3331 EmitToStreamer(*OutStreamer, TmpInst);
3332 return;
3333 }
3334 case AArch64::SpeculationBarrierISBDSBEndBB: {
3335 // Print DSB SYS + ISB
3336 MCInst TmpInstDSB;
3337 TmpInstDSB.setOpcode(AArch64::DSB);
3338 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3339 EmitToStreamer(*OutStreamer, TmpInstDSB);
3340 MCInst TmpInstISB;
3341 TmpInstISB.setOpcode(AArch64::ISB);
3342 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3343 EmitToStreamer(*OutStreamer, TmpInstISB);
3344 return;
3345 }
3346 case AArch64::SpeculationBarrierSBEndBB: {
3347 // Print SB
3348 MCInst TmpInstSB;
3349 TmpInstSB.setOpcode(AArch64::SB);
3350 EmitToStreamer(*OutStreamer, TmpInstSB);
3351 return;
3352 }
3353 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3354 /// lower this to:
3355 /// adrp x0, :tlsdesc_auth:var
3356 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3357 /// add x0, x0, #:tlsdesc_auth_lo12:var
3358 /// blraa x16, x0
3359 /// (TPIDR_EL0 offset now in x0)
3360 const MachineOperand &MO_Sym = MI->getOperand(0);
3361 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3362 MCOperand SymTLSDescLo12, SymTLSDesc;
3363 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3364 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3365 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3366 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3367
3368 MCInst Adrp;
3369 Adrp.setOpcode(AArch64::ADRP);
3370 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3371 Adrp.addOperand(SymTLSDesc);
3372 EmitToStreamer(*OutStreamer, Adrp);
3373
3374 MCInst Ldr;
3375 Ldr.setOpcode(AArch64::LDRXui);
3376 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3377 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3378 Ldr.addOperand(SymTLSDescLo12);
3380 EmitToStreamer(*OutStreamer, Ldr);
3381
3382 MCInst Add;
3383 Add.setOpcode(AArch64::ADDXri);
3384 Add.addOperand(MCOperand::createReg(AArch64::X0));
3385 Add.addOperand(MCOperand::createReg(AArch64::X0));
3386 Add.addOperand(SymTLSDescLo12);
3388 EmitToStreamer(*OutStreamer, Add);
3389
3390 // Authenticated TLSDESC accesses are not relaxed.
3391 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3392
3393 MCInst Blraa;
3394 Blraa.setOpcode(AArch64::BLRAA);
3395 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3396 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3397 EmitToStreamer(*OutStreamer, Blraa);
3398
3399 return;
3400 }
3401 case AArch64::TLSDESC_CALLSEQ: {
3402 /// lower this to:
3403 /// adrp x0, :tlsdesc:var
3404 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3405 /// add x0, x0, #:tlsdesc_lo12:var
3406 /// .tlsdesccall var
3407 /// blr x1
3408 /// (TPIDR_EL0 offset now in x0)
3409 const MachineOperand &MO_Sym = MI->getOperand(0);
3410 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3411 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3412 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3413 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3414 MCInstLowering.lowerOperand(MO_Sym, Sym);
3415 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3416 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3417
3418 MCInst Adrp;
3419 Adrp.setOpcode(AArch64::ADRP);
3420 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3421 Adrp.addOperand(SymTLSDesc);
3422 EmitToStreamer(*OutStreamer, Adrp);
3423
3424 MCInst Ldr;
3425 if (STI->isTargetILP32()) {
3426 Ldr.setOpcode(AArch64::LDRWui);
3427 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3428 } else {
3429 Ldr.setOpcode(AArch64::LDRXui);
3430 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3431 }
3432 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3433 Ldr.addOperand(SymTLSDescLo12);
3435 EmitToStreamer(*OutStreamer, Ldr);
3436
3437 MCInst Add;
3438 if (STI->isTargetILP32()) {
3439 Add.setOpcode(AArch64::ADDWri);
3440 Add.addOperand(MCOperand::createReg(AArch64::W0));
3441 Add.addOperand(MCOperand::createReg(AArch64::W0));
3442 } else {
3443 Add.setOpcode(AArch64::ADDXri);
3444 Add.addOperand(MCOperand::createReg(AArch64::X0));
3445 Add.addOperand(MCOperand::createReg(AArch64::X0));
3446 }
3447 Add.addOperand(SymTLSDescLo12);
3449 EmitToStreamer(*OutStreamer, Add);
3450
3451 // Emit a relocation-annotation. This expands to no code, but requests
3452 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3453 MCInst TLSDescCall;
3454 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3455 TLSDescCall.addOperand(Sym);
3456 EmitToStreamer(*OutStreamer, TLSDescCall);
3457#ifndef NDEBUG
3458 --InstsEmitted; // no code emitted
3459#endif
3460
3461 MCInst Blr;
3462 Blr.setOpcode(AArch64::BLR);
3463 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3464 EmitToStreamer(*OutStreamer, Blr);
3465
3466 return;
3467 }
3468
3469 case AArch64::JumpTableDest32:
3470 case AArch64::JumpTableDest16:
3471 case AArch64::JumpTableDest8:
3472 LowerJumpTableDest(*OutStreamer, *MI);
3473 return;
3474
3475 case AArch64::BR_JumpTable:
3476 LowerHardenedBRJumpTable(*MI);
3477 return;
3478
3479 case AArch64::FMOVH0:
3480 case AArch64::FMOVS0:
3481 case AArch64::FMOVD0:
3482 emitFMov0(*MI);
3483 return;
3484
3485 case AArch64::MOPSMemoryCopyPseudo:
3486 case AArch64::MOPSMemoryMovePseudo:
3487 case AArch64::MOPSMemorySetPseudo:
3488 case AArch64::MOPSMemorySetTaggingPseudo:
3489 LowerMOPS(*OutStreamer, *MI);
3490 return;
3491
3492 case TargetOpcode::STACKMAP:
3493 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3494
3495 case TargetOpcode::PATCHPOINT:
3496 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3497
3498 case TargetOpcode::STATEPOINT:
3499 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3500
3501 case TargetOpcode::FAULTING_OP:
3502 return LowerFAULTING_OP(*MI);
3503
3504 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3505 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3506 return;
3507
3508 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3509 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3510 return;
3511
3512 case TargetOpcode::PATCHABLE_TAIL_CALL:
3513 LowerPATCHABLE_TAIL_CALL(*MI);
3514 return;
3515 case TargetOpcode::PATCHABLE_EVENT_CALL:
3516 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3517 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3518 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3519
3520 case AArch64::KCFI_CHECK:
3521 LowerKCFI_CHECK(*MI);
3522 return;
3523
3524 case AArch64::HWASAN_CHECK_MEMACCESS:
3525 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3526 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3527 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3528 LowerHWASAN_CHECK_MEMACCESS(*MI);
3529 return;
3530
3531 case AArch64::SEH_StackAlloc:
3532 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3533 return;
3534
3535 case AArch64::SEH_SaveFPLR:
3536 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3537 return;
3538
3539 case AArch64::SEH_SaveFPLR_X:
3540 assert(MI->getOperand(0).getImm() < 0 &&
3541 "Pre increment SEH opcode must have a negative offset");
3542 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3543 return;
3544
3545 case AArch64::SEH_SaveReg:
3546 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3547 MI->getOperand(1).getImm());
3548 return;
3549
3550 case AArch64::SEH_SaveReg_X:
3551 assert(MI->getOperand(1).getImm() < 0 &&
3552 "Pre increment SEH opcode must have a negative offset");
3553 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3554 -MI->getOperand(1).getImm());
3555 return;
3556
3557 case AArch64::SEH_SaveRegP:
3558 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3559 MI->getOperand(0).getImm() <= 28) {
3560 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3561 "Register paired with LR must be odd");
3562 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3563 MI->getOperand(2).getImm());
3564 return;
3565 }
3566 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3567 "Non-consecutive registers not allowed for save_regp");
3568 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3569 MI->getOperand(2).getImm());
3570 return;
3571
3572 case AArch64::SEH_SaveRegP_X:
3573 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3574 "Non-consecutive registers not allowed for save_regp_x");
3575 assert(MI->getOperand(2).getImm() < 0 &&
3576 "Pre increment SEH opcode must have a negative offset");
3577 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3578 -MI->getOperand(2).getImm());
3579 return;
3580
3581 case AArch64::SEH_SaveFReg:
3582 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3583 MI->getOperand(1).getImm());
3584 return;
3585
3586 case AArch64::SEH_SaveFReg_X:
3587 assert(MI->getOperand(1).getImm() < 0 &&
3588 "Pre increment SEH opcode must have a negative offset");
3589 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3590 -MI->getOperand(1).getImm());
3591 return;
3592
3593 case AArch64::SEH_SaveFRegP:
3594 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3595 "Non-consecutive registers not allowed for save_regp");
3596 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3597 MI->getOperand(2).getImm());
3598 return;
3599
3600 case AArch64::SEH_SaveFRegP_X:
3601 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3602 "Non-consecutive registers not allowed for save_regp_x");
3603 assert(MI->getOperand(2).getImm() < 0 &&
3604 "Pre increment SEH opcode must have a negative offset");
3605 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3606 -MI->getOperand(2).getImm());
3607 return;
3608
3609 case AArch64::SEH_SetFP:
3611 return;
3612
3613 case AArch64::SEH_AddFP:
3614 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3615 return;
3616
3617 case AArch64::SEH_Nop:
3618 TS->emitARM64WinCFINop();
3619 return;
3620
3621 case AArch64::SEH_PrologEnd:
3623 return;
3624
3625 case AArch64::SEH_EpilogStart:
3627 return;
3628
3629 case AArch64::SEH_EpilogEnd:
3631 return;
3632
3633 case AArch64::SEH_PACSignLR:
3635 return;
3636
3637 case AArch64::SEH_SaveAnyRegI:
3638 assert(MI->getOperand(1).getImm() <= 1008 &&
3639 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3640 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3641 MI->getOperand(1).getImm());
3642 return;
3643
3644 case AArch64::SEH_SaveAnyRegIP:
3645 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3646 "Non-consecutive registers not allowed for save_any_reg");
3647 assert(MI->getOperand(2).getImm() <= 1008 &&
3648 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3649 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3650 MI->getOperand(2).getImm());
3651 return;
3652
3653 case AArch64::SEH_SaveAnyRegQP:
3654 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3655 "Non-consecutive registers not allowed for save_any_reg");
3656 assert(MI->getOperand(2).getImm() >= 0 &&
3657 "SaveAnyRegQP SEH opcode offset must be non-negative");
3658 assert(MI->getOperand(2).getImm() <= 1008 &&
3659 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3660 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3661 MI->getOperand(2).getImm());
3662 return;
3663
3664 case AArch64::SEH_SaveAnyRegQPX:
3665 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3666 "Non-consecutive registers not allowed for save_any_reg");
3667 assert(MI->getOperand(2).getImm() < 0 &&
3668 "SaveAnyRegQPX SEH opcode offset must be negative");
3669 assert(MI->getOperand(2).getImm() >= -1008 &&
3670 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3671 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3672 -MI->getOperand(2).getImm());
3673 return;
3674
3675 case AArch64::SEH_AllocZ:
3676 assert(MI->getOperand(0).getImm() >= 0 &&
3677 "AllocZ SEH opcode offset must be non-negative");
3678 assert(MI->getOperand(0).getImm() <= 255 &&
3679 "AllocZ SEH opcode offset must fit into 8 bits");
3680 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3681 return;
3682
3683 case AArch64::SEH_SaveZReg:
3684 assert(MI->getOperand(1).getImm() >= 0 &&
3685 "SaveZReg SEH opcode offset must be non-negative");
3686 assert(MI->getOperand(1).getImm() <= 255 &&
3687 "SaveZReg SEH opcode offset must fit into 8 bits");
3688 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3689 MI->getOperand(1).getImm());
3690 return;
3691
3692 case AArch64::SEH_SavePReg:
3693 assert(MI->getOperand(1).getImm() >= 0 &&
3694 "SavePReg SEH opcode offset must be non-negative");
3695 assert(MI->getOperand(1).getImm() <= 255 &&
3696 "SavePReg SEH opcode offset must fit into 8 bits");
3697 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3698 MI->getOperand(1).getImm());
3699 return;
3700
3701 case AArch64::BLR:
3702 case AArch64::BR: {
3703 recordIfImportCall(MI);
3704 MCInst TmpInst;
3705 MCInstLowering.Lower(MI, TmpInst);
3706 EmitToStreamer(*OutStreamer, TmpInst);
3707 return;
3708 }
3709 case AArch64::CBWPri:
3710 case AArch64::CBXPri:
3711 case AArch64::CBBAssertExt:
3712 case AArch64::CBHAssertExt:
3713 case AArch64::CBWPrr:
3714 case AArch64::CBXPrr:
3715 emitCBPseudoExpansion(MI);
3716 return;
3717 }
3718
3719 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3720 return;
3721
3722 // Finally, do the automated lowerings for everything else.
3723 MCInst TmpInst;
3724 MCInstLowering.Lower(MI, TmpInst);
3725 EmitToStreamer(*OutStreamer, TmpInst);
3726}
3727
3728void AArch64AsmPrinter::recordIfImportCall(
3729 const llvm::MachineInstr *BranchInst) {
3730 if (!EnableImportCallOptimization)
3731 return;
3732
3733 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3734 if (GV && GV->hasDLLImportStorageClass()) {
3735 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3736 OutStreamer->emitLabel(CallSiteSymbol);
3737
3738 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3739 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3740 .push_back({CallSiteSymbol, CalledSymbol});
3741 }
3742}
3743
3744void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3745 MCSymbol *LazyPointer) {
3746 // _ifunc:
3747 // adrp x16, lazy_pointer@GOTPAGE
3748 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3749 // ldr x16, [x16]
3750 // br x16
3751
3752 {
3753 MCInst Adrp;
3754 Adrp.setOpcode(AArch64::ADRP);
3755 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3756 MCOperand SymPage;
3757 MCInstLowering.lowerOperand(
3760 SymPage);
3761 Adrp.addOperand(SymPage);
3762 EmitToStreamer(Adrp);
3763 }
3764
3765 {
3766 MCInst Ldr;
3767 Ldr.setOpcode(AArch64::LDRXui);
3768 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3769 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3770 MCOperand SymPageOff;
3771 MCInstLowering.lowerOperand(
3774 SymPageOff);
3775 Ldr.addOperand(SymPageOff);
3777 EmitToStreamer(Ldr);
3778 }
3779
3780 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3781 .addReg(AArch64::X16)
3782 .addReg(AArch64::X16)
3783 .addImm(0));
3784
3785 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3786 : AArch64::BR)
3787 .addReg(AArch64::X16));
3788}
3789
3790void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3791 const GlobalIFunc &GI,
3792 MCSymbol *LazyPointer) {
3793 // These stub helpers are only ever called once, so here we're optimizing for
3794 // minimum size by using the pre-indexed store variants, which saves a few
3795 // bytes of instructions to bump & restore sp.
3796
3797 // _ifunc.stub_helper:
3798 // stp fp, lr, [sp, #-16]!
3799 // mov fp, sp
3800 // stp x1, x0, [sp, #-16]!
3801 // stp x3, x2, [sp, #-16]!
3802 // stp x5, x4, [sp, #-16]!
3803 // stp x7, x6, [sp, #-16]!
3804 // stp d1, d0, [sp, #-16]!
3805 // stp d3, d2, [sp, #-16]!
3806 // stp d5, d4, [sp, #-16]!
3807 // stp d7, d6, [sp, #-16]!
3808 // bl _resolver
3809 // adrp x16, lazy_pointer@GOTPAGE
3810 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3811 // str x0, [x16]
3812 // mov x16, x0
3813 // ldp d7, d6, [sp], #16
3814 // ldp d5, d4, [sp], #16
3815 // ldp d3, d2, [sp], #16
3816 // ldp d1, d0, [sp], #16
3817 // ldp x7, x6, [sp], #16
3818 // ldp x5, x4, [sp], #16
3819 // ldp x3, x2, [sp], #16
3820 // ldp x1, x0, [sp], #16
3821 // ldp fp, lr, [sp], #16
3822 // br x16
3823
3824 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3825 .addReg(AArch64::SP)
3826 .addReg(AArch64::FP)
3827 .addReg(AArch64::LR)
3828 .addReg(AArch64::SP)
3829 .addImm(-2));
3830
3831 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3832 .addReg(AArch64::FP)
3833 .addReg(AArch64::SP)
3834 .addImm(0)
3835 .addImm(0));
3836
3837 for (int I = 0; I != 4; ++I)
3838 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3839 .addReg(AArch64::SP)
3840 .addReg(AArch64::X1 + 2 * I)
3841 .addReg(AArch64::X0 + 2 * I)
3842 .addReg(AArch64::SP)
3843 .addImm(-2));
3844
3845 for (int I = 0; I != 4; ++I)
3846 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3847 .addReg(AArch64::SP)
3848 .addReg(AArch64::D1 + 2 * I)
3849 .addReg(AArch64::D0 + 2 * I)
3850 .addReg(AArch64::SP)
3851 .addImm(-2));
3852
3853 EmitToStreamer(
3854 MCInstBuilder(AArch64::BL)
3856
3857 {
3858 MCInst Adrp;
3859 Adrp.setOpcode(AArch64::ADRP);
3860 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3861 MCOperand SymPage;
3862 MCInstLowering.lowerOperand(
3863 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3865 SymPage);
3866 Adrp.addOperand(SymPage);
3867 EmitToStreamer(Adrp);
3868 }
3869
3870 {
3871 MCInst Ldr;
3872 Ldr.setOpcode(AArch64::LDRXui);
3873 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3874 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3875 MCOperand SymPageOff;
3876 MCInstLowering.lowerOperand(
3877 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3879 SymPageOff);
3880 Ldr.addOperand(SymPageOff);
3882 EmitToStreamer(Ldr);
3883 }
3884
3885 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3886 .addReg(AArch64::X0)
3887 .addReg(AArch64::X16)
3888 .addImm(0));
3889
3890 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3891 .addReg(AArch64::X16)
3892 .addReg(AArch64::X0)
3893 .addImm(0)
3894 .addImm(0));
3895
3896 for (int I = 3; I != -1; --I)
3897 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3898 .addReg(AArch64::SP)
3899 .addReg(AArch64::D1 + 2 * I)
3900 .addReg(AArch64::D0 + 2 * I)
3901 .addReg(AArch64::SP)
3902 .addImm(2));
3903
3904 for (int I = 3; I != -1; --I)
3905 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3906 .addReg(AArch64::SP)
3907 .addReg(AArch64::X1 + 2 * I)
3908 .addReg(AArch64::X0 + 2 * I)
3909 .addReg(AArch64::SP)
3910 .addImm(2));
3911
3912 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3913 .addReg(AArch64::SP)
3914 .addReg(AArch64::FP)
3915 .addReg(AArch64::LR)
3916 .addReg(AArch64::SP)
3917 .addImm(2));
3918
3919 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3920 : AArch64::BR)
3921 .addReg(AArch64::X16));
3922}
3923
3924const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3925 const Constant *BaseCV,
3926 uint64_t Offset) {
3927 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3928 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3929 OutContext);
3930 }
3931
3932 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3933}
3934
// Pass identification, used by the legacy pass manager instead of typeid.
char AArch64AsmPrinter::ID = 0;

INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
                "AArch64 Assembly Printer", false, false)
3939
3940// Force static initialization.
3941extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3942LLVMInitializeAArch64AsmPrinter() {
3948}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsPAuthRelocation(const Triple &TT, const MCExpr *Target, const MCExpr *DSExpr)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
bool isX16X17Safer() const
Returns whether the operating system makes it safer to store sensitive values in x16 and x17 as oppos...
AArch64PAuth::AuthCheckMethod getAuthenticatedLRCheckMethod(const MachineFunction &MF) const
Choose a method of checking LR before performing a tail call.
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:636
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
The address of a basic block.
Definition Constants.h:904
Function * getFunction() const
Definition Constants.h:940
Conditional or Unconditional Branch instruction.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
A signed pointer, in the ptrauth sense.
Definition Constants.h:1037
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1083
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
Instances of this class represent operands of the MCInst class.
Definition MCInst.h:40
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
static constexpr unsigned NonUniqueID
Definition MCSection.h:522
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitCFIBKeyFrame()
virtual void beginCOFFSymbolDef(const MCSymbol *Symbol)
Start emitting COFF symbol definition.
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitCOFFSymbolType(int Type)
Emit the type of the symbol.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:368
virtual void endCOFFSymbolDef()
Marks the end of the symbol definition.
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:443
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
virtual void emitCOFFSymbolStorageClass(int StorageClass)
Emit the storage class of the symbol.
Generic base class for all target subtargets.
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
LLVM_ABI void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
LLVM_ABI void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
LLVM_ABI void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1099
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1248
@ SHF_GROUP
Definition ELF.h:1270
@ SHF_EXECINSTR
Definition ELF.h:1251
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1858
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1859
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1860
@ SHT_PROGBITS
Definition ELF.h:1147
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:294
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition ScopeExit.h:59
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...