LLVM 22.0.0git
AArch64AsmPrinter.cpp
Go to the documentation of this file.
1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/Statistic.h"
32#include "llvm/ADT/StringRef.h"
33#include "llvm/ADT/Twine.h"
47#include "llvm/IR/DataLayout.h"
49#include "llvm/IR/Mangler.h"
50#include "llvm/IR/Module.h"
51#include "llvm/MC/MCAsmInfo.h"
52#include "llvm/MC/MCContext.h"
53#include "llvm/MC/MCExpr.h"
54#include "llvm/MC/MCInst.h"
58#include "llvm/MC/MCStreamer.h"
59#include "llvm/MC/MCSymbol.h"
60#include "llvm/MC/MCValue.h"
70#include <cassert>
71#include <cstdint>
72#include <map>
73#include <memory>
74
75using namespace llvm;
76
77#define DEBUG_TYPE "AArch64AsmPrinter"
78
// Doesn't count FPR128 ZCZ instructions which are handled
// by TableGen pattern matching
STATISTIC(NumZCZeroingInstrsFPR,
          "Number of zero-cycle FPR zeroing instructions expanded from "
          "canonical pseudo instructions");

// NOTE(review): the head of this cl::opt declaration (its type and variable
// name, presumably a PtrauthCheckMode option) was elided by the extraction;
// only the option arguments below survive.
    "aarch64-ptrauth-auth-checks", cl::Hidden,
    cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
               clEnumValN(Poison, "poison", "poison on failure"),
               clEnumValN(Trap, "trap", "trap on failure")),
    cl::desc("Check pointer authentication auth/resign failures"),
namespace {

/// AArch64-specific AsmPrinter. Drives per-function emission for AArch64,
/// including expansion of target pseudo instructions (pointer authentication,
/// HWASan checks, KCFI checks, XRay sleds, hardened jump tables,
/// stackmaps/patchpoints) down to MCInsts on the output streamer.
class AArch64AsmPrinter : public AsmPrinter {
  // Lowers MachineOperands/MachineInstrs to MCOperands/MCInsts.
  AArch64MCInstLower MCInstLowering;
  // Accumulates fault-map entries produced by FAULTING_OP lowering.
  FaultMaps FM;
  // Subtarget of the function currently being emitted; set in
  // runOnMachineFunction (and temporarily in emitHwasanMemaccessSymbols).
  const AArch64Subtarget *STI;
  bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
#ifndef NDEBUG
  // Debug-only counter of MCInsts emitted for the current expansion.
  unsigned InstsEmitted;
#endif
  // Set in emitStartOfAsmFile when the module requests COFF import call
  // optimization via the "import-call-optimization" module flag.
  bool EnableImportCallOptimization = false;
  // NOTE(review): the declared type of this member (a map from sections to
  // imported-function call sites) was elided by the extraction; only the
  // member name is visible here.
  SectionToImportedFunctionCalls;
  // Unique-ID counter used when creating pauth ifunc-related symbols.
  unsigned PAuthIFuncNextUniqueID = 1;

public:
  static char ID;

  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer), ID),
        MCInstLowering(OutContext, *this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;

  const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
                         ArrayRef<unsigned> JumpTableIndices) override;
  // NOTE(review): the final element of this tuple return type was elided by
  // the extraction.
  std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
  getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
                           const MCSymbol *BranchLabel) const override;

  void emitFunctionEntryLabel() override;

  void emitXXStructor(const DataLayout &DL, const Constant *CV) override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerHardenedBRJumpTable(const MachineInstr &MI);

  void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
  void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);

  // Key identifying one outlined HWASan checker routine:
  // (register, short-granules?, access-info, fixed-shadow?, shadow-offset).
  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
      HwasanMemaccessTuple;
  // Lazily-created symbols for the outlined HWASan checker routines; the
  // routines themselves are emitted at end of file.
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerKCFI_CHECK(const MachineInstr &MI);
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void emitHwasanMemaccessSymbols(Module &M);

  void emitSled(const MachineInstr &MI, SledKind Kind);

  // Returns whether Reg may be used to store sensitive temporary values when
  // expanding PtrAuth pseudos. Some OSes may take extra care to protect a
  // small subset of GPRs on context switches - use these registers then.
  //
  // If there are no preferred registers, returns true for any Reg.
  bool isPtrauthRegSafe(Register Reg) const {
    if (STI->isX16X17Safer())
      return Reg == AArch64::X16 || Reg == AArch64::X17;

    return true;
  }

  // Emit the sequence for BRA/BLRA (authenticate + branch/call).
  void emitPtrauthBranch(const MachineInstr *MI);

  // NOTE(review): two parameter lines of this declaration (between ScratchReg
  // and OnFailure) were elided by the extraction.
  void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
                                          Register ScratchReg,
                                          const MCSymbol *OnFailure = nullptr);

  // Check authenticated LR before tail calling.
  void emitPtrauthTailCallHardening(const MachineInstr *TC);

  // Emit the sequence for AUT or AUTPAC.
  void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
                             uint64_t AUTDisc,
                             const MachineOperand *AUTAddrDisc,
                             Register Scratch,
                             std::optional<AArch64PACKey::ID> PACKey,
                             uint64_t PACDisc, Register PACAddrDisc, Value *DS);

  // Emit R_AARCH64_PATCHINST, the deactivation symbol relocation. Returns true
  // if no instruction should be emitted because the deactivation symbol is
  // defined in the current module so this function emitted a NOP instead.
  bool emitDeactivationSymbolRelocation(Value *DS);

  // Emit the sequence for PAC.
  void emitPtrauthSign(const MachineInstr *MI);

  // Emit the sequence to compute the discriminator.
  //
  // The Scratch register passed to this function must be safe, as returned by
  // isPtrauthRegSafe(ScratchReg).
  //
  // The returned register is either ScratchReg, AddrDisc, or XZR. Furthermore,
  // it is guaranteed to be safe (or XZR), with the only exception of
  // passing-through an *unmodified* unsafe AddrDisc register.
  //
  // If the expanded pseudo is allowed to clobber AddrDisc register, setting
  // MayClobberAddrDisc may save one MOV instruction, provided
  // isPtrauthRegSafe(AddrDisc) is true:
  //
  //   mov x17, x16
  //   movk x17, #1234, lsl #48
  //   ; x16 is not used anymore
  //
  // can be replaced by
  //
  //   movk x16, #1234, lsl #48
  Register emitPtrauthDiscriminator(uint64_t Disc, Register AddrDisc,
                                    Register ScratchReg,
                                    bool MayClobberAddrDisc = false);

  // Emit the sequence for LOADauthptrstatic
  void LowerLOADauthptrstatic(const MachineInstr &MI);

  // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
  // adrp-add followed by PAC sign)
  void LowerMOVaddrPAC(const MachineInstr &MI);

  // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
  // and authenticate it with, if FPAC bit is not set, check+trap sequence after
  // authenticating)
  void LowerLOADgotAUTH(const MachineInstr &MI);

  void emitAddImm(MCRegister Val, int64_t Addend, MCRegister Tmp);
  void emitAddress(MCRegister Reg, const MCExpr *Expr, MCRegister Tmp,
                   bool DSOLocal, const MCSubtargetInfo &STI);

  const MCExpr *emitPAuthRelocationAsIRelative(
      const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
      bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);

  // Emit Build Attributes
  void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
                      uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);

  // Emit expansion of Compare-and-branch pseudo instructions
  void emitCBPseudoExpansion(const MachineInstr *MI);

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
  void EmitToStreamer(const MCInst &Inst) {
    EmitToStreamer(*OutStreamer, Inst);
  }

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // NOTE(review): one line (presumably the delegation to the base class's
    // getAnalysisUsage) was elided here by the extraction.
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
      PSI = &PSIW->getPSI();
    if (auto *SDPIW =
            getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
      SDPI = &SDPIW->getStaticDataProfileInfo();

    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = &MF.getSubtarget<AArch64Subtarget>();

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      // Emit the COFF symbol definition for the function.
      bool Local = MF.getFunction().hasLocalLinkage();
      // NOTE(review): the storage-class computation (the `Scl` variable used
      // below) and the right-hand side of `Type` were elided here by the
      // extraction.
      int Type =

      OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->emitCOFFSymbolStorageClass(Scl);
      OutStreamer->emitCOFFSymbolType(Type);
      OutStreamer->endCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

  const MCExpr *lowerConstant(const Constant *CV,
                              const Constant *BaseCV = nullptr,
                              uint64_t Offset = 0) override;

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;
  void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void emitLOHs();

  // Helpers emitting a single register-move / MOVZ / MOVK instruction.
  void emitMovXReg(Register Dest, Register Src);
  void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
  void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);

  // Helpers emitting a single AUT* / PAC* / B(L)RA* instruction.
  void emitAUT(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitPAC(AArch64PACKey::ID Key, Register Pointer, Register Disc);
  void emitBLRA(bool IsCall, AArch64PACKey::ID Key, Register Target,
                Register Disc);

  /// Emit instruction to set float register to zero.
  void emitFMov0(const MachineInstr &MI);
  void emitFMov0AsFMov(const MachineInstr &MI, Register DestReg);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  // Labels emitted for instructions participating in linker-optimization
  // hints (LOHs).
  MInstToMCSymbol LOHInstToLabel;

  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
  }

  const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
    assert(STI);
    return STI;
  }
  void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
                              MCSymbol *LazyPointer) override;
  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
                                    MCSymbol *LazyPointer) override;

  /// Checks if this instruction is part of a sequence that is eligible for
  /// import call optimization and, if so, records it to be emitted in the
  /// import call section.
  void recordIfImportCall(const MachineInstr *BranchInst);
};

} // end anonymous namespace
378
// Emits module-level prologue data: COFF feature symbols / import-call-opt
// flag on COFF, and build attributes plus a .note.gnu.property section on ELF.
void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  const Triple &TT = TM.getTargetTriple();

  if (TT.isOSBinFormatCOFF()) {
    emitCOFFFeatureSymbol(M);
    emitCOFFReplaceableFunctionData(M);

    // Honor the module-level request for import call optimization.
    if (M.getModuleFlag("import-call-optimization"))
      EnableImportCallOptimization = true;
  }

  // Everything below is ELF-only.
  if (!TT.isOSBinFormatELF())
    return;

  // For emitting build attributes and .note.gnu.property section
  auto *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Assemble feature flags that may require creation of build attributes and a
  // note section.
  unsigned BAFlags = 0;
  unsigned GNUFlags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement"))) {
    if (!BTE->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
      // NOTE(review): the matching GNUFlags update was elided here by the
      // extraction.
    }
  }

  if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("guarded-control-stack"))) {
    if (!GCS->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
      // NOTE(review): the matching GNUFlags update was elided here by the
      // extraction.
    }
  }

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address"))) {
    if (!Sign->isZero()) {
      BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
      // NOTE(review): the matching GNUFlags update was elided here by the
      // extraction.
    }
  }

  // -1 is the "not specified" sentinel; emitAttributes maps it to 0.
  uint64_t PAuthABIPlatform = -1;
  if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
    PAuthABIPlatform = PAP->getZExtValue();
  }

  uint64_t PAuthABIVersion = -1;
  if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
    PAuthABIVersion = PAV->getZExtValue();
  }

  // Emit AArch64 Build Attributes
  emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
  // Emit a .note.gnu.property section with the flags.
  TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
}
441
442void AArch64AsmPrinter::emitFunctionHeaderComment() {
443 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
444 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
445 if (OutlinerString != std::nullopt)
446 OutStreamer->getCommentOS() << ' ' << OutlinerString;
447}
448
449void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
450{
451 const Function &F = MF->getFunction();
452 if (F.hasFnAttribute("patchable-function-entry")) {
453 unsigned Num;
454 if (F.getFnAttribute("patchable-function-entry")
455 .getValueAsString()
456 .getAsInteger(10, Num))
457 return;
458 emitNops(Num);
459 return;
460 }
461
462 emitSled(MI, SledKind::FUNCTION_ENTER);
463}
464
// Lowers PATCHABLE_FUNCTION_EXIT by emitting an XRay function-exit sled.
void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  emitSled(MI, SledKind::FUNCTION_EXIT);
}
468
// Lowers PATCHABLE_TAIL_CALL by emitting an XRay tail-call sled.
void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  emitSled(MI, SledKind::TAIL_CALL);
}
472
473void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
474 static const int8_t NoopsInSledCount = 7;
475 // We want to emit the following pattern:
476 //
477 // .Lxray_sled_N:
478 // ALIGN
479 // B #32
480 // ; 7 NOP instructions (28 bytes)
481 // .tmpN
482 //
483 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
484 // over the full 32 bytes (8 instructions) with the following pattern:
485 //
486 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
487 // LDR W17, #12 ; W17 := function ID
488 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
489 // BLR X16 ; call the tracing trampoline
490 // ;DATA: 32 bits of function ID
491 // ;DATA: lower 32 bits of the address of the trampoline
492 // ;DATA: higher 32 bits of the address of the trampoline
493 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
494 //
495 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
496 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
497 OutStreamer->emitLabel(CurSled);
498 auto Target = OutContext.createTempSymbol();
499
500 // Emit "B #32" instruction, which jumps over the next 28 bytes.
501 // The operand has to be the number of 4-byte instructions to jump over,
502 // including the current instruction.
503 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
504
505 for (int8_t I = 0; I < NoopsInSledCount; I++)
506 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::NOP));
507
508 OutStreamer->emitLabel(Target);
509 recordSled(CurSled, MI, Kind, 2);
510}
511
// Emits the AArch64 build-attributes subsections (aeabi-pauthabi and the
// feature-and-bits subsection) when any relevant flag/value is set.
void AArch64AsmPrinter::emitAttributes(unsigned Flags,
                                       uint64_t PAuthABIPlatform,
                                       uint64_t PAuthABIVersion,
                                       AArch64TargetStreamer *TS) {

  // uint64_t(-1) is the "not specified" sentinel; treat it as zero here.
  PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
  PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;

  if (PAuthABIPlatform || PAuthABIVersion) {
    // NOTE(review): the emitSubsection/emitAttribute call heads were elided
    // here by the extraction; only trailing argument lines survive.
        AArch64BuildAttributes::SubsectionOptional::REQUIRED,
        AArch64BuildAttributes::SubsectionType::ULEB128);
        PAuthABIPlatform, "");
        "");
  }

  // NOTE(review): the initializer expressions of the three values below
  // (presumably derived from the Flags argument) were elided by the
  // extraction.
  unsigned BTIValue =
  unsigned PACValue =
  unsigned GCSValue =

  if (BTIValue || PACValue || GCSValue) {
    // NOTE(review): the emitSubsection/emitAttribute calls for the
    // feature-and-bits subsection were elided here by the extraction.
        AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
        AArch64BuildAttributes::SubsectionType::ULEB128);
  }
}
560
561// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
562// (built-in functions __xray_customevent/__xray_typedevent).
563//
564// .Lxray_event_sled_N:
565// b 1f
566// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
567// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
568// bl __xray_CustomEvent or __xray_TypedEvent
569// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
570// 1:
571//
572// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
573//
574// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
575// After patching, b .+N will become a nop.
// Lowers PATCHABLE_EVENT_CALL / PATCHABLE_TYPED_EVENT_CALL: emits an XRay
// event sled (see the layout comment above) that, until patched, skips its
// own body via the leading branch. Typed events use three arguments
// (x0..x2); custom events use two (x0, x1).
void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                                  bool Typed) {
  auto &O = *OutStreamer;
  MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
  O.emitLabel(CurSled);
  // Mach-O symbol names carry a leading underscore.
  bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
  auto *Sym = MCSymbolRefExpr::create(
      OutContext.getOrCreateSymbol(
          Twine(MachO ? "_" : "") +
          (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
      OutContext);
  if (Typed) {
    O.AddComment("Begin XRay typed event");
    // Branch over the 9-instruction body (immediate counts 4-byte units).
    EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
    // Save x0/x1 pre-indexed (reserving 4 slots) and x2 above them.
    EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(-4));
    EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
                          .addReg(AArch64::X2)
                          .addReg(AArch64::SP)
                          .addImm(2));
    // Marshal the event operands into x0..x2 and call the handler.
    emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
    emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
    emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
    EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
    // Restore x2, then x0/x1 (post-indexed pop of the reserved slots).
    EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
                          .addReg(AArch64::X2)
                          .addReg(AArch64::SP)
                          .addImm(2));
    O.AddComment("End XRay typed event");
    EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(4));

    recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
  } else {
    O.AddComment("Begin XRay custom event");
    // Branch over the 6-instruction body.
    EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
    EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(-2));
    emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
    emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
    EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
    O.AddComment("End XRay custom event");
    EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
                          .addReg(AArch64::SP)
                          .addReg(AArch64::X0)
                          .addReg(AArch64::X1)
                          .addReg(AArch64::SP)
                          .addImm(2));

    recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
  }
}
640
// Expands KCFI_CHECK: loads the type hash stored before the call target's
// entry, compares it against the expected hash, and traps with a BRK whose
// immediate encodes the registers involved (see ESR layout below) when they
// differ. Must immediately precede the call it guards.
void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
  Register AddrReg = MI.getOperand(0).getReg();
  assert(std::next(MI.getIterator())->isCall() &&
         "KCFI_CHECK not followed by a call instruction");
  assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
         "KCFI_CHECK call target doesn't match call operand");

  // Default to using the intra-procedure-call temporary registers for
  // comparing the hashes.
  unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
  if (AddrReg == AArch64::XZR) {
    // Checking XZR makes no sense. Instead of emitting a load, zero
    // ScratchRegs[0] and use it for the ESR AddrIndex below.
    AddrReg = getXRegFromWReg(ScratchRegs[0]);
    emitMovXReg(AddrReg, AArch64::XZR);
  } else {
    // If one of the scratch registers is used for the call target (e.g.
    // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
    // temporary register instead (in this case, AArch64::W9) as the check
    // is immediately followed by the call instruction.
    for (auto &Reg : ScratchRegs) {
      if (Reg == getWRegFromXReg(AddrReg)) {
        Reg = AArch64::W9;
        break;
      }
    }
    assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
           "Invalid scratch registers for KCFI_CHECK");

    // Adjust the offset for patchable-function-prefix. This assumes that
    // patchable-function-prefix is the same for all functions.
    int64_t PrefixNops = 0;
    (void)MI.getMF()
        ->getFunction()
        .getFnAttribute("patchable-function-prefix")
        .getValueAsString()
        .getAsInteger(10, PrefixNops);

    // Load the target function type hash.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
                                     .addReg(ScratchRegs[0])
                                     .addReg(AddrReg)
                                     .addImm(-(PrefixNops * 4 + 4)));
  }

  // Load the expected type hash.
  const int64_t Type = MI.getOperand(1).getImm();
  emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
  emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);

  // Compare the hashes and trap if there's a mismatch.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
                                   .addReg(AArch64::WZR)
                                   .addReg(ScratchRegs[0])
                                   .addReg(ScratchRegs[1])
                                   .addImm(0));

  MCSymbol *Pass = OutContext.createTempSymbol();
  EmitToStreamer(*OutStreamer,
                 MCInstBuilder(AArch64::Bcc)
                     .addImm(AArch64CC::EQ)
                     .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));

  // The base ESR is 0x8000 and the register information is encoded in bits
  // 0-9 as follows:
  // - 0-4: n, where the register Xn contains the target address
  // - 5-9: m, where the register Wm contains the expected type hash
  // Where n, m are in [0, 30].
  unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
  unsigned AddrIndex;
  switch (AddrReg) {
  default:
    AddrIndex = AddrReg - AArch64::X0;
    break;
  case AArch64::FP:
    AddrIndex = 29;
    break;
  case AArch64::LR:
    AddrIndex = 30;
    break;
  }

  assert(AddrIndex < 31 && TypeIndex < 31);

  unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
  OutStreamer->emitLabel(Pass);
}
729
730void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
731 Register Reg = MI.getOperand(0).getReg();
732
733 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
734 // statically known to be zero. However, conceivably, the HWASan pass may
735 // encounter a "cannot currently statically prove to be null" pointer (and is
736 // therefore unable to omit the intrinsic) that later optimization passes
737 // convert into a statically known-null pointer.
738 if (Reg == AArch64::XZR)
739 return;
740
741 bool IsShort =
742 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
743 (MI.getOpcode() ==
744 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
745 uint32_t AccessInfo = MI.getOperand(1).getImm();
746 bool IsFixedShadow =
747 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
748 (MI.getOpcode() ==
749 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
750 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
751
752 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
753 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
754 if (!Sym) {
755 // FIXME: Make this work on non-ELF.
756 if (!TM.getTargetTriple().isOSBinFormatELF())
757 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
758
759 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
760 utostr(AccessInfo);
761 if (IsFixedShadow)
762 SymName += "_fixed_" + utostr(FixedShadowOffset);
763 if (IsShort)
764 SymName += "_short_v2";
765 Sym = OutContext.getOrCreateSymbol(SymName);
766 }
767
768 EmitToStreamer(*OutStreamer,
769 MCInstBuilder(AArch64::BL)
770 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
771}
772
// Emits the outlined HWASan checker routines recorded by
// LowerHWASAN_CHECK_MEMACCESS: for each unique configuration, a weak hidden
// function in a comdat .text.hot section that compares the pointer tag with
// the shadow tag, handles short granules and match-all tags, and tail-calls
// __hwasan_tag_mismatch(_v2) on a real mismatch.
void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  // Build a module-level subtarget so emission here doesn't depend on
  // whichever function happened to be printed last.
  AArch64Subtarget STI(TT, TM.getTargetCPU(), TM.getTargetCPU(),
                       TM.getTargetFeatureString(), TM, true);
  this->STI = &STI;

  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    bool IsFixedShadow = std::get<3>(P.first);
    uint64_t FixedShadowOffset = std::get<4>(P.first);
    // Short-granule checks use the v2 runtime entry point.
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    // Decode the packed access-info word.
    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    // NOTE(review): one argument line of getELFSection (section flags /
    // comdat group name) was elided here by the extraction.
    OutStreamer->switchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        /*IsComdat=*/true));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    // x16 := shadow index derived from the checked pointer.
    EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
                       .addImm(4)
                       .addImm(55));

    if (IsFixedShadow) {
      // Aarch64 makes it difficult to embed large constants in the code.
      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
      // left-shift option in the MOV instruction. Combined with the 16-bit
      // immediate, this is enough to represent any offset up to 2**48.
      emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    } else {
      // Shadow base lives in x20 (short granules) or x9 otherwise.
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
                         .addReg(AArch64::W16)
                         .addReg(IsShort ? AArch64::X20 : AArch64::X9)
                         .addReg(AArch64::X16)
                         .addImm(0)
                         .addImm(0));
    }

    // Compare the loaded shadow tag against the pointer's tag.
    // NOTE(review): the trailing shifter-operand line of this SUBSXrs was
    // elided here by the extraction.
    EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
                       .addReg(AArch64::XZR)
                       .addReg(AArch64::X16)
                       .addReg(Reg)
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    // NOTE(review): the .addExpr(MCSymbolRefExpr::create( line of this Bcc
    // was elided here by the extraction.
    EmitToStreamer(MCInstBuilder(AArch64::Bcc)
                       .addImm(AArch64CC::NE)
                       HandleMismatchOrPartialSym, OutContext)));
    // Fast path: tags match, just return.
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      // x17 := pointer tag; a match-all tag is never reported.
      EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(56)
                         .addImm(63));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
                         .addReg(AArch64::XZR)
                         .addReg(AArch64::X17)
                         .addImm(MatchAllTag)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
    }

    if (IsShort) {
      // Shadow values 1..15 describe short granules: only the first
      // (shadow value) bytes of the 16-byte granule are accessible, and the
      // granule's last byte must hold the pointer tag.
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addImm(15)
                         .addImm(0));
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // x17 := offset of the access's last byte within the granule.
      EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
                         .addReg(AArch64::X17)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      if (Size != 1)
        EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                           .addReg(AArch64::X17)
                           .addReg(AArch64::X17)
                           .addImm(Size - 1)
                           .addImm(0));
      EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
                         .addReg(AArch64::WZR)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::W17)
                         .addImm(0));
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));

      // x16 := address of the granule's last byte; its contents must equal
      // the pointer tag for a valid short-granule access.
      EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
                         .addReg(AArch64::X16)
                         .addReg(Reg)
                         .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
                         .addReg(AArch64::W16)
                         .addReg(AArch64::X16)
                         .addImm(0));
      // NOTE(review): the trailing shifter-operand line of this SUBSXrs was
      // elided here by the extraction.
      EmitToStreamer(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
      EmitToStreamer(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    // Real mismatch: spill x0/x1 and the frame record, marshal arguments,
    // and transfer to the runtime reporting function.
    EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
                       .addReg(AArch64::SP)
                       .addReg(AArch64::X0)
                       .addReg(AArch64::X1)
                       .addReg(AArch64::SP)
                       .addImm(-32));
    EmitToStreamer(MCInstBuilder(AArch64::STPXi)
                       .addReg(AArch64::FP)
                       .addReg(AArch64::LR)
                       .addReg(AArch64::SP)
                       .addImm(29));

    if (Reg != AArch64::X0)
      emitMovXReg(AArch64::X0, Reg);
    emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
    } else {
      // Intentionally load the GOT entry and branch to it, rather than possibly
      // late binding the function, which may clobber the registers before we
      // have a chance to save them.
      // NOTE(review): the relocation-specifier argument lines of the two
      // MCSpecifierExpr::create calls below were elided by the extraction.
      EmitToStreamer(MCInstBuilder(AArch64::ADRP)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
                                                          OutContext)));
      EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
    }
  }
  this->STI = nullptr;
}
976
// Emit one 8-byte authenticated-pointer stub: the stub's label followed by
// the signed-pointer expression as its value.
static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
                                     MCSymbol *StubLabel,
                                     const MCExpr *StubAuthPtrRef) {
  // sym$auth_ptr$key$disc:
  OutStreamer.emitLabel(StubLabel);
  // 8 bytes: pointer-sized on AArch64.
  OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
}
984
985void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
986 emitHwasanMemaccessSymbols(M);
987
988 const Triple &TT = TM.getTargetTriple();
989 if (TT.isOSBinFormatMachO()) {
990 // Output authenticated pointers as indirect symbols, if we have any.
991 MachineModuleInfoMachO &MMIMacho =
992 MMI->getObjFileInfo<MachineModuleInfoMachO>();
993
994 auto Stubs = MMIMacho.getAuthGVStubList();
995
996 if (!Stubs.empty()) {
997 // Switch to the "__auth_ptr" section.
998 OutStreamer->switchSection(
999 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
1001 emitAlignment(Align(8));
1002
1003 for (const auto &Stub : Stubs)
1004 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1005
1006 OutStreamer->addBlankLine();
1007 }
1008
1009 // Funny Darwin hack: This flag tells the linker that no global symbols
1010 // contain code that falls through to other global symbols (e.g. the obvious
1011 // implementation of multiple entry points). If this doesn't occur, the
1012 // linker can safely perform dead code stripping. Since LLVM never
1013 // generates code that does this, it is always safe to set.
1014 OutStreamer->emitSubsectionsViaSymbols();
1015 }
1016
1017 if (TT.isOSBinFormatELF()) {
1018 // Output authenticated pointers as indirect symbols, if we have any.
1019 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
1020
1021 auto Stubs = MMIELF.getAuthGVStubList();
1022
1023 if (!Stubs.empty()) {
1024 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1025 OutStreamer->switchSection(TLOF.getDataSection());
1026 emitAlignment(Align(8));
1027
1028 for (const auto &Stub : Stubs)
1029 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
1030
1031 OutStreamer->addBlankLine();
1032 }
1033
1034 // With signed ELF GOT enabled, the linker looks at the symbol type to
1035 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
1036 // for functions not defined in the module have STT_NOTYPE type by default.
1037 // This makes linker to emit signing schema with DA key (instead of IA) for
1038 // corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that, force
1039 // all function symbols used in the module to have STT_FUNC type. See
1040 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
1041 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1042 M.getModuleFlag("ptrauth-elf-got"));
1043 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1044 for (const GlobalValue &GV : M.global_values())
1045 if (!GV.use_empty() && isa<Function>(GV) &&
1046 !GV.getName().starts_with("llvm."))
1047 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1049 }
1050
1051 // Emit stack and fault map information.
1053
1054 // If import call optimization is enabled, emit the appropriate section.
1055 // We do this whether or not we recorded any import calls.
1056 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1057 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1058
1059 // Section always starts with some magic.
1060 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1061 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1062
1063 // Layout of this section is:
1064 // Per section that contains calls to imported functions:
1065 // uint32_t SectionSize: Size in bytes for information in this section.
1066 // uint32_t Section Number
1067 // Per call to imported function in section:
1068 // uint32_t Kind: the kind of imported function.
1069 // uint32_t BranchOffset: the offset of the branch instruction in its
1070 // parent section.
1071 // uint32_t TargetSymbolId: the symbol id of the called function.
1072 for (auto &[Section, CallsToImportedFuncs] :
1073 SectionToImportedFunctionCalls) {
1074 unsigned SectionSize =
1075 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1076 OutStreamer->emitInt32(SectionSize);
1077 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1078 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1079 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1080 OutStreamer->emitInt32(0x13);
1081 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1082 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1083 }
1084 }
1085 }
1086}
1087
1088void AArch64AsmPrinter::emitLOHs() {
1090
1091 for (const auto &D : AArch64FI->getLOHContainer()) {
1092 for (const MachineInstr *MI : D.getArgs()) {
1093 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1094 assert(LabelIt != LOHInstToLabel.end() &&
1095 "Label hasn't been inserted for LOH related instruction");
1096 MCArgs.push_back(LabelIt->second);
1097 }
1098 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1099 MCArgs.clear();
1100 }
1101}
1102
1103void AArch64AsmPrinter::emitFunctionBodyEnd() {
1104 if (!AArch64FI->getLOHRelated().empty())
1105 emitLOHs();
1106}
1107
1108/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1109MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1110 // Darwin uses a linker-private symbol name for constant-pools (to
1111 // avoid addends on the relocation?), ELF has no such concept and
1112 // uses a normal private symbol.
1113 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1114 return OutContext.getOrCreateSymbol(
1115 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1116 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1117
1118 return AsmPrinter::GetCPISymbol(CPID);
1119}
1120
1121void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1122 raw_ostream &O) {
1123 const MachineOperand &MO = MI->getOperand(OpNum);
1124 switch (MO.getType()) {
1125 default:
1126 llvm_unreachable("<unknown operand type>");
1128 Register Reg = MO.getReg();
1130 assert(!MO.getSubReg() && "Subregs should be eliminated!");
1132 break;
1133 }
1135 O << MO.getImm();
1136 break;
1137 }
1139 PrintSymbolOperand(MO, O);
1140 break;
1141 }
1143 MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1144 Sym->print(O, MAI);
1145 break;
1146 }
1147 }
1148}
1149
1150bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1151 raw_ostream &O) {
1152 Register Reg = MO.getReg();
1153 switch (Mode) {
1154 default:
1155 return true; // Unknown mode.
1156 case 'w':
1158 break;
1159 case 'x':
1161 break;
1162 case 't':
1164 break;
1165 }
1166
1168 return false;
1169}
1170
1171// Prints the register in MO using class RC using the offset in the
1172// new register class. This should not be used for cross class
1173// printing.
1174bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1175 const TargetRegisterClass *RC,
1176 unsigned AltName, raw_ostream &O) {
1177 assert(MO.isReg() && "Should only get here with a register!");
1178 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1179 Register Reg = MO.getReg();
1180 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1181 if (!RI->regsOverlap(RegToPrint, Reg))
1182 return true;
1183 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1184 return false;
1185}
1186
1187bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1188 const char *ExtraCode, raw_ostream &O) {
1189 const MachineOperand &MO = MI->getOperand(OpNum);
1190
1191 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1192 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1193 return false;
1194
1195 // Does this asm operand have a single letter operand modifier?
1196 if (ExtraCode && ExtraCode[0]) {
1197 if (ExtraCode[1] != 0)
1198 return true; // Unknown modifier.
1199
1200 switch (ExtraCode[0]) {
1201 default:
1202 return true; // Unknown modifier.
1203 case 'w': // Print W register
1204 case 'x': // Print X register
1205 if (MO.isReg())
1206 return printAsmMRegister(MO, ExtraCode[0], O);
1207 if (MO.isImm() && MO.getImm() == 0) {
1208 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1210 return false;
1211 }
1212 printOperand(MI, OpNum, O);
1213 return false;
1214 case 'b': // Print B register.
1215 case 'h': // Print H register.
1216 case 's': // Print S register.
1217 case 'd': // Print D register.
1218 case 'q': // Print Q register.
1219 case 'z': // Print Z register.
1220 if (MO.isReg()) {
1221 const TargetRegisterClass *RC;
1222 switch (ExtraCode[0]) {
1223 case 'b':
1224 RC = &AArch64::FPR8RegClass;
1225 break;
1226 case 'h':
1227 RC = &AArch64::FPR16RegClass;
1228 break;
1229 case 's':
1230 RC = &AArch64::FPR32RegClass;
1231 break;
1232 case 'd':
1233 RC = &AArch64::FPR64RegClass;
1234 break;
1235 case 'q':
1236 RC = &AArch64::FPR128RegClass;
1237 break;
1238 case 'z':
1239 RC = &AArch64::ZPRRegClass;
1240 break;
1241 default:
1242 return true;
1243 }
1244 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1245 }
1246 printOperand(MI, OpNum, O);
1247 return false;
1248 }
1249 }
1250
1251 // According to ARM, we should emit x and v registers unless we have a
1252 // modifier.
1253 if (MO.isReg()) {
1254 Register Reg = MO.getReg();
1255
1256 // If this is a w or x register, print an x register.
1257 if (AArch64::GPR32allRegClass.contains(Reg) ||
1258 AArch64::GPR64allRegClass.contains(Reg))
1259 return printAsmMRegister(MO, 'x', O);
1260
1261 // If this is an x register tuple, print an x register.
1262 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1263 return printAsmMRegister(MO, 't', O);
1264
1265 unsigned AltName = AArch64::NoRegAltName;
1266 const TargetRegisterClass *RegClass;
1267 if (AArch64::ZPRRegClass.contains(Reg)) {
1268 RegClass = &AArch64::ZPRRegClass;
1269 } else if (AArch64::PPRRegClass.contains(Reg)) {
1270 RegClass = &AArch64::PPRRegClass;
1271 } else if (AArch64::PNRRegClass.contains(Reg)) {
1272 RegClass = &AArch64::PNRRegClass;
1273 } else {
1274 RegClass = &AArch64::FPR128RegClass;
1275 AltName = AArch64::vreg;
1276 }
1277
1278 // If this is a b, h, s, d, or q register, print it as a v register.
1279 return printAsmRegInClass(MO, RegClass, AltName, O);
1280 }
1281
1282 printOperand(MI, OpNum, O);
1283 return false;
1284}
1285
1286bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1287 unsigned OpNum,
1288 const char *ExtraCode,
1289 raw_ostream &O) {
1290 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1291 return true; // Unknown modifier.
1292
1293 const MachineOperand &MO = MI->getOperand(OpNum);
1294 assert(MO.isReg() && "unexpected inline asm memory operand");
1295 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1296 return false;
1297}
1298
1299void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1300 raw_ostream &OS) {
1301 unsigned NOps = MI->getNumOperands();
1302 assert(NOps == 4);
1303 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1304 // cast away const; DIetc do not take const operands for some reason.
1305 OS << MI->getDebugVariable()->getName();
1306 OS << " <- ";
1307 // Frame address. Currently handles register +- offset only.
1308 assert(MI->isIndirectDebugValue());
1309 OS << '[';
1310 for (unsigned I = 0, E = llvm::size(MI->debug_operands()); I < E; ++I) {
1311 if (I != 0)
1312 OS << ", ";
1313 printOperand(MI, I, OS);
1314 }
1315 OS << ']';
1316 OS << "+";
1317 printOperand(MI, NOps - 2, OS);
1318}
1319
1320void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1321 ArrayRef<unsigned> JumpTableIndices) {
1322 // Fast return if there is nothing to emit to avoid creating empty sections.
1323 if (JumpTableIndices.empty())
1324 return;
1325 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1326 const auto &F = MF->getFunction();
1328
1329 MCSection *ReadOnlySec = nullptr;
1330 if (TM.Options.EnableStaticDataPartitioning) {
1331 ReadOnlySec =
1332 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1333 } else {
1334 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1335 }
1336 OutStreamer->switchSection(ReadOnlySec);
1337
1338 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1339 for (unsigned JTI : JumpTableIndices) {
1340 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1341
1342 // If this jump table was deleted, ignore it.
1343 if (JTBBs.empty()) continue;
1344
1345 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1346 emitAlignment(Align(Size));
1347 OutStreamer->emitLabel(GetJTISymbol(JTI));
1348
1349 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1350 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1351
1352 for (auto *JTBB : JTBBs) {
1353 const MCExpr *Value =
1354 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1355
1356 // Each entry is:
1357 // .byte/.hword (LBB - Lbase)>>2
1358 // or plain:
1359 // .word LBB - Lbase
1360 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1361 if (Size != 4)
1363 Value, MCConstantExpr::create(2, OutContext), OutContext);
1364
1365 OutStreamer->emitValue(Value, Size);
1366 }
1367 }
1368}
1369
1370std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1372AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1373 const MachineInstr *BranchInstr,
1374 const MCSymbol *BranchLabel) const {
1375 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1376 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1378 switch (AFI->getJumpTableEntrySize(JTI)) {
1379 case 1:
1380 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1381 break;
1382 case 2:
1383 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1384 break;
1385 case 4:
1386 EntrySize = codeview::JumpTableEntrySize::Int32;
1387 break;
1388 default:
1389 llvm_unreachable("Unexpected jump table entry size");
1390 }
1391 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1392}
1393
1394void AArch64AsmPrinter::emitFunctionEntryLabel() {
1395 const Triple &TT = TM.getTargetTriple();
1396 if (TT.isOSBinFormatELF() &&
1397 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1398 MF->getFunction().getCallingConv() ==
1399 CallingConv::AArch64_SVE_VectorCall ||
1400 MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1401 auto *TS =
1402 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1403 TS->emitDirectiveVariantPCS(CurrentFnSym);
1404 }
1405
1407
1408 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1409 // For ARM64EC targets, a function definition's name is mangled differently
1410 // from the normal symbol, emit required aliases here.
1411 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1412 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1413 OutStreamer->emitAssignment(
1414 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1415 };
1416
1417 auto getSymbolFromMetadata = [&](StringRef Name) {
1418 MCSymbol *Sym = nullptr;
1419 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1420 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1421 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1422 }
1423 return Sym;
1424 };
1425
1426 SmallVector<MDNode *> UnmangledNames;
1427 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1428 for (MDNode *Node : UnmangledNames) {
1429 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1430 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1431 if (std::optional<std::string> MangledName =
1432 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1433 MCSymbol *ECMangledSym =
1434 MMI->getContext().getOrCreateSymbol(*MangledName);
1435 emitFunctionAlias(UnmangledSym, ECMangledSym);
1436 }
1437 }
1438 if (MCSymbol *ECMangledSym =
1439 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1440 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1441 }
1442}
1443
1444void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1445 const Constant *CV) {
1446 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1447 if (CPA->hasAddressDiscriminator() &&
1448 !CPA->hasSpecialAddressDiscriminator(
1451 "unexpected address discrimination value for ctors/dtors entry, only "
1452 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1453 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1454 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1455 // actual address discrimination value and only checks
1456 // hasAddressDiscriminator(), so it's OK to leave special address
1457 // discrimination value here.
1459}
1460
1461void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1462 const GlobalAlias &GA) {
1463 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1464 // Global aliases must point to a definition, but unmangled patchable
1465 // symbols are special and need to point to an undefined symbol with "EXP+"
1466 // prefix. Such undefined symbol is resolved by the linker by creating
1467 // x86 thunk that jumps back to the actual EC target.
1468 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1469 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1470 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1471 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1472
1473 OutStreamer->beginCOFFSymbolDef(ExpSym);
1474 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1475 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1477 OutStreamer->endCOFFSymbolDef();
1478
1479 OutStreamer->beginCOFFSymbolDef(Sym);
1480 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1481 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1483 OutStreamer->endCOFFSymbolDef();
1484 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1485 OutStreamer->emitAssignment(
1486 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1487 return;
1488 }
1489 }
1491}
1492
1493/// Small jump tables contain an unsigned byte or half, representing the offset
1494/// from the lowest-addressed possible destination to the desired basic
1495/// block. Since all instructions are 4-byte aligned, this is further compressed
1496/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1497/// materialize the correct destination we need:
1498///
1499/// adr xDest, .LBB0_0
1500/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1501/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1502void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1503 const llvm::MachineInstr &MI) {
1504 Register DestReg = MI.getOperand(0).getReg();
1505 Register ScratchReg = MI.getOperand(1).getReg();
1506 Register ScratchRegW =
1507 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1508 Register TableReg = MI.getOperand(2).getReg();
1509 Register EntryReg = MI.getOperand(3).getReg();
1510 int JTIdx = MI.getOperand(4).getIndex();
1511 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1512
1513 // This has to be first because the compression pass based its reachability
1514 // calculations on the start of the JumpTableDest instruction.
1515 auto Label =
1516 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1517
1518 // If we don't already have a symbol to use as the base, use the ADR
1519 // instruction itself.
1520 if (!Label) {
1522 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1523 OutStreamer.emitLabel(Label);
1524 }
1525
1526 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1527 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1528 .addReg(DestReg)
1529 .addExpr(LabelExpr));
1530
1531 // Load the number of instruction-steps to offset from the label.
1532 unsigned LdrOpcode;
1533 switch (Size) {
1534 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1535 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1536 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1537 default:
1538 llvm_unreachable("Unknown jump table size");
1539 }
1540
1541 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1542 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1543 .addReg(TableReg)
1544 .addReg(EntryReg)
1545 .addImm(0)
1546 .addImm(Size == 1 ? 0 : 1));
1547
1548 // Add to the already materialized base label address, multiplying by 4 if
1549 // compressed.
1550 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1551 .addReg(DestReg)
1552 .addReg(DestReg)
1553 .addReg(ScratchReg)
1554 .addImm(Size == 4 ? 0 : 2));
1555}
1556
// Lower the hardened jump-table dispatch pseudo: bounds-check the index in
// x16 against the table size (clamping out-of-range indices to entry #0),
// then load the PC-relative entry and branch, using only x16/x17 so no other
// register holds an attacker-influenceable branch target.
void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  assert(MJTI && "Can't lower jump-table dispatch without JTI");

  const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
  assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");

  // Emit:
  //     mov x17, #<size of table>             ; depending on table size, with MOVKs
  //     cmp x16, x17                          ; or #imm if table size fits in 12-bit
  //     csel x16, x16, xzr, ls                ; check for index overflow
  //
  //     adrp x17, Ltable@PAGE                 ; materialize table address
  //     add x17, Ltable@PAGEOFF
  //     ldrsw x16, [x17, x16, lsl #2]         ; load table entry
  //
  //   Lanchor:
  //     adr x17, Lanchor                      ; compute target address
  //     add x16, x17, x16
  //     br x16                                ; branch to target

  MachineOperand JTOp = MI.getOperand(0);

  unsigned JTI = JTOp.getIndex();
  assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
         "unsupported compressed jump table");

  const uint64_t NumTableEntries = JTs[JTI].MBBs.size();

  // cmp only supports a 12-bit immediate.  If we need more, materialize the
  // immediate, using x17 as a scratch register.
  uint64_t MaxTableEntry = NumTableEntries - 1;
  if (isUInt<12>(MaxTableEntry)) {
    // cmp x16, #MaxTableEntry (flags-only SUBS against xzr).
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addImm(MaxTableEntry)
                                     .addImm(0));
  } else {
    emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
    // It's sad that we have to manually materialize instructions, but we can't
    // trivially reuse the main pseudo expansion logic.
    // A MOVK sequence is easy enough to generate and handles the general case.
    for (int Offset = 16; Offset < 64; Offset += 16) {
      if ((MaxTableEntry >> Offset) == 0)
        break;
      emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
               Offset);
    }
    // cmp x16, x17.
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
                                     .addReg(AArch64::XZR)
                                     .addReg(AArch64::X16)
                                     .addReg(AArch64::X17)
                                     .addImm(0));
  }

  // This picks entry #0 on failure.
  // We might want to trap instead.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::XZR)
                                   .addImm(AArch64CC::LS));

  // Prepare the @PAGE/@PAGEOFF low/high operands.
  MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
  MCOperand JTMCHi, JTMCLo;

  JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
  JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
  MCInstLowering.lowerOperand(JTMOLo, JTMCLo);

  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X17)
                                   .addOperand(JTMCLo)
                                   .addImm(0));

  // ldrsw x16, [x17, x16, lsl #2]: table entries are 4-byte signed offsets.
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0)
                                   .addImm(1));

  MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
  const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
  AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);

  OutStreamer->emitLabel(AdrLabel);
  EmitToStreamer(
      *OutStreamer,
      MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                   .addReg(AArch64::X16)
                                   .addReg(AArch64::X17)
                                   .addReg(AArch64::X16)
                                   .addImm(0));

  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
}
1665
1666void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1667 const llvm::MachineInstr &MI) {
1668 unsigned Opcode = MI.getOpcode();
1669 assert(STI->hasMOPS());
1670 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1671
1672 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1673 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1674 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1675 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1676 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1677 if (Opcode == AArch64::MOPSMemorySetPseudo)
1678 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1679 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1680 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1681 llvm_unreachable("Unhandled memory operation pseudo");
1682 }();
1683 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1684 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1685
1686 for (auto Op : Ops) {
1687 int i = 0;
1688 auto MCIB = MCInstBuilder(Op);
1689 // Destination registers
1690 MCIB.addReg(MI.getOperand(i++).getReg());
1691 MCIB.addReg(MI.getOperand(i++).getReg());
1692 if (!IsSet)
1693 MCIB.addReg(MI.getOperand(i++).getReg());
1694 // Input registers
1695 MCIB.addReg(MI.getOperand(i++).getReg());
1696 MCIB.addReg(MI.getOperand(i++).getReg());
1697 MCIB.addReg(MI.getOperand(i++).getReg());
1698
1699 EmitToStreamer(OutStreamer, MCIB);
1700 }
1701}
1702
1703void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1704 const MachineInstr &MI) {
1705 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1706
1707 auto &Ctx = OutStreamer.getContext();
1708 MCSymbol *MILabel = Ctx.createTempSymbol();
1709 OutStreamer.emitLabel(MILabel);
1710
1711 SM.recordStackMap(*MILabel, MI);
1712 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1713
1714 // Scan ahead to trim the shadow.
1715 const MachineBasicBlock &MBB = *MI.getParent();
1717 ++MII;
1718 while (NumNOPBytes > 0) {
1719 if (MII == MBB.end() || MII->isCall() ||
1720 MII->getOpcode() == AArch64::DBG_VALUE ||
1721 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1722 MII->getOpcode() == TargetOpcode::STACKMAP)
1723 break;
1724 ++MII;
1725 NumNOPBytes -= 4;
1726 }
1727
1728 // Emit nops.
1729 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1730 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1731}
1732
1733// Lower a patchpoint of the form:
1734// [<def>], <id>, <numBytes>, <target>, <numArgs>
1735void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1736 const MachineInstr &MI) {
1737 auto &Ctx = OutStreamer.getContext();
1738 MCSymbol *MILabel = Ctx.createTempSymbol();
1739 OutStreamer.emitLabel(MILabel);
1740 SM.recordPatchPoint(*MILabel, MI);
1741
1742 PatchPointOpers Opers(&MI);
1743
1744 int64_t CallTarget = Opers.getCallTarget().getImm();
1745 unsigned EncodedBytes = 0;
1746 if (CallTarget) {
1747 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1748 "High 16 bits of call target should be zero.");
1749 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1750 EncodedBytes = 16;
1751 // Materialize the jump address:
1752 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1753 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1754 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1755 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1756 }
1757 // Emit padding.
1758 unsigned NumBytes = Opers.getNumPatchBytes();
1759 assert(NumBytes >= EncodedBytes &&
1760 "Patchpoint can't request size less than the length of a call.");
1761 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1762 "Invalid number of NOP bytes requested!");
1763 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1764 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1765}
1766
1767void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1768 const MachineInstr &MI) {
1769 StatepointOpers SOpers(&MI);
1770 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1771 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1772 for (unsigned i = 0; i < PatchBytes; i += 4)
1773 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::NOP));
1774 } else {
1775 // Lower call target and choose correct opcode
1776 const MachineOperand &CallTarget = SOpers.getCallTarget();
1777 MCOperand CallTargetMCOp;
1778 unsigned CallOpcode;
1779 switch (CallTarget.getType()) {
1782 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1783 CallOpcode = AArch64::BL;
1784 break;
1786 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1787 CallOpcode = AArch64::BL;
1788 break;
1790 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1791 CallOpcode = AArch64::BLR;
1792 break;
1793 default:
1794 llvm_unreachable("Unsupported operand type in statepoint call target");
1795 break;
1796 }
1797
1798 EmitToStreamer(OutStreamer,
1799 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1800 }
1801
1802 auto &Ctx = OutStreamer.getContext();
1803 MCSymbol *MILabel = Ctx.createTempSymbol();
1804 OutStreamer.emitLabel(MILabel);
1805 SM.recordStatepoint(*MILabel, MI);
1806}
1807
1808void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1809 // FAULTING_LOAD_OP <def>, <faltinf type>, <MBB handler>,
1810 // <opcode>, <operands>
1811
1812 Register DefRegister = FaultingMI.getOperand(0).getReg();
1814 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1815 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1816 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1817 unsigned OperandsBeginIdx = 4;
1818
1819 auto &Ctx = OutStreamer->getContext();
1820 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1821 OutStreamer->emitLabel(FaultingLabel);
1822
1823 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1824 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1825
1826 MCInst MI;
1827 MI.setOpcode(Opcode);
1828
1829 if (DefRegister != (Register)0)
1830 MI.addOperand(MCOperand::createReg(DefRegister));
1831
1832 for (const MachineOperand &MO :
1833 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1834 MCOperand Dest;
1835 lowerOperand(MO, Dest);
1836 MI.addOperand(Dest);
1837 }
1838
1839 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1840 EmitToStreamer(MI);
1841}
1842
1843void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
1844 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1845 .addReg(Dest)
1846 .addReg(AArch64::XZR)
1847 .addReg(Src)
1848 .addImm(0));
1849}
1850
1851void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1852 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1853 EmitToStreamer(*OutStreamer,
1854 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1855 .addReg(Dest)
1856 .addImm(Imm)
1857 .addImm(Shift));
1858}
1859
1860void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1861 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1862 EmitToStreamer(*OutStreamer,
1863 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1864 .addReg(Dest)
1865 .addReg(Dest)
1866 .addImm(Imm)
1867 .addImm(Shift));
1868}
1869
1870void AArch64AsmPrinter::emitAUT(AArch64PACKey::ID Key, Register Pointer,
1871 Register Disc) {
1872 bool IsZeroDisc = Disc == AArch64::XZR;
1873 unsigned Opcode = getAUTOpcodeForKey(Key, IsZeroDisc);
1874
1875 // autiza x16 ; if IsZeroDisc
1876 // autia x16, x17 ; if !IsZeroDisc
1877 MCInst AUTInst;
1878 AUTInst.setOpcode(Opcode);
1879 AUTInst.addOperand(MCOperand::createReg(Pointer));
1880 AUTInst.addOperand(MCOperand::createReg(Pointer));
1881 if (!IsZeroDisc)
1882 AUTInst.addOperand(MCOperand::createReg(Disc));
1883
1884 EmitToStreamer(AUTInst);
1885}
1886
1887void AArch64AsmPrinter::emitPAC(AArch64PACKey::ID Key, Register Pointer,
1888 Register Disc) {
1889 bool IsZeroDisc = Disc == AArch64::XZR;
1890 unsigned Opcode = getPACOpcodeForKey(Key, IsZeroDisc);
1891
1892 // paciza x16 ; if IsZeroDisc
1893 // pacia x16, x17 ; if !IsZeroDisc
1894 MCInst PACInst;
1895 PACInst.setOpcode(Opcode);
1896 PACInst.addOperand(MCOperand::createReg(Pointer));
1897 PACInst.addOperand(MCOperand::createReg(Pointer));
1898 if (!IsZeroDisc)
1899 PACInst.addOperand(MCOperand::createReg(Disc));
1900
1901 EmitToStreamer(PACInst);
1902}
1903
1904void AArch64AsmPrinter::emitBLRA(bool IsCall, AArch64PACKey::ID Key,
1905 Register Target, Register Disc) {
1906 bool IsZeroDisc = Disc == AArch64::XZR;
1907 unsigned Opcode = getBranchOpcodeForKey(IsCall, Key, IsZeroDisc);
1908
1909 // blraaz x16 ; if IsZeroDisc
1910 // blraa x16, x17 ; if !IsZeroDisc
1911 MCInst Inst;
1912 Inst.setOpcode(Opcode);
1913 Inst.addOperand(MCOperand::createReg(Target));
1914 if (!IsZeroDisc)
1915 Inst.addOperand(MCOperand::createReg(Disc));
1916 EmitToStreamer(Inst);
1917}
1918
1919void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1920 Register DestReg = MI.getOperand(0).getReg();
1921 if (!STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1922 if (STI->hasZeroCycleZeroingFPR64()) {
1923 // Convert H/S register to corresponding D register
1924 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1925 if (AArch64::FPR16RegClass.contains(DestReg))
1926 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1927 &AArch64::FPR64RegClass);
1928 else if (AArch64::FPR32RegClass.contains(DestReg))
1929 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1930 &AArch64::FPR64RegClass);
1931 else
1932 assert(AArch64::FPR64RegClass.contains(DestReg));
1933
1934 MCInst MOVI;
1935 MOVI.setOpcode(AArch64::MOVID);
1936 MOVI.addOperand(MCOperand::createReg(DestReg));
1938 EmitToStreamer(*OutStreamer, MOVI);
1939 ++NumZCZeroingInstrsFPR;
1940 } else if (STI->hasZeroCycleZeroingFPR128()) {
1941 // Convert H/S/D register to corresponding Q register
1942 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
1943 if (AArch64::FPR16RegClass.contains(DestReg)) {
1944 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::hsub,
1945 &AArch64::FPR128RegClass);
1946 } else if (AArch64::FPR32RegClass.contains(DestReg)) {
1947 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::ssub,
1948 &AArch64::FPR128RegClass);
1949 } else {
1950 assert(AArch64::FPR64RegClass.contains(DestReg));
1951 DestReg = TRI->getMatchingSuperReg(DestReg, AArch64::dsub,
1952 &AArch64::FPR128RegClass);
1953 }
1954
1955 MCInst MOVI;
1956 MOVI.setOpcode(AArch64::MOVIv2d_ns);
1957 MOVI.addOperand(MCOperand::createReg(DestReg));
1959 EmitToStreamer(*OutStreamer, MOVI);
1960 ++NumZCZeroingInstrsFPR;
1961 } else {
1962 emitFMov0AsFMov(MI, DestReg);
1963 }
1964 } else {
1965 emitFMov0AsFMov(MI, DestReg);
1966 }
1967}
1968
1969void AArch64AsmPrinter::emitFMov0AsFMov(const MachineInstr &MI,
1970 Register DestReg) {
1971 MCInst FMov;
1972 switch (MI.getOpcode()) {
1973 default:
1974 llvm_unreachable("Unexpected opcode");
1975 case AArch64::FMOVH0:
1976 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1977 if (!STI->hasFullFP16())
1978 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1979 FMov.addOperand(MCOperand::createReg(DestReg));
1980 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1981 break;
1982 case AArch64::FMOVS0:
1983 FMov.setOpcode(AArch64::FMOVWSr);
1984 FMov.addOperand(MCOperand::createReg(DestReg));
1985 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1986 break;
1987 case AArch64::FMOVD0:
1988 FMov.setOpcode(AArch64::FMOVXDr);
1989 FMov.addOperand(MCOperand::createReg(DestReg));
1990 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1991 break;
1992 }
1993 EmitToStreamer(*OutStreamer, FMov);
1994}
1995
1996Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint64_t Disc,
1997 Register AddrDisc,
1998 Register ScratchReg,
1999 bool MayClobberAddrDisc) {
2000 assert(isPtrauthRegSafe(ScratchReg) &&
2001 "Safe scratch register must be provided by the caller");
2002 assert(isUInt<16>(Disc) && "Constant discriminator is too wide");
2003
2004 // So far we've used NoRegister in pseudos. Now we need real encodings.
2005 if (AddrDisc == AArch64::NoRegister)
2006 AddrDisc = AArch64::XZR;
2007
2008 // If there is no constant discriminator, there's no blend involved:
2009 // just use the address discriminator register as-is (XZR or not).
2010 if (!Disc)
2011 return AddrDisc;
2012
2013 // If there's only a constant discriminator, MOV it into the scratch register.
2014 if (AddrDisc == AArch64::XZR) {
2015 emitMOVZ(ScratchReg, Disc, 0);
2016 return ScratchReg;
2017 }
2018
2019 // If there are both, emit a blend into the scratch register.
2020
2021 // Check if we can save one MOV instruction.
2022 if (MayClobberAddrDisc && isPtrauthRegSafe(AddrDisc)) {
2023 ScratchReg = AddrDisc;
2024 } else {
2025 emitMovXReg(ScratchReg, AddrDisc);
2026 assert(ScratchReg != AddrDisc &&
2027 "Forbidden to clobber AddrDisc, but have to");
2028 }
2029
2030 emitMOVK(ScratchReg, Disc, 48);
2031 return ScratchReg;
2032}
2033
2034/// Emit a code sequence to check an authenticated pointer value.
2035///
2036/// This function emits a sequence of instructions that checks if TestedReg was
2037/// authenticated successfully. On success, execution continues at the next
2038/// instruction after the sequence.
2039///
2040/// The action performed on failure depends on the OnFailure argument:
2041/// * if OnFailure is not nullptr, control is transferred to that label after
2042/// clearing the PAC field
2043/// * otherwise, BRK instruction is emitted to generate an error
2044void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
2045 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
2046 AArch64PAuth::AuthCheckMethod Method, const MCSymbol *OnFailure) {
2047 // Insert a sequence to check if authentication of TestedReg succeeded,
2048 // such as:
2049 //
2050 // - checked and clearing:
2051 // ; x16 is TestedReg, x17 is ScratchReg
2052 // mov x17, x16
2053 // xpaci x17
2054 // cmp x16, x17
2055 // b.eq Lsuccess
2056 // mov x16, x17
2057 // b Lend
2058 // Lsuccess:
2059 // ; skipped if authentication failed
2060 // Lend:
2061 // ...
2062 //
2063 // - checked and trapping:
2064 // mov x17, x16
2065 // xpaci x17
2066 // cmp x16, x17
2067 // b.eq Lsuccess
2068 // brk #<0xc470 + aut key>
2069 // Lsuccess:
2070 // ...
2071 //
2072 // See the documentation on AuthCheckMethod enumeration constants for
2073 // the specific code sequences that can be used to perform the check.
2075
2076 if (Method == AuthCheckMethod::None)
2077 return;
2078 if (Method == AuthCheckMethod::DummyLoad) {
2079 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
2080 .addReg(getWRegFromXReg(ScratchReg))
2081 .addReg(TestedReg)
2082 .addImm(0));
2083 assert(!OnFailure && "DummyLoad always traps on error");
2084 return;
2085 }
2086
2087 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
2088 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
2089 // mov Xscratch, Xtested
2090 emitMovXReg(ScratchReg, TestedReg);
2091
2092 if (Method == AuthCheckMethod::XPAC) {
2093 // xpac(i|d) Xscratch
2094 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2095 EmitToStreamer(
2096 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
2097 } else {
2098 // xpaclri
2099
2100 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
2101 assert(TestedReg == AArch64::LR &&
2102 "XPACHint mode is only compatible with checking the LR register");
2104 "XPACHint mode is only compatible with I-keys");
2105 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
2106 }
2107
2108 // cmp Xtested, Xscratch
2109 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
2110 .addReg(AArch64::XZR)
2111 .addReg(TestedReg)
2112 .addReg(ScratchReg)
2113 .addImm(0));
2114
2115 // b.eq Lsuccess
2116 EmitToStreamer(
2117 MCInstBuilder(AArch64::Bcc)
2118 .addImm(AArch64CC::EQ)
2119 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2120 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
2121 // eor Xscratch, Xtested, Xtested, lsl #1
2122 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
2123 .addReg(ScratchReg)
2124 .addReg(TestedReg)
2125 .addReg(TestedReg)
2126 .addImm(1));
2127 // tbz Xscratch, #62, Lsuccess
2128 EmitToStreamer(
2129 MCInstBuilder(AArch64::TBZX)
2130 .addReg(ScratchReg)
2131 .addImm(62)
2132 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2133 } else {
2134 llvm_unreachable("Unsupported check method");
2135 }
2136
2137 if (!OnFailure) {
2138 // Trapping sequences do a 'brk'.
2139 // brk #<0xc470 + aut key>
2140 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2141 } else {
2142 // Non-trapping checked sequences return the stripped result in TestedReg,
2143 // skipping over success-only code (such as re-signing the pointer) by
2144 // jumping to OnFailure label.
2145 // Note that this can introduce an authentication oracle (such as based on
2146 // the high bits of the re-signed value).
2147
2148 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2149 // instead of ScratchReg, thus eliminating one `mov` instruction.
2150 // Both XPAC and XPACHint can be further optimized by not using a
2151 // conditional branch jumping over an unconditional one.
2152
2153 switch (Method) {
2154 case AuthCheckMethod::XPACHint:
2155 // LR is already XPAC-ed at this point.
2156 break;
2157 case AuthCheckMethod::XPAC:
2158 // mov Xtested, Xscratch
2159 emitMovXReg(TestedReg, ScratchReg);
2160 break;
2161 default:
2162 // If Xtested was not XPAC-ed so far, emit XPAC here.
2163 // xpac(i|d) Xtested
2164 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2165 EmitToStreamer(
2166 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2167 }
2168
2169 // b Lend
2170 const auto *OnFailureExpr = MCSymbolRefExpr::create(OnFailure, OutContext);
2171 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(OnFailureExpr));
2172 }
2173
2174 // If the auth check succeeds, we can continue.
2175 // Lsuccess:
2176 OutStreamer->emitLabel(SuccessSym);
2177}
2178
2179// With Pointer Authentication, it may be needed to explicitly check the
2180// authenticated value in LR before performing a tail call.
2181// Otherwise, the callee may re-sign the invalid return address,
2182// introducing a signing oracle.
2183void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2184 if (!AArch64FI->shouldSignReturnAddress(*MF))
2185 return;
2186
2187 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2188 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2189 return;
2190
2191 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2192 Register ScratchReg =
2193 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2194 assert(!TC->readsRegister(ScratchReg, TRI) &&
2195 "Neither x16 nor x17 is available as a scratch register");
2198 emitPtrauthCheckAuthenticatedValue(AArch64::LR, ScratchReg, Key,
2199 LRCheckMethod);
2200}
2201
2202bool AArch64AsmPrinter::emitDeactivationSymbolRelocation(Value *DS) {
2203 if (!DS)
2204 return false;
2205
2206 if (isa<GlobalAlias>(DS)) {
2207 // Just emit the nop directly.
2208 EmitToStreamer(MCInstBuilder(AArch64::NOP));
2209 return true;
2210 }
2211 MCSymbol *Dot = OutContext.createTempSymbol();
2212 OutStreamer->emitLabel(Dot);
2213 const MCExpr *DeactDotExpr = MCSymbolRefExpr::create(Dot, OutContext);
2214
2215 const MCExpr *DSExpr = MCSymbolRefExpr::create(
2216 OutContext.getOrCreateSymbol(DS->getName()), OutContext);
2217 OutStreamer->emitRelocDirective(*DeactDotExpr, "R_AARCH64_PATCHINST", DSExpr,
2218 SMLoc());
2219 return false;
2220}
2221
2222void AArch64AsmPrinter::emitPtrauthAuthResign(
2223 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2224 const MachineOperand *AUTAddrDisc, Register Scratch,
2225 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2226 Register PACAddrDisc, Value *DS) {
2227 const bool IsAUTPAC = PACKey.has_value();
2228
2229 // We expand AUT/AUTPAC into a sequence of the form
2230 //
2231 // ; authenticate x16
2232 // ; check pointer in x16
2233 // Lsuccess:
2234 // ; sign x16 (if AUTPAC)
2235 // Lend: ; if not trapping on failure
2236 //
2237 // with the checking sequence chosen depending on whether/how we should check
2238 // the pointer and whether we should trap on failure.
2239
2240 // By default, auth/resign sequences check for auth failures.
2241 bool ShouldCheck = true;
2242 // In the checked sequence, we only trap if explicitly requested.
2243 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2244
2245 // On an FPAC CPU, you get traps whether you want them or not: there's
2246 // no point in emitting checks or traps.
2247 if (STI->hasFPAC())
2248 ShouldCheck = ShouldTrap = false;
2249
2250 // However, command-line flags can override this, for experimentation.
2251 switch (PtrauthAuthChecks) {
2253 break;
2255 ShouldCheck = ShouldTrap = false;
2256 break;
2258 ShouldCheck = true;
2259 ShouldTrap = false;
2260 break;
2262 ShouldCheck = ShouldTrap = true;
2263 break;
2264 }
2265
2266 // Compute aut discriminator
2267 Register AUTDiscReg = emitPtrauthDiscriminator(
2268 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2269
2270 if (!emitDeactivationSymbolRelocation(DS))
2271 emitAUT(AUTKey, AUTVal, AUTDiscReg);
2272
2273 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2274 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2275 return;
2276
2277 MCSymbol *EndSym = nullptr;
2278
2279 if (ShouldCheck) {
2280 if (IsAUTPAC && !ShouldTrap)
2281 EndSym = createTempSymbol("resign_end_");
2282
2283 emitPtrauthCheckAuthenticatedValue(
2284 AUTVal, Scratch, AUTKey, AArch64PAuth::AuthCheckMethod::XPAC, EndSym);
2285 }
2286
2287 // We already emitted unchecked and checked-but-non-trapping AUTs.
2288 // That left us with trapping AUTs, and AUTPACs.
2289 // Trapping AUTs don't need PAC: we're done.
2290 if (!IsAUTPAC)
2291 return;
2292
2293 // Compute pac discriminator
2294 Register PACDiscReg = emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2295 emitPAC(*PACKey, AUTVal, PACDiscReg);
2296
2297 // Lend:
2298 if (EndSym)
2299 OutStreamer->emitLabel(EndSym);
2300}
2301
2302void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2303 Register Val = MI->getOperand(1).getReg();
2304 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2305 uint64_t Disc = MI->getOperand(3).getImm();
2306 Register AddrDisc = MI->getOperand(4).getReg();
2307 bool AddrDiscKilled = MI->getOperand(4).isKill();
2308
2309 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2310 // register is available.
2311 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2312 assert(ScratchReg != AddrDisc &&
2313 "Neither X16 nor X17 is available as a scratch register");
2314
2315 // Compute pac discriminator
2316 Register DiscReg = emitPtrauthDiscriminator(
2317 Disc, AddrDisc, ScratchReg, /*MayClobberAddrDisc=*/AddrDiscKilled);
2318
2319 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
2320 return;
2321
2322 emitPAC(Key, Val, DiscReg);
2323}
2324
2325void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2326 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2327 unsigned BrTarget = MI->getOperand(0).getReg();
2328
2329 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2330 uint64_t Disc = MI->getOperand(2).getImm();
2331
2332 unsigned AddrDisc = MI->getOperand(3).getReg();
2333
2334 // Make sure AddrDisc is solely used to compute the discriminator.
2335 // While hardly meaningful, it is still possible to describe an authentication
2336 // of a pointer against its own value (instead of storage address) with
2337 // intrinsics, so use report_fatal_error instead of assert.
2338 if (BrTarget == AddrDisc)
2339 report_fatal_error("Branch target is signed with its own value");
2340
2341 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2342 // fact that x16 and x17 are described as clobbered by the MI instruction and
2343 // AddrDisc is not used as any other input.
2344 //
2345 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2346 // either x16 or x17, meaning the returned register is always among the
2347 // implicit-def'ed registers of BLRA pseudo. Now this property can be violated
2348 // if isX16X17Safer predicate is false, thus manually check if AddrDisc is
2349 // among x16 and x17 to prevent clobbering unexpected registers.
2350 //
2351 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2352 // declared as clobbering x16/x17.
2353 //
2354 // FIXME: Make use of `killed` flags and register masks instead.
2355 bool AddrDiscIsImplicitDef =
2356 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2357 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2358 AddrDiscIsImplicitDef);
2359 emitBLRA(IsCall, Key, BrTarget, DiscReg);
2360}
2361
// Emit instructions adding the (signed) constant Addend to register Reg.
// Small offsets (|Addend| < 2^24) are handled with one or two ADD/SUB
// immediates; larger offsets are materialized into Tmp with a MOVZ/MOVN +
// MOVK sequence and then added with a register-register ADD. Emits nothing
// when Addend is zero. Tmp is clobbered only on the large-offset path.
void AArch64AsmPrinter::emitAddImm(MCRegister Reg, int64_t Addend,
                                   MCRegister Tmp) {
  if (Addend != 0) {
    // Compute |Addend| via unsigned negation so INT64_MIN does not overflow.
    const uint64_t AbsOffset = (Addend > 0 ? Addend : -((uint64_t)Addend));
    const bool IsNeg = Addend < 0;
    if (isUInt<24>(AbsOffset)) {
      // One ADD/SUB per non-zero 12-bit chunk, using the LSL #12 shifted
      // form for the upper chunk (at most 2 instructions).
      for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
           BitPos += 12) {
        EmitToStreamer(
            MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
                .addReg(Reg)
                .addReg(Reg)
                .addImm((AbsOffset >> BitPos) & 0xfff)
                .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
      }
    } else {
      // Large offset: build the full 64-bit value in Tmp. For negative
      // addends start from MOVN (which sets all other bits to 1), so only
      // chunks that differ from 0xffff need a MOVK.
      const uint64_t UAddend = Addend;
      EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
                         .addReg(Tmp)
                         .addImm((IsNeg ? ~UAddend : UAddend) & 0xffff)
                         .addImm(/*shift=*/0));
      // Whether the 16-bit chunk at BitPos (and anything above it) still
      // differs from what MOVZ/MOVN already produced.
      auto NeedMovk = [IsNeg, UAddend](int BitPos) -> bool {
        assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
        uint64_t Shifted = UAddend >> BitPos;
        if (!IsNeg)
          return Shifted != 0;
        // Negative case: remaining chunks are all-ones unless a MOVK fixes
        // them; stop as soon as everything above is 0xffff.
        for (int I = 0; I != 64 - BitPos; I += 16)
          if (((Shifted >> I) & 0xffff) != 0xffff)
            return true;
        return false;
      };
      for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
        emitMOVK(Tmp, (UAddend >> BitPos) & 0xffff, BitPos);

      // add Reg, Reg, Tmp
      EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
                         .addReg(Reg)
                         .addReg(Reg)
                         .addReg(Tmp)
                         .addImm(/*shift=*/0));
    }
  }
}
2404
2405void AArch64AsmPrinter::emitAddress(MCRegister Reg, const MCExpr *Expr,
2406 MCRegister Tmp, bool DSOLocal,
2407 const MCSubtargetInfo &STI) {
2408 MCValue Val;
2409 if (!Expr->evaluateAsRelocatable(Val, nullptr))
2410 report_fatal_error("emitAddress could not evaluate");
2411 if (DSOLocal) {
2412 EmitToStreamer(
2413 MCInstBuilder(AArch64::ADRP)
2414 .addReg(Reg)
2416 OutStreamer->getContext())));
2417 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2418 .addReg(Reg)
2419 .addReg(Reg)
2420 .addExpr(MCSpecifierExpr::create(
2421 Expr, AArch64::S_LO12, OutStreamer->getContext()))
2422 .addImm(0));
2423 } else {
2424 auto *SymRef =
2425 MCSymbolRefExpr::create(Val.getAddSym(), OutStreamer->getContext());
2426 EmitToStreamer(
2427 MCInstBuilder(AArch64::ADRP)
2428 .addReg(Reg)
2430 OutStreamer->getContext())));
2431 EmitToStreamer(
2432 MCInstBuilder(AArch64::LDRXui)
2433 .addReg(Reg)
2434 .addReg(Reg)
2436 OutStreamer->getContext())));
2437 emitAddImm(Reg, Val.getConstant(), Tmp);
2438 }
2439}
2440
2442 // IFUNCs are ELF-only.
2443 if (!TT.isOSBinFormatELF())
2444 return false;
2445
2446 // IFUNCs are supported on glibc, bionic, and some but not all of the BSDs.
2447 return TT.isOSGlibc() || TT.isAndroid() || TT.isOSFreeBSD() ||
2448 TT.isOSDragonFly() || TT.isOSNetBSD();
2449}
2450
2451// Emit an ifunc resolver that returns a signed pointer to the specified target,
2452// and return a FUNCINIT reference to the resolver. In the linked binary, this
2453// function becomes the target of an IRELATIVE relocation. This resolver is used
2454// to relocate signed pointers in global variable initializers in special cases
2455// where the standard R_AARCH64_AUTH_ABS64 relocation would not work.
2456//
2457// Example (signed null pointer, not address discriminated):
2458//
2459// .8byte .Lpauth_ifunc0
2460// .pushsection .text.startup,"ax",@progbits
2461// .Lpauth_ifunc0:
2462// mov x0, #0
2463// mov x1, #12345
2464// b __emupac_pacda
2465//
2466// Example (signed null pointer, address discriminated):
2467//
2468// .Ltmp:
2469// .8byte .Lpauth_ifunc0
2470// .pushsection .text.startup,"ax",@progbits
2471// .Lpauth_ifunc0:
2472// mov x0, #0
2473// adrp x1, .Ltmp
2474// add x1, x1, :lo12:.Ltmp
2475// b __emupac_pacda
2476// .popsection
2477//
2478// Example (signed pointer to symbol, not address discriminated):
2479//
2480// .Ltmp:
2481// .8byte .Lpauth_ifunc0
2482// .pushsection .text.startup,"ax",@progbits
2483// .Lpauth_ifunc0:
2484// adrp x0, symbol
2485// add x0, x0, :lo12:symbol
2486// mov x1, #12345
2487// b __emupac_pacda
2488// .popsection
2489//
2490// Example (signed null pointer, not address discriminated, with deactivation
2491// symbol ds):
2492//
2493// .8byte .Lpauth_ifunc0
2494// .pushsection .text.startup,"ax",@progbits
2495// .Lpauth_ifunc0:
2496// mov x0, #0
2497// mov x1, #12345
2498// .reloc ., R_AARCH64_PATCHINST, ds
2499// b __emupac_pacda
2500// ret
2501// .popsection
2502const MCExpr *AArch64AsmPrinter::emitPAuthRelocationAsIRelative(
2503 const MCExpr *Target, uint64_t Disc, AArch64PACKey::ID KeyID,
2504 bool HasAddressDiversity, bool IsDSOLocal, const MCExpr *DSExpr) {
2505 const Triple &TT = TM.getTargetTriple();
2506
2507 // We only emit an IRELATIVE relocation if the target supports IRELATIVE.
2509 return nullptr;
2510
2511 // For now, only the DA key is supported.
2512 if (KeyID != AArch64PACKey::DA)
2513 return nullptr;
2514
2515 AArch64Subtarget STI(TT, TM.getTargetCPU(), TM.getTargetCPU(),
2516 TM.getTargetFeatureString(), TM, true);
2517 this->STI = &STI;
2518
2519 MCSymbol *Place = OutStreamer->getContext().createTempSymbol();
2520 OutStreamer->emitLabel(Place);
2521 OutStreamer->pushSection();
2522
2523 const MCSymbolELF *Group =
2524 static_cast<MCSectionELF *>(OutStreamer->getCurrentSectionOnly())
2525 ->getGroup();
2527 if (Group)
2529 OutStreamer->switchSection(OutStreamer->getContext().getELFSection(
2530 ".text.startup", ELF::SHT_PROGBITS, Flags, 0, Group, true,
2531 Group ? MCSection::NonUniqueID : PAuthIFuncNextUniqueID++, nullptr));
2532
2533 MCSymbol *IRelativeSym =
2534 OutStreamer->getContext().createLinkerPrivateSymbol("pauth_ifunc");
2535 OutStreamer->emitLabel(IRelativeSym);
2536 if (isa<MCConstantExpr>(Target)) {
2537 OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
2538 .addReg(AArch64::X0)
2539 .addExpr(Target)
2540 .addImm(0),
2541 STI);
2542 } else {
2543 emitAddress(AArch64::X0, Target, AArch64::X16, IsDSOLocal, STI);
2544 }
2545 if (HasAddressDiversity) {
2546 auto *PlacePlusDisc = MCBinaryExpr::createAdd(
2547 MCSymbolRefExpr::create(Place, OutStreamer->getContext()),
2548 MCConstantExpr::create(Disc, OutStreamer->getContext()),
2549 OutStreamer->getContext());
2550 emitAddress(AArch64::X1, PlacePlusDisc, AArch64::X16, /*IsDSOLocal=*/true,
2551 STI);
2552 } else {
2553 if (!isUInt<16>(Disc)) {
2554 OutContext.reportError(SMLoc(), "AArch64 PAC Discriminator '" +
2555 Twine(Disc) +
2556 "' out of range [0, 0xFFFF]");
2557 }
2558 emitMOVZ(AArch64::X1, Disc, 0);
2559 }
2560
2561 if (DSExpr) {
2562 MCSymbol *PrePACInst = OutStreamer->getContext().createTempSymbol();
2563 OutStreamer->emitLabel(PrePACInst);
2564
2565 auto *PrePACInstExpr =
2566 MCSymbolRefExpr::create(PrePACInst, OutStreamer->getContext());
2567 OutStreamer->emitRelocDirective(*PrePACInstExpr, "R_AARCH64_PATCHINST",
2568 DSExpr, SMLoc());
2569 }
2570
2571 // We don't know the subtarget because this is being emitted for a global
2572 // initializer. Because the performance of IFUNC resolvers is unimportant, we
2573 // always call the EmuPAC runtime, which will end up using the PAC instruction
2574 // if the target supports PAC.
2575 MCSymbol *EmuPAC =
2576 OutStreamer->getContext().getOrCreateSymbol("__emupac_pacda");
2577 const MCSymbolRefExpr *EmuPACRef =
2578 MCSymbolRefExpr::create(EmuPAC, OutStreamer->getContext());
2579 OutStreamer->emitInstruction(MCInstBuilder(AArch64::B).addExpr(EmuPACRef),
2580 STI);
2581
2582 // We need a RET despite the above tail call because the deactivation symbol
2583 // may replace the tail call with a NOP.
2584 if (DSExpr)
2585 OutStreamer->emitInstruction(
2586 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), STI);
2587 OutStreamer->popSection();
2588
2589 return MCSymbolRefExpr::create(IRelativeSym, AArch64::S_FUNCINIT,
2590 OutStreamer->getContext());
2591}
2592
2593const MCExpr *
2594AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2595 MCContext &Ctx = OutContext;
2596
2597 // Figure out the base symbol and the addend, if any.
2598 APInt Offset(64, 0);
2599 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2600 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2601
2602 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2603
2604 const MCExpr *Sym;
2605 if (BaseGVB) {
2606 // If there is an addend, turn that into the appropriate MCExpr.
2607 Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2608 if (Offset.sgt(0))
2610 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2611 else if (Offset.slt(0))
2613 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2614 } else {
2615 Sym = MCConstantExpr::create(Offset.getSExtValue(), Ctx);
2616 }
2617
2618 const MCExpr *DSExpr = nullptr;
2619 if (auto *DS = dyn_cast<GlobalValue>(CPA.getDeactivationSymbol())) {
2620 if (isa<GlobalAlias>(DS))
2621 return Sym;
2622 DSExpr = MCSymbolRefExpr::create(getSymbol(DS), Ctx);
2623 }
2624
2625 uint64_t KeyID = CPA.getKey()->getZExtValue();
2626 // We later rely on valid KeyID value in AArch64PACKeyIDToString call from
2627 // AArch64AuthMCExpr::printImpl, so fail fast.
2628 if (KeyID > AArch64PACKey::LAST) {
2629 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2630 "' out of range [0, " +
2631 Twine((unsigned)AArch64PACKey::LAST) + "]");
2632 KeyID = 0;
2633 }
2634
2635 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2636
2637 // Check if we can represent this with an IRELATIVE and emit it if so.
2638 if (auto *IFuncSym = emitPAuthRelocationAsIRelative(
2639 Sym, Disc, AArch64PACKey::ID(KeyID), CPA.hasAddressDiscriminator(),
2640 BaseGVB && BaseGVB->isDSOLocal(), DSExpr))
2641 return IFuncSym;
2642
2643 if (!isUInt<16>(Disc)) {
2644 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2645 "' out of range [0, 0xFFFF]");
2646 Disc = 0;
2647 }
2648
2649 if (DSExpr)
2650 report_fatal_error("deactivation symbols unsupported in constant "
2651 "expressions on this target");
2652
2653 // Finally build the complete @AUTH expr.
2654 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2655 CPA.hasAddressDiscriminator(), Ctx);
2656}
2657
2658void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2659 unsigned DstReg = MI.getOperand(0).getReg();
2660 const MachineOperand &GAOp = MI.getOperand(1);
2661 const uint64_t KeyC = MI.getOperand(2).getImm();
2662 assert(KeyC <= AArch64PACKey::LAST &&
2663 "key is out of range [0, AArch64PACKey::LAST]");
2664 const auto Key = (AArch64PACKey::ID)KeyC;
2665 const uint64_t Disc = MI.getOperand(3).getImm();
2666 assert(isUInt<16>(Disc) &&
2667 "constant discriminator is out of range [0, 0xffff]");
2668
2669 // Emit instruction sequence like the following:
2670 // ADRP x16, symbol$auth_ptr$key$disc
2671 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2672 //
2673 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2674 // to symbol.
2675 MCSymbol *AuthPtrStubSym;
2676 if (TM.getTargetTriple().isOSBinFormatELF()) {
2677 const auto &TLOF =
2678 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2679
2680 assert(GAOp.getOffset() == 0 &&
2681 "non-zero offset for $auth_ptr$ stub slots is not supported");
2682 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2683 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2684 } else {
2685 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2686 "LOADauthptrstatic is implemented only for MachO/ELF");
2687
2688 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2689 getObjFileLowering());
2690
2691 assert(GAOp.getOffset() == 0 &&
2692 "non-zero offset for $auth_ptr$ stub slots is not supported");
2693 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2694 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2695 }
2696
2697 MachineOperand StubMOHi =
2699 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2700 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2701 MCOperand StubMCHi, StubMCLo;
2702
2703 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2704 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2705
2706 EmitToStreamer(
2707 *OutStreamer,
2708 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2709
2710 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2711 .addReg(DstReg)
2712 .addReg(DstReg)
2713 .addOperand(StubMCLo));
2714}
2715
/// Lower the MOVaddrPAC / LOADgotPAC pseudos: materialize the address of a
/// global (directly, or through its GOT entry for LOADgotPAC), fold in the
/// operand's constant offset, and sign the result with the requested PAC key
/// and discriminator. The signed pointer is produced in x16; x16 and x17 are
/// used as scratch throughout (see the expansion sketch below).
void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
  const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
  const bool IsELFSignedGOT = MI.getParent()
                                  ->getParent()
                                  ->getInfo<AArch64FunctionInfo>()
                                  ->hasELFSignedGOT();
  // Pseudo operands: 0 = global address, 1 = PAC key, 2 = address
  // discriminator register, 3 = immediate discriminator.
  MachineOperand GAOp = MI.getOperand(0);
  const uint64_t KeyC = MI.getOperand(1).getImm();
  assert(KeyC <= AArch64PACKey::LAST &&
         "key is out of range [0, AArch64PACKey::LAST]");
  const auto Key = (AArch64PACKey::ID)KeyC;
  const unsigned AddrDisc = MI.getOperand(2).getReg();
  const uint64_t Disc = MI.getOperand(3).getImm();

  // The constant offset is added with a separate instruction sequence (see
  // emitAddImm below), not folded into the relocation, so strip it from the
  // operand before lowering.
  const int64_t Offset = GAOp.getOffset();
  GAOp.setOffset(0);

  // Emit:
  // target materialization:
  // - via GOT:
  //   - unsigned GOT:
  //     adrp x16, :got:target
  //     ldr x16, [x16, :got_lo12:target]
  //     add offset to x16 if offset != 0
  //   - ELF signed GOT:
  //     adrp x17, :got:target
  //     add x17, x17, :got_auth_lo12:target
  //     ldr x16, [x17]
  //     aut{i|d}a x16, x17
  //     check+trap sequence (if no FPAC)
  //     add offset to x16 if offset != 0
  //
  // - direct:
  //   adrp x16, target
  //   add x16, x16, :lo12:target
  //   add offset to x16 if offset != 0
  //
  // add offset to x16:
  // - abs(offset) fits 24 bits:
  //   add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
  // - abs(offset) does not fit 24 bits:
  //   - offset < 0:
  //     movn+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //   - offset > 0:
  //     movz+movk sequence filling x17 register with the offset (up to 4
  //     instructions)
  //     add x16, x16, x17
  //
  // signing:
  // - 0 discriminator:
  //   paciza x16
  // - Non-0 discriminator, no address discriminator:
  //   mov x17, #Disc
  //   pacia x16, x17
  // - address discriminator (with potentially folded immediate discriminator):
  //   pacia x16, xAddrDisc

  MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
  MCOperand GAMCHi, GAMCLo;

  GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
  GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  if (IsGOTLoad) {
    GAMOHi.addTargetFlag(AArch64II::MO_GOT);
    GAMOLo.addTargetFlag(AArch64II::MO_GOT);
  }

  MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
  MCInstLowering.lowerOperand(GAMOLo, GAMCLo);

  // Page address of the target (or its GOT slot). For the ELF signed-GOT
  // case the slot address is built in x17 so the loaded value can land in
  // x16 with x17 still holding the auth discriminator (the slot address).
  EmitToStreamer(
      MCInstBuilder(AArch64::ADRP)
          .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
          .addOperand(GAMCHi));

  if (IsGOTLoad) {
    if (IsELFSignedGOT) {
      EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                         .addReg(AArch64::X17)
                         .addReg(AArch64::X17)
                         .addOperand(GAMCLo)
                         .addImm(0));

      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X17)
                         .addImm(0));

      assert(GAOp.isGlobal());
      assert(GAOp.getGlobal()->getValueType() != nullptr);

      // Signed GOT entries use IA for code pointers and DA for data pointers.
      bool IsFunctionTy = GAOp.getGlobal()->getValueType()->isFunctionTy();
      auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
      emitAUT(AuthKey, AArch64::X16, AArch64::X17);

      // Without FPAC, authentication failure does not fault by itself, so
      // verify the authenticated value explicitly.
      if (!STI->hasFPAC())
        emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
                                           AArch64PAuth::AuthCheckMethod::XPAC);
    } else {
      EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                         .addReg(AArch64::X16)
                         .addReg(AArch64::X16)
                         .addOperand(GAMCLo));
    }
  } else {
    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X16)
                       .addReg(AArch64::X16)
                       .addOperand(GAMCLo)
                       .addImm(0));
  }

  // Fold in the stripped constant offset, then sign x16 with the computed
  // discriminator (x17 is scratch for both steps).
  emitAddImm(AArch64::X16, Offset, AArch64::X17);
  Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);

  emitPAC(Key, AArch64::X16, DiscReg);
}
2835
/// Lower LOADgotAUTH: load a signed pointer from the ELF authenticated GOT
/// and authenticate it, leaving the raw pointer in the destination register.
/// x17 is used as scratch for the slot address and doubles as the
/// authentication discriminator.
void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  // Without FPAC, authenticate into x16 first so the explicit check sequence
  // at the bottom can inspect the result before it reaches DstReg.
  Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
  const MachineOperand &GAMO = MI.getOperand(1);
  assert(GAMO.getOffset() == 0);

  if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
    // Tiny code model: the GOT slot is reachable with a single adr.
    MCOperand GAMC;
    MCInstLowering.lowerOperand(GAMO, GAMC);
    EmitToStreamer(
        MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  } else {
    // Other code models: adrp + add to form the slot address in x17.
    MachineOperand GAHiOp(GAMO);
    MachineOperand GALoOp(GAMO);
    GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
    GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    MCOperand GAMCHi, GAMCLo;
    MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
    MCInstLowering.lowerOperand(GALoOp, GAMCLo);

    EmitToStreamer(
        MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));

    EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
                       .addReg(AArch64::X17)
                       .addReg(AArch64::X17)
                       .addOperand(GAMCLo)
                       .addImm(0));

    EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
                       .addReg(AuthResultReg)
                       .addReg(AArch64::X17)
                       .addImm(0));
  }

  assert(GAMO.isGlobal());
  // An extern-weak symbol may resolve to null, and null is stored unsigned;
  // branch around the authentication when the loaded value is zero.
  // UndefWeakSym is created and consumed under the same linkage condition,
  // so it is never read uninitialized.
  MCSymbol *UndefWeakSym;
  if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
    UndefWeakSym = createTempSymbol("undef_weak");
    EmitToStreamer(
        MCInstBuilder(AArch64::CBZX)
            .addReg(AuthResultReg)
            .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
  }

  assert(GAMO.getGlobal()->getValueType() != nullptr);

  // Key choice mirrors the signing schema: code pointers use IA, data
  // pointers use DA.
  bool IsFunctionTy = GAMO.getGlobal()->getValueType()->isFunctionTy();
  auto AuthKey = IsFunctionTy ? AArch64PACKey::IA : AArch64PACKey::DA;
  emitAUT(AuthKey, AuthResultReg, AArch64::X17);

  if (GAMO.getGlobal()->hasExternalWeakLinkage())
    OutStreamer->emitLabel(UndefWeakSym);

  if (!STI->hasFPAC()) {
    // No FPAC: check the authenticated value explicitly, then move it into
    // the real destination register.
    emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
                                       AArch64PAuth::AuthCheckMethod::XPAC);

    emitMovXReg(DstReg, AuthResultReg);
  }
}
2902
2903const MCExpr *
2904AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2905 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2906 const Function &Fn = *BA.getFunction();
2907
2908 if (std::optional<uint16_t> BADisc =
2909 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2910 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2911 /*HasAddressDiversity=*/false, OutContext);
2912
2913 return BAE;
2914}
2915
2916void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2917 bool IsImm = false;
2918 unsigned Width = 0;
2919
2920 switch (MI->getOpcode()) {
2921 default:
2922 llvm_unreachable("This is not a CB pseudo instruction");
2923 case AArch64::CBBAssertExt:
2924 IsImm = false;
2925 Width = 8;
2926 break;
2927 case AArch64::CBHAssertExt:
2928 IsImm = false;
2929 Width = 16;
2930 break;
2931 case AArch64::CBWPrr:
2932 Width = 32;
2933 break;
2934 case AArch64::CBXPrr:
2935 Width = 64;
2936 break;
2937 case AArch64::CBWPri:
2938 IsImm = true;
2939 Width = 32;
2940 break;
2941 case AArch64::CBXPri:
2942 IsImm = true;
2943 Width = 64;
2944 break;
2945 }
2946
2948 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2949 bool NeedsRegSwap = false;
2950 bool NeedsImmDec = false;
2951 bool NeedsImmInc = false;
2952
2953#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond) \
2954 (IsImm \
2955 ? (Width == 32 ? AArch64::CB##ImmCond##Wri : AArch64::CB##ImmCond##Xri) \
2956 : (Width == 8 \
2957 ? AArch64::CBB##RegCond##Wrr \
2958 : (Width == 16 ? AArch64::CBH##RegCond##Wrr \
2959 : (Width == 32 ? AArch64::CB##RegCond##Wrr \
2960 : AArch64::CB##RegCond##Xrr))))
2961 unsigned MCOpC;
2962
2963 // Decide if we need to either swap register operands or increment/decrement
2964 // immediate operands
2965 switch (CC) {
2966 default:
2967 llvm_unreachable("Invalid CB condition code");
2968 case AArch64CC::EQ:
2969 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ EQ, /* Reg-Reg */ EQ);
2970 break;
2971 case AArch64CC::NE:
2972 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ NE, /* Reg-Reg */ NE);
2973 break;
2974 case AArch64CC::HS:
2975 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HS);
2976 NeedsImmDec = IsImm;
2977 break;
2978 case AArch64CC::LO:
2979 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HI);
2980 NeedsRegSwap = !IsImm;
2981 break;
2982 case AArch64CC::HI:
2983 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ HI, /* Reg-Reg */ HI);
2984 break;
2985 case AArch64CC::LS:
2986 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LO, /* Reg-Reg */ HS);
2987 NeedsRegSwap = !IsImm;
2988 NeedsImmInc = IsImm;
2989 break;
2990 case AArch64CC::GE:
2991 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GE);
2992 NeedsImmDec = IsImm;
2993 break;
2994 case AArch64CC::LT:
2995 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GT);
2996 NeedsRegSwap = !IsImm;
2997 break;
2998 case AArch64CC::GT:
2999 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ GT, /* Reg-Reg */ GT);
3000 break;
3001 case AArch64CC::LE:
3002 MCOpC = GET_CB_OPC(IsImm, Width, /* Reg-Imm */ LT, /* Reg-Reg */ GE);
3003 NeedsRegSwap = !IsImm;
3004 NeedsImmInc = IsImm;
3005 break;
3006 }
3007#undef GET_CB_OPC
3008
3009 MCInst Inst;
3010 Inst.setOpcode(MCOpC);
3011
3012 MCOperand Lhs, Rhs, Trgt;
3013 lowerOperand(MI->getOperand(1), Lhs);
3014 lowerOperand(MI->getOperand(2), Rhs);
3015 lowerOperand(MI->getOperand(3), Trgt);
3016
3017 // Now swap, increment or decrement
3018 if (NeedsRegSwap) {
3019 assert(Lhs.isReg() && "Expected register operand for CB");
3020 assert(Rhs.isReg() && "Expected register operand for CB");
3021 Inst.addOperand(Rhs);
3022 Inst.addOperand(Lhs);
3023 } else if (NeedsImmDec) {
3024 Rhs.setImm(Rhs.getImm() - 1);
3025 Inst.addOperand(Lhs);
3026 Inst.addOperand(Rhs);
3027 } else if (NeedsImmInc) {
3028 Rhs.setImm(Rhs.getImm() + 1);
3029 Inst.addOperand(Lhs);
3030 Inst.addOperand(Rhs);
3031 } else {
3032 Inst.addOperand(Lhs);
3033 Inst.addOperand(Rhs);
3034 }
3035
3036 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
3037 "CB immediate operand out-of-bounds");
3038
3039 Inst.addOperand(Trgt);
3040 EmitToStreamer(*OutStreamer, Inst);
3041}
3042
3043// Simple pseudo-instructions have their lowering (with expansion to real
3044// instructions) auto-generated.
3045#include "AArch64GenMCPseudoLowering.inc"
3046
3047void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
3048 S.emitInstruction(Inst, *STI);
3049#ifndef NDEBUG
3050 ++InstsEmitted;
3051#endif
3052}
3053
3054void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
3055 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
3056
3057#ifndef NDEBUG
3058 InstsEmitted = 0;
3059 llvm::scope_exit CheckMISize([&]() {
3060 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
3061 });
3062#endif
3063
3064 // Do any auto-generated pseudo lowerings.
3065 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
3066 EmitToStreamer(*OutStreamer, OutInst);
3067 return;
3068 }
3069
3070 if (MI->getOpcode() == AArch64::ADRP) {
3071 for (auto &Opd : MI->operands()) {
3072 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
3073 "swift_async_extendedFramePointerFlags") {
3074 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
3075 }
3076 }
3077 }
3078
3079 if (AArch64FI->getLOHRelated().count(MI)) {
3080 // Generate a label for LOH related instruction
3081 MCSymbol *LOHLabel = createTempSymbol("loh");
3082 // Associate the instruction with the label
3083 LOHInstToLabel[MI] = LOHLabel;
3084 OutStreamer->emitLabel(LOHLabel);
3085 }
3086
3087 AArch64TargetStreamer *TS =
3088 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
3089 // Do any manual lowerings.
3090 switch (MI->getOpcode()) {
3091 default:
3093 "Unhandled tail call instruction");
3094 break;
3095 case AArch64::HINT: {
3096 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
3097 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
3098 // non-empty. If MI is the initial BTI, place the
3099 // __patchable_function_entries label after BTI.
3100 if (CurrentPatchableFunctionEntrySym &&
3101 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
3102 MI == &MF->front().front()) {
3103 int64_t Imm = MI->getOperand(0).getImm();
3104 if ((Imm & 32) && (Imm & 6)) {
3105 MCInst Inst;
3106 MCInstLowering.Lower(MI, Inst);
3107 EmitToStreamer(*OutStreamer, Inst);
3108 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
3109 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
3110 return;
3111 }
3112 }
3113 break;
3114 }
3115 case AArch64::MOVMCSym: {
3116 Register DestReg = MI->getOperand(0).getReg();
3117 const MachineOperand &MO_Sym = MI->getOperand(1);
3118 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
3119 MCOperand Hi_MCSym, Lo_MCSym;
3120
3121 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
3122 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
3123
3124 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
3125 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
3126
3127 MCInst MovZ;
3128 MovZ.setOpcode(AArch64::MOVZXi);
3129 MovZ.addOperand(MCOperand::createReg(DestReg));
3130 MovZ.addOperand(Hi_MCSym);
3132 EmitToStreamer(*OutStreamer, MovZ);
3133
3134 MCInst MovK;
3135 MovK.setOpcode(AArch64::MOVKXi);
3136 MovK.addOperand(MCOperand::createReg(DestReg));
3137 MovK.addOperand(MCOperand::createReg(DestReg));
3138 MovK.addOperand(Lo_MCSym);
3140 EmitToStreamer(*OutStreamer, MovK);
3141 return;
3142 }
3143 case AArch64::MOVIv2d_ns:
3144 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0".
3145 // as movi is more efficient across all cores. Newer cores can eliminate
3146 // fmovs early and there is no difference with movi, but this not true for
3147 // all implementations.
3148 //
3149 // The floating-point version doesn't quite work in rare cases on older
3150 // CPUs, so on those targets we lower this instruction to movi.16b instead.
3151 if (STI->hasZeroCycleZeroingFPWorkaround() &&
3152 MI->getOperand(1).getImm() == 0) {
3153 MCInst TmpInst;
3154 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
3155 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3156 TmpInst.addOperand(MCOperand::createImm(0));
3157 EmitToStreamer(*OutStreamer, TmpInst);
3158 return;
3159 }
3160 break;
3161
3162 case AArch64::DBG_VALUE:
3163 case AArch64::DBG_VALUE_LIST:
3164 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
3165 SmallString<128> TmpStr;
3166 raw_svector_ostream OS(TmpStr);
3167 PrintDebugValueComment(MI, OS);
3168 OutStreamer->emitRawText(StringRef(OS.str()));
3169 }
3170 return;
3171
3172 case AArch64::EMITBKEY: {
3173 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3174 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3175 ExceptionHandlingType != ExceptionHandling::ARM)
3176 return;
3177
3178 if (getFunctionCFISectionType(*MF) == CFISection::None)
3179 return;
3180
3181 OutStreamer->emitCFIBKeyFrame();
3182 return;
3183 }
3184
3185 case AArch64::EMITMTETAGGED: {
3186 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
3187 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
3188 ExceptionHandlingType != ExceptionHandling::ARM)
3189 return;
3190
3191 if (getFunctionCFISectionType(*MF) != CFISection::None)
3192 OutStreamer->emitCFIMTETaggedFrame();
3193 return;
3194 }
3195
3196 case AArch64::AUTx16x17:
3197 emitPtrauthAuthResign(
3198 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3199 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3200 std::nullopt, 0, 0, MI->getDeactivationSymbol());
3201 return;
3202
3203 case AArch64::AUTxMxN:
3204 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
3205 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3206 MI->getOperand(4).getImm(), &MI->getOperand(5),
3207 MI->getOperand(1).getReg(), std::nullopt, 0, 0,
3208 MI->getDeactivationSymbol());
3209 return;
3210
3211 case AArch64::AUTPAC:
3212 emitPtrauthAuthResign(
3213 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
3214 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
3215 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
3216 MI->getOperand(4).getImm(), MI->getOperand(5).getReg(),
3217 MI->getDeactivationSymbol());
3218 return;
3219
3220 case AArch64::PAC:
3221 emitPtrauthSign(MI);
3222 return;
3223
3224 case AArch64::LOADauthptrstatic:
3225 LowerLOADauthptrstatic(*MI);
3226 return;
3227
3228 case AArch64::LOADgotPAC:
3229 case AArch64::MOVaddrPAC:
3230 LowerMOVaddrPAC(*MI);
3231 return;
3232
3233 case AArch64::LOADgotAUTH:
3234 LowerLOADgotAUTH(*MI);
3235 return;
3236
3237 case AArch64::BRA:
3238 case AArch64::BLRA:
3239 emitPtrauthBranch(MI);
3240 return;
3241
3242 // Tail calls use pseudo instructions so they have the proper code-gen
3243 // attributes (isCall, isReturn, etc.). We lower them to the real
3244 // instruction here.
3245 case AArch64::AUTH_TCRETURN:
3246 case AArch64::AUTH_TCRETURN_BTI: {
3247 Register Callee = MI->getOperand(0).getReg();
3248 const auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
3249 const uint64_t Disc = MI->getOperand(3).getImm();
3250
3251 Register AddrDisc = MI->getOperand(4).getReg();
3252
3253 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
3254
3255 emitPtrauthTailCallHardening(MI);
3256
3257 // See the comments in emitPtrauthBranch.
3258 if (Callee == AddrDisc)
3259 report_fatal_error("Call target is signed with its own value");
3260
3261 // After isX16X17Safer predicate was introduced, emitPtrauthDiscriminator is
3262 // no longer restricted to only reusing AddrDisc when it is X16 or X17
3263 // (which are implicit-def'ed by AUTH_TCRETURN pseudos), thus impose this
3264 // restriction manually not to clobber an unexpected register.
3265 bool AddrDiscIsImplicitDef =
3266 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
3267 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
3268 AddrDiscIsImplicitDef);
3269 emitBLRA(/*IsCall*/ false, Key, Callee, DiscReg);
3270 return;
3271 }
3272
3273 case AArch64::TCRETURNri:
3274 case AArch64::TCRETURNrix16x17:
3275 case AArch64::TCRETURNrix17:
3276 case AArch64::TCRETURNrinotx16:
3277 case AArch64::TCRETURNriALL: {
3278 emitPtrauthTailCallHardening(MI);
3279
3280 recordIfImportCall(MI);
3281 MCInst TmpInst;
3282 TmpInst.setOpcode(AArch64::BR);
3283 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3284 EmitToStreamer(*OutStreamer, TmpInst);
3285 return;
3286 }
3287 case AArch64::TCRETURNdi: {
3288 emitPtrauthTailCallHardening(MI);
3289
3290 MCOperand Dest;
3291 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3292 recordIfImportCall(MI);
3293 MCInst TmpInst;
3294 TmpInst.setOpcode(AArch64::B);
3295 TmpInst.addOperand(Dest);
3296 EmitToStreamer(*OutStreamer, TmpInst);
3297 return;
3298 }
3299 case AArch64::SpeculationBarrierISBDSBEndBB: {
3300 // Print DSB SYS + ISB
3301 MCInst TmpInstDSB;
3302 TmpInstDSB.setOpcode(AArch64::DSB);
3303 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3304 EmitToStreamer(*OutStreamer, TmpInstDSB);
3305 MCInst TmpInstISB;
3306 TmpInstISB.setOpcode(AArch64::ISB);
3307 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3308 EmitToStreamer(*OutStreamer, TmpInstISB);
3309 return;
3310 }
3311 case AArch64::SpeculationBarrierSBEndBB: {
3312 // Print SB
3313 MCInst TmpInstSB;
3314 TmpInstSB.setOpcode(AArch64::SB);
3315 EmitToStreamer(*OutStreamer, TmpInstSB);
3316 return;
3317 }
3318 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3319 /// lower this to:
3320 /// adrp x0, :tlsdesc_auth:var
3321 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3322 /// add x0, x0, #:tlsdesc_auth_lo12:var
3323 /// blraa x16, x0
3324 /// (TPIDR_EL0 offset now in x0)
3325 const MachineOperand &MO_Sym = MI->getOperand(0);
3326 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3327 MCOperand SymTLSDescLo12, SymTLSDesc;
3328 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3329 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3330 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3331 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3332
3333 MCInst Adrp;
3334 Adrp.setOpcode(AArch64::ADRP);
3335 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3336 Adrp.addOperand(SymTLSDesc);
3337 EmitToStreamer(*OutStreamer, Adrp);
3338
3339 MCInst Ldr;
3340 Ldr.setOpcode(AArch64::LDRXui);
3341 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3342 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3343 Ldr.addOperand(SymTLSDescLo12);
3345 EmitToStreamer(*OutStreamer, Ldr);
3346
3347 MCInst Add;
3348 Add.setOpcode(AArch64::ADDXri);
3349 Add.addOperand(MCOperand::createReg(AArch64::X0));
3350 Add.addOperand(MCOperand::createReg(AArch64::X0));
3351 Add.addOperand(SymTLSDescLo12);
3353 EmitToStreamer(*OutStreamer, Add);
3354
3355 // Authenticated TLSDESC accesses are not relaxed.
3356 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3357
3358 MCInst Blraa;
3359 Blraa.setOpcode(AArch64::BLRAA);
3360 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3361 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3362 EmitToStreamer(*OutStreamer, Blraa);
3363
3364 return;
3365 }
3366 case AArch64::TLSDESC_CALLSEQ: {
3367 /// lower this to:
3368 /// adrp x0, :tlsdesc:var
3369 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3370 /// add x0, x0, #:tlsdesc_lo12:var
3371 /// .tlsdesccall var
3372 /// blr x1
3373 /// (TPIDR_EL0 offset now in x0)
3374 const MachineOperand &MO_Sym = MI->getOperand(0);
3375 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3376 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3377 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3378 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3379 MCInstLowering.lowerOperand(MO_Sym, Sym);
3380 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3381 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3382
3383 MCInst Adrp;
3384 Adrp.setOpcode(AArch64::ADRP);
3385 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3386 Adrp.addOperand(SymTLSDesc);
3387 EmitToStreamer(*OutStreamer, Adrp);
3388
3389 MCInst Ldr;
3390 if (STI->isTargetILP32()) {
3391 Ldr.setOpcode(AArch64::LDRWui);
3392 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3393 } else {
3394 Ldr.setOpcode(AArch64::LDRXui);
3395 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3396 }
3397 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3398 Ldr.addOperand(SymTLSDescLo12);
3400 EmitToStreamer(*OutStreamer, Ldr);
3401
3402 MCInst Add;
3403 if (STI->isTargetILP32()) {
3404 Add.setOpcode(AArch64::ADDWri);
3405 Add.addOperand(MCOperand::createReg(AArch64::W0));
3406 Add.addOperand(MCOperand::createReg(AArch64::W0));
3407 } else {
3408 Add.setOpcode(AArch64::ADDXri);
3409 Add.addOperand(MCOperand::createReg(AArch64::X0));
3410 Add.addOperand(MCOperand::createReg(AArch64::X0));
3411 }
3412 Add.addOperand(SymTLSDescLo12);
3414 EmitToStreamer(*OutStreamer, Add);
3415
3416 // Emit a relocation-annotation. This expands to no code, but requests
3417 // the following instruction gets an R_AARCH64_TLSDESC_CALL.
3418 MCInst TLSDescCall;
3419 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3420 TLSDescCall.addOperand(Sym);
3421 EmitToStreamer(*OutStreamer, TLSDescCall);
3422#ifndef NDEBUG
3423 --InstsEmitted; // no code emitted
3424#endif
3425
3426 MCInst Blr;
3427 Blr.setOpcode(AArch64::BLR);
3428 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3429 EmitToStreamer(*OutStreamer, Blr);
3430
3431 return;
3432 }
3433
3434 case AArch64::JumpTableDest32:
3435 case AArch64::JumpTableDest16:
3436 case AArch64::JumpTableDest8:
3437 LowerJumpTableDest(*OutStreamer, *MI);
3438 return;
3439
3440 case AArch64::BR_JumpTable:
3441 LowerHardenedBRJumpTable(*MI);
3442 return;
3443
3444 case AArch64::FMOVH0:
3445 case AArch64::FMOVS0:
3446 case AArch64::FMOVD0:
3447 emitFMov0(*MI);
3448 return;
3449
3450 case AArch64::MOPSMemoryCopyPseudo:
3451 case AArch64::MOPSMemoryMovePseudo:
3452 case AArch64::MOPSMemorySetPseudo:
3453 case AArch64::MOPSMemorySetTaggingPseudo:
3454 LowerMOPS(*OutStreamer, *MI);
3455 return;
3456
3457 case TargetOpcode::STACKMAP:
3458 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3459
3460 case TargetOpcode::PATCHPOINT:
3461 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3462
3463 case TargetOpcode::STATEPOINT:
3464 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3465
3466 case TargetOpcode::FAULTING_OP:
3467 return LowerFAULTING_OP(*MI);
3468
3469 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3470 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3471 return;
3472
3473 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3474 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3475 return;
3476
3477 case TargetOpcode::PATCHABLE_TAIL_CALL:
3478 LowerPATCHABLE_TAIL_CALL(*MI);
3479 return;
3480 case TargetOpcode::PATCHABLE_EVENT_CALL:
3481 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3482 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3483 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3484
3485 case AArch64::KCFI_CHECK:
3486 LowerKCFI_CHECK(*MI);
3487 return;
3488
3489 case AArch64::HWASAN_CHECK_MEMACCESS:
3490 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3491 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3492 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3493 LowerHWASAN_CHECK_MEMACCESS(*MI);
3494 return;
3495
3496 case AArch64::SEH_StackAlloc:
3497 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3498 return;
3499
3500 case AArch64::SEH_SaveFPLR:
3501 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3502 return;
3503
3504 case AArch64::SEH_SaveFPLR_X:
3505 assert(MI->getOperand(0).getImm() < 0 &&
3506 "Pre increment SEH opcode must have a negative offset");
3507 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3508 return;
3509
3510 case AArch64::SEH_SaveReg:
3511 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3512 MI->getOperand(1).getImm());
3513 return;
3514
3515 case AArch64::SEH_SaveReg_X:
3516 assert(MI->getOperand(1).getImm() < 0 &&
3517 "Pre increment SEH opcode must have a negative offset");
3518 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3519 -MI->getOperand(1).getImm());
3520 return;
3521
3522 case AArch64::SEH_SaveRegP:
3523 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3524 MI->getOperand(0).getImm() <= 28) {
3525 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3526 "Register paired with LR must be odd");
3527 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3528 MI->getOperand(2).getImm());
3529 return;
3530 }
3531 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3532 "Non-consecutive registers not allowed for save_regp");
3533 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3534 MI->getOperand(2).getImm());
3535 return;
3536
3537 case AArch64::SEH_SaveRegP_X:
3538 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3539 "Non-consecutive registers not allowed for save_regp_x");
3540 assert(MI->getOperand(2).getImm() < 0 &&
3541 "Pre increment SEH opcode must have a negative offset");
3542 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3543 -MI->getOperand(2).getImm());
3544 return;
3545
3546 case AArch64::SEH_SaveFReg:
3547 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3548 MI->getOperand(1).getImm());
3549 return;
3550
3551 case AArch64::SEH_SaveFReg_X:
3552 assert(MI->getOperand(1).getImm() < 0 &&
3553 "Pre increment SEH opcode must have a negative offset");
3554 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3555 -MI->getOperand(1).getImm());
3556 return;
3557
3558 case AArch64::SEH_SaveFRegP:
3559 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3560 "Non-consecutive registers not allowed for save_regp");
3561 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3562 MI->getOperand(2).getImm());
3563 return;
3564
3565 case AArch64::SEH_SaveFRegP_X:
3566 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3567 "Non-consecutive registers not allowed for save_regp_x");
3568 assert(MI->getOperand(2).getImm() < 0 &&
3569 "Pre increment SEH opcode must have a negative offset");
3570 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3571 -MI->getOperand(2).getImm());
3572 return;
3573
3574 case AArch64::SEH_SetFP:
3576 return;
3577
3578 case AArch64::SEH_AddFP:
3579 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3580 return;
3581
3582 case AArch64::SEH_Nop:
3583 TS->emitARM64WinCFINop();
3584 return;
3585
3586 case AArch64::SEH_PrologEnd:
3588 return;
3589
3590 case AArch64::SEH_EpilogStart:
3592 return;
3593
3594 case AArch64::SEH_EpilogEnd:
3596 return;
3597
3598 case AArch64::SEH_PACSignLR:
3600 return;
3601
3602 case AArch64::SEH_SaveAnyRegI:
3603 assert(MI->getOperand(1).getImm() <= 1008 &&
3604 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3605 TS->emitARM64WinCFISaveAnyRegI(MI->getOperand(0).getImm(),
3606 MI->getOperand(1).getImm());
3607 return;
3608
3609 case AArch64::SEH_SaveAnyRegIP:
3610 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3611 "Non-consecutive registers not allowed for save_any_reg");
3612 assert(MI->getOperand(2).getImm() <= 1008 &&
3613 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3614 TS->emitARM64WinCFISaveAnyRegIP(MI->getOperand(0).getImm(),
3615 MI->getOperand(2).getImm());
3616 return;
3617
3618 case AArch64::SEH_SaveAnyRegQP:
3619 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3620 "Non-consecutive registers not allowed for save_any_reg");
3621 assert(MI->getOperand(2).getImm() >= 0 &&
3622 "SaveAnyRegQP SEH opcode offset must be non-negative");
3623 assert(MI->getOperand(2).getImm() <= 1008 &&
3624 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3625 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3626 MI->getOperand(2).getImm());
3627 return;
3628
3629 case AArch64::SEH_SaveAnyRegQPX:
3630 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3631 "Non-consecutive registers not allowed for save_any_reg");
3632 assert(MI->getOperand(2).getImm() < 0 &&
3633 "SaveAnyRegQPX SEH opcode offset must be negative");
3634 assert(MI->getOperand(2).getImm() >= -1008 &&
3635 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3636 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3637 -MI->getOperand(2).getImm());
3638 return;
3639
3640 case AArch64::SEH_AllocZ:
3641 assert(MI->getOperand(0).getImm() >= 0 &&
3642 "AllocZ SEH opcode offset must be non-negative");
3643 assert(MI->getOperand(0).getImm() <= 255 &&
3644 "AllocZ SEH opcode offset must fit into 8 bits");
3645 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3646 return;
3647
3648 case AArch64::SEH_SaveZReg:
3649 assert(MI->getOperand(1).getImm() >= 0 &&
3650 "SaveZReg SEH opcode offset must be non-negative");
3651 assert(MI->getOperand(1).getImm() <= 255 &&
3652 "SaveZReg SEH opcode offset must fit into 8 bits");
3653 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3654 MI->getOperand(1).getImm());
3655 return;
3656
3657 case AArch64::SEH_SavePReg:
3658 assert(MI->getOperand(1).getImm() >= 0 &&
3659 "SavePReg SEH opcode offset must be non-negative");
3660 assert(MI->getOperand(1).getImm() <= 255 &&
3661 "SavePReg SEH opcode offset must fit into 8 bits");
3662 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3663 MI->getOperand(1).getImm());
3664 return;
3665
3666 case AArch64::BLR:
3667 case AArch64::BR: {
3668 recordIfImportCall(MI);
3669 MCInst TmpInst;
3670 MCInstLowering.Lower(MI, TmpInst);
3671 EmitToStreamer(*OutStreamer, TmpInst);
3672 return;
3673 }
3674 case AArch64::CBWPri:
3675 case AArch64::CBXPri:
3676 case AArch64::CBBAssertExt:
3677 case AArch64::CBHAssertExt:
3678 case AArch64::CBWPrr:
3679 case AArch64::CBXPrr:
3680 emitCBPseudoExpansion(MI);
3681 return;
3682 }
3683
3684 if (emitDeactivationSymbolRelocation(MI->getDeactivationSymbol()))
3685 return;
3686
3687 // Finally, do the automated lowerings for everything else.
3688 MCInst TmpInst;
3689 MCInstLowering.Lower(MI, TmpInst);
3690 EmitToStreamer(*OutStreamer, TmpInst);
3691}
3692
3693void AArch64AsmPrinter::recordIfImportCall(
3694 const llvm::MachineInstr *BranchInst) {
3695 if (!EnableImportCallOptimization)
3696 return;
3697
3698 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3699 if (GV && GV->hasDLLImportStorageClass()) {
3700 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3701 OutStreamer->emitLabel(CallSiteSymbol);
3702
3703 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3704 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3705 .push_back({CallSiteSymbol, CalledSymbol});
3706 }
3707}
3708
3709void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3710 MCSymbol *LazyPointer) {
3711 // _ifunc:
3712 // adrp x16, lazy_pointer@GOTPAGE
3713 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3714 // ldr x16, [x16]
3715 // br x16
3716
3717 {
3718 MCInst Adrp;
3719 Adrp.setOpcode(AArch64::ADRP);
3720 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3721 MCOperand SymPage;
3722 MCInstLowering.lowerOperand(
3725 SymPage);
3726 Adrp.addOperand(SymPage);
3727 EmitToStreamer(Adrp);
3728 }
3729
3730 {
3731 MCInst Ldr;
3732 Ldr.setOpcode(AArch64::LDRXui);
3733 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3734 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3735 MCOperand SymPageOff;
3736 MCInstLowering.lowerOperand(
3739 SymPageOff);
3740 Ldr.addOperand(SymPageOff);
3742 EmitToStreamer(Ldr);
3743 }
3744
3745 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3746 .addReg(AArch64::X16)
3747 .addReg(AArch64::X16)
3748 .addImm(0));
3749
3750 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3751 : AArch64::BR)
3752 .addReg(AArch64::X16));
3753}
3754
3755void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3756 const GlobalIFunc &GI,
3757 MCSymbol *LazyPointer) {
3758 // These stub helpers are only ever called once, so here we're optimizing for
3759 // minimum size by using the pre-indexed store variants, which saves a few
3760 // bytes of instructions to bump & restore sp.
3761
3762 // _ifunc.stub_helper:
3763 // stp fp, lr, [sp, #-16]!
3764 // mov fp, sp
3765 // stp x1, x0, [sp, #-16]!
3766 // stp x3, x2, [sp, #-16]!
3767 // stp x5, x4, [sp, #-16]!
3768 // stp x7, x6, [sp, #-16]!
3769 // stp d1, d0, [sp, #-16]!
3770 // stp d3, d2, [sp, #-16]!
3771 // stp d5, d4, [sp, #-16]!
3772 // stp d7, d6, [sp, #-16]!
3773 // bl _resolver
3774 // adrp x16, lazy_pointer@GOTPAGE
3775 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3776 // str x0, [x16]
3777 // mov x16, x0
3778 // ldp d7, d6, [sp], #16
3779 // ldp d5, d4, [sp], #16
3780 // ldp d3, d2, [sp], #16
3781 // ldp d1, d0, [sp], #16
3782 // ldp x7, x6, [sp], #16
3783 // ldp x5, x4, [sp], #16
3784 // ldp x3, x2, [sp], #16
3785 // ldp x1, x0, [sp], #16
3786 // ldp fp, lr, [sp], #16
3787 // br x16
3788
3789 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3790 .addReg(AArch64::SP)
3791 .addReg(AArch64::FP)
3792 .addReg(AArch64::LR)
3793 .addReg(AArch64::SP)
3794 .addImm(-2));
3795
3796 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3797 .addReg(AArch64::FP)
3798 .addReg(AArch64::SP)
3799 .addImm(0)
3800 .addImm(0));
3801
3802 for (int I = 0; I != 4; ++I)
3803 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3804 .addReg(AArch64::SP)
3805 .addReg(AArch64::X1 + 2 * I)
3806 .addReg(AArch64::X0 + 2 * I)
3807 .addReg(AArch64::SP)
3808 .addImm(-2));
3809
3810 for (int I = 0; I != 4; ++I)
3811 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3812 .addReg(AArch64::SP)
3813 .addReg(AArch64::D1 + 2 * I)
3814 .addReg(AArch64::D0 + 2 * I)
3815 .addReg(AArch64::SP)
3816 .addImm(-2));
3817
3818 EmitToStreamer(
3819 MCInstBuilder(AArch64::BL)
3821
3822 {
3823 MCInst Adrp;
3824 Adrp.setOpcode(AArch64::ADRP);
3825 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3826 MCOperand SymPage;
3827 MCInstLowering.lowerOperand(
3828 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3830 SymPage);
3831 Adrp.addOperand(SymPage);
3832 EmitToStreamer(Adrp);
3833 }
3834
3835 {
3836 MCInst Ldr;
3837 Ldr.setOpcode(AArch64::LDRXui);
3838 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3839 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3840 MCOperand SymPageOff;
3841 MCInstLowering.lowerOperand(
3842 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3844 SymPageOff);
3845 Ldr.addOperand(SymPageOff);
3847 EmitToStreamer(Ldr);
3848 }
3849
3850 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3851 .addReg(AArch64::X0)
3852 .addReg(AArch64::X16)
3853 .addImm(0));
3854
3855 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3856 .addReg(AArch64::X16)
3857 .addReg(AArch64::X0)
3858 .addImm(0)
3859 .addImm(0));
3860
3861 for (int I = 3; I != -1; --I)
3862 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3863 .addReg(AArch64::SP)
3864 .addReg(AArch64::D1 + 2 * I)
3865 .addReg(AArch64::D0 + 2 * I)
3866 .addReg(AArch64::SP)
3867 .addImm(2));
3868
3869 for (int I = 3; I != -1; --I)
3870 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3871 .addReg(AArch64::SP)
3872 .addReg(AArch64::X1 + 2 * I)
3873 .addReg(AArch64::X0 + 2 * I)
3874 .addReg(AArch64::SP)
3875 .addImm(2));
3876
3877 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3878 .addReg(AArch64::SP)
3879 .addReg(AArch64::FP)
3880 .addReg(AArch64::LR)
3881 .addReg(AArch64::SP)
3882 .addImm(2));
3883
3884 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3885 : AArch64::BR)
3886 .addReg(AArch64::X16));
3887}
3888
3889const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3890 const Constant *BaseCV,
3891 uint64_t Offset) {
3892 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3893 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3894 OutContext);
3895 }
3896
3897 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3898}
3899
// Pass identification: the address of ID uniquely identifies the pass.
char AArch64AsmPrinter::ID = 0;

INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
                "AArch64 Assembly Printer", false, false)
3904
3905// Force static initialization.
3906extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3907LLVMInitializeAArch64AsmPrinter() {
3913}
static cl::opt< PtrauthCheckMode > PtrauthAuthChecks("aarch64-ptrauth-auth-checks", cl::Hidden, cl::values(clEnumValN(Unchecked, "none", "don't test for failure"), clEnumValN(Poison, "poison", "poison on failure"), clEnumValN(Trap, "trap", "trap on failure")), cl::desc("Check pointer authentication auth/resign failures"), cl::init(Default))
PtrauthCheckMode
@ Unchecked
#define GET_CB_OPC(IsImm, Width, ImmCond, RegCond)
static void emitAuthenticatedPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel, const MCExpr *StubAuthPtrRef)
static bool targetSupportsIRelativeRelocation(const Triple &TT)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_ABI
Definition Compiler.h:213
#define LLVM_EXTERNAL_VISIBILITY
Definition Compiler.h:132
This file defines the DenseMap class.
@ Default
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
print mir2vec MIR2Vec Vocabulary Printer Pass
Definition MIR2Vec.cpp:593
Machine Check Debug Module
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define P(N)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool printOperand(raw_ostream &OS, const SelectionDAG *G, const SDValue Value)
This file defines the SmallString class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool printAsmMRegister(const X86AsmPrinter &P, const MachineOperand &MO, char Mode, raw_ostream &O)
static const AArch64AuthMCExpr * create(const MCExpr *Expr, uint16_t Discriminator, AArch64PACKey::ID Key, bool HasAddressDiversity, MCContext &Ctx, SMLoc Loc=SMLoc())
const SetOfInstructions & getLOHRelated() const
unsigned getJumpTableEntrySize(int Idx) const
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
std::optional< std::string > getOutliningStyle() const
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
AArch64MCInstLower - This class is used to lower an MachineInstr into an MCInst.
MCSymbol * GetGlobalValueSymbol(const GlobalValue *GV, unsigned TargetFlags) const
void Lower(const MachineInstr *MI, MCInst &OutMI) const
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const
virtual void emitARM64WinCFISaveRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQP(unsigned Reg, int Offset)
virtual void emitAttributesSubsection(StringRef VendorName, AArch64BuildAttributes::SubsectionOptional IsOptional, AArch64BuildAttributes::SubsectionType ParameterType)
Build attributes implementation.
virtual void emitARM64WinCFISavePReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegI(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFIAllocStack(unsigned Size)
virtual void emitARM64WinCFISaveFPLRX(int Offset)
virtual void emitARM64WinCFIAllocZ(int Offset)
virtual void emitDirectiveVariantPCS(MCSymbol *Symbol)
Callback used to implement the .variant_pcs directive.
virtual void emitARM64WinCFIAddFP(unsigned Size)
virtual void emitARM64WinCFISaveFPLR(int Offset)
virtual void emitARM64WinCFISaveFRegP(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveAnyRegQPX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveFRegX(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveZReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveReg(unsigned Reg, int Offset)
virtual void emitARM64WinCFISaveLRPair(unsigned Reg, int Offset)
virtual void emitAttribute(StringRef VendorName, unsigned Tag, unsigned Value, std::string String)
virtual void emitARM64WinCFISaveAnyRegIP(unsigned Reg, int Offset)
void setPreservesAll()
Set by analyses that do not transform their input at all.
const T & front() const
front - Get the first element.
Definition ArrayRef.h:145
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class is intended to be used as a driving class for all asm writers.
Definition AsmPrinter.h:91
virtual void emitGlobalAlias(const Module &M, const GlobalAlias &GA)
virtual MCSymbol * GetCPISymbol(unsigned CPID) const
Return the symbol for the specified constant pool entry.
virtual const MCExpr * lowerConstant(const Constant *CV, const Constant *BaseCV=nullptr, uint64_t Offset=0)
Lower the specified LLVM Constant to an MCExpr.
void getAnalysisUsage(AnalysisUsage &AU) const override
Record analysis usage.
virtual void emitXXStructor(const DataLayout &DL, const Constant *CV)
Targets can override this to change how global constants that are part of a C++ static/global constru...
Definition AsmPrinter.h:636
virtual void emitFunctionEntryLabel()
EmitFunctionEntryLabel - Emit the label that is the entrypoint for the function.
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS)
Print the specified operand of MI, an INLINEASM instruction, using the specified assembler variant.
virtual const MCExpr * lowerBlockAddressConstant(const BlockAddress &BA)
Lower the specified BlockAddress to an MCExpr.
Function * getFunction() const
Definition Constants.h:940
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
Constant * getPointer() const
The pointer that is signed in this ptrauth signed pointer.
Definition Constants.h:1065
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
Constant * getDeactivationSymbol() const
Definition Constants.h:1087
bool hasAddressDiscriminator() const
Whether there is any non-null address discriminator.
Definition Constants.h:1083
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel, const MCSymbol *HandlerLabel)
Definition FaultMaps.cpp:28
void serializeToFaultMapSection()
Definition FaultMaps.cpp:45
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
const Constant * getAliasee() const
Definition GlobalAlias.h:87
const Constant * getResolver() const
Definition GlobalIFunc.h:73
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
bool hasLocalLinkage() const
bool hasExternalWeakLinkage() const
Type * getValueType() const
LLVM_ABI void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
static const MCBinaryExpr * createLShr(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:423
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:343
static const MCBinaryExpr * createSub(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition MCExpr.h:428
static LLVM_ABI const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition MCExpr.cpp:212
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSectionELF * getELFSection(const Twine &Section, unsigned Type, unsigned Flags)
Definition MCContext.h:553
LLVM_ABI MCSymbol * getOrCreateSymbol(const Twine &Name)
Lookup the symbol inside with the specified Name.
LLVM_ABI MCSymbol * createLinkerPrivateSymbol(const Twine &Name)
Base class for the full range of assembler expressions which are needed for parsing.
Definition MCExpr.h:34
LLVM_ABI bool evaluateAsRelocatable(MCValue &Res, const MCAssembler *Asm) const
Try to evaluate the expression to a relocatable value, i.e.
Definition MCExpr.cpp:450
void addOperand(const MCOperand Op)
Definition MCInst.h:215
void setOpcode(unsigned Op)
Definition MCInst.h:201
MCSection * getDataSection() const
void setImm(int64_t Val)
Definition MCInst.h:89
static MCOperand createExpr(const MCExpr *Val)
Definition MCInst.h:166
int64_t getImm() const
Definition MCInst.h:84
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
bool isReg() const
Definition MCInst.h:65
uint16_t getEncodingValue(MCRegister Reg) const
Returns the encoding for Reg.
static constexpr unsigned NonUniqueID
Definition MCSection.h:522
static const MCSpecifierExpr * create(const MCExpr *Expr, Spec S, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.cpp:743
Streaming machine code generation interface.
Definition MCStreamer.h:220
virtual void emitCFIBKeyFrame()
virtual bool popSection()
Restore the current and previous section from the section stack.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr, SMLoc Loc={})
Record a relocation described by the .reloc directive.
virtual bool hasRawTextSupport() const
Return true if this asm streamer supports emitting unformatted text to the .s file with EmitRawText.
Definition MCStreamer.h:368
MCContext & getContext() const
Definition MCStreamer.h:314
virtual void AddComment(const Twine &T, bool EOL=true)
Add a textual comment.
Definition MCStreamer.h:387
virtual void emitCFIMTETaggedFrame()
void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc=SMLoc())
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Definition MCStreamer.h:324
void pushSection()
Save the current and previous section on the section stack.
Definition MCStreamer.h:443
virtual void switchSection(MCSection *Section, uint32_t Subsec=0)
Set the current section where code is being emitted to Section.
MCSection * getCurrentSectionOnly() const
Definition MCStreamer.h:421
void emitRawText(const Twine &String)
If this file is backed by a assembly streamer, this dumps the specified string in the output ....
const FeatureBitset & getFeatureBits() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
Definition MCExpr.h:214
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
LLVM_ABI void print(raw_ostream &OS, const MCAsmInfo *MAI) const
print - Print the value to the stream OS.
Definition MCSymbol.cpp:59
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
const MCSymbol * getAddSym() const
Definition MCValue.h:49
int64_t getConstant() const
Definition MCValue.h:44
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
CalledGlobalInfo tryGetCalledGlobal(const MachineInstr *MI) const
Tries to get the global and target flags for a call site, if the instruction is a call to a global.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MCContext & getContext() const
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
mop_range operands()
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
const std::vector< MachineJumpTableEntry > & getJumpTables() const
unsigned getSubReg() const
static MachineOperand CreateMCSymbol(MCSymbol *Sym, unsigned TargetFlags=0)
const GlobalValue * getGlobal() const
static MachineOperand CreateES(const char *SymName, unsigned TargetFlags=0)
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
const BlockAddress * getBlockAddress() const
void setOffset(int64_t Offset)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
@ MO_Immediate
Immediate operand.
@ MO_GlobalAddress
Address of a global value.
@ MO_BlockAddress
Address of a basic block.
@ MO_Register
Register operand.
@ MO_ExternalSymbol
Name of external global symbol.
int64_t getOffset() const
Return the offset from the symbol in this operand.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
static SectionKind getMetadata()
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
void push_back(const T &Elt)
LLVM_ABI void recordStatepoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a statepoint instruction.
LLVM_ABI void recordPatchPoint(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a patchpoint instruction.
LLVM_ABI void recordStackMap(const MCSymbol &L, const MachineInstr &MI)
Generate a stackmap record for a stackmap instruction.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
virtual MCSection * getSectionForJumpTable(const Function &F, const TargetMachine &TM) const
Primary interface to the complete machine description for the target machine.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition Type.h:258
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.cpp:1106
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
StringRef getVendorName(unsigned const Vendor)
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_S
MO_S - Indicates that the bits of the symbol operand represented by MO_G0 etc are signed.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
AuthCheckMethod
Variants of check performed on an authenticated pointer.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ SectionSize
Definition COFF.h:61
SymbolStorageClass
Storage class tells where and what the symbol represents.
Definition COFF.h:218
@ IMAGE_SYM_CLASS_EXTERNAL
External symbol.
Definition COFF.h:224
@ IMAGE_SYM_CLASS_STATIC
Static.
Definition COFF.h:225
@ IMAGE_SYM_DTYPE_FUNCTION
A function that returns a base type.
Definition COFF.h:276
@ SCT_COMPLEX_TYPE_SHIFT
Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
Definition COFF.h:280
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ SHF_ALLOC
Definition ELF.h:1248
@ SHF_GROUP
Definition ELF.h:1270
@ SHF_EXECINSTR
Definition ELF.h:1251
@ GNU_PROPERTY_AARCH64_FEATURE_1_BTI
Definition ELF.h:1858
@ GNU_PROPERTY_AARCH64_FEATURE_1_PAC
Definition ELF.h:1859
@ GNU_PROPERTY_AARCH64_FEATURE_1_GCS
Definition ELF.h:1860
@ SHT_PROGBITS
Definition ELF.h:1147
@ S_REGULAR
S_REGULAR - Regular section.
Definition MachO.h:127
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
bool empty() const
Definition BasicBlock.h:101
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI std::optional< std::string > getArm64ECMangledFunctionName(StringRef Name)
Returns the ARM64EC mangled function name unless the input is already mangled.
Definition Mangler.cpp:294
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
scope_exit(Callable) -> scope_exit< Callable >
static unsigned getXPACOpcodeForKey(AArch64PACKey::ID K)
Return XPAC opcode to be used for a ptrauth strip using the given key.
ExceptionHandling
Definition CodeGen.h:53
Target & getTheAArch64beTarget()
std::string utostr(uint64_t X, bool isNeg=false)
static unsigned getBranchOpcodeForKey(bool IsCall, AArch64PACKey::ID K, bool Zero)
Return B(L)RA opcode to be used for an authenticated branch or call using the given key,...
Target & getTheAArch64leTarget()
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
Target & getTheAArch64_32Target()
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
Target & getTheARM64_32Target()
static MCRegister getXRegFromWReg(MCRegister Reg)
@ Add
Sum of integers.
Target & getTheARM64Target()
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static MCRegister getXRegFromXRegTuple(MCRegister RegTuple)
static unsigned getPACOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return PAC opcode to be used for a ptrauth sign using the given key, or its PAC*Z variant that doesn'...
static MCRegister getWRegFromXReg(MCRegister Reg)
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1915
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
static unsigned getAUTOpcodeForKey(AArch64PACKey::ID K, bool Zero)
Return AUT opcode to be used for a ptrauth auth using the given key, or its AUT*Z variant that doesn'...
@ MCSA_Weak
.weak
@ MCSA_WeakAntiDep
.weak_anti_dep (COFF)
@ MCSA_ELF_TypeFunction
.type _foo, STT_FUNC # aka @function
@ MCSA_Hidden
.hidden (ELF)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
#define EQ(a, b)
Definition regexec.c:65
RegisterAsmPrinter - Helper template for registering a target specific assembly printer,...