//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
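
// Illustrative sketch (not part of the upstream file): for Reg == AArch64::Z9
// the code above sets RegToUseForCFI to AArch64::D9, and since d8-d15 are
// callee-saved in CSR_AArch64_AAPCS_SaveList, a CFI entry is emitted for d9
// rather than z9. A predicate register such as P4 gets no CFI entry at all.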

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_Darwin_AArch64_AAPCS_SaveList, which means most CSR
  // save lists depending on it will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge case for the GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
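
// Example of the edge case above (an explanatory note, not upstream code):
// querying which subclass of GPR32all supports an hsub (16-bit FP)
// sub-register can only sensibly yield FPR32, because general-purpose
// registers have no h-subregister; the FPR class is returned instead of
// letting the TableGen-generated default answer the query.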

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  else if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;
  else
    return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
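
// A minimal sketch of the regmask layout used above (explanatory, with an
// assumed register encoding): the mask is an array of uint32_t in which
// register N occupies bit (N % 32) of word (N / 32), and a set bit means the
// register is preserved across the call. If a sub-register's encoding were,
// say, 70, the update would be:
//   UpdatedMask[70 / 32] |= 1u << (70 % 32); // word 2, bit 6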

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // In case the calling convention does not use the same register for both,
  // the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}
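
// Usage sketch: when a call is lowered with the 'returned' attribute on its
// first i64 argument, this mask lets the caller treat X0 as preserved, since
// the AAPCS passes the first argument and returns a single i64 result in X0.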

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
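
// Note: getReservedRegs is a superset of getStrictlyReservedRegs; it adds the
// X registers that are reserved only for the register allocator
// (isXRegisterReservedForRA) on top of the registers that may never be used.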

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{F, ("AArch64 doesn't support"
    " function calls if any of the argument registers is reserved.")});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable-sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable-sized objects are present *and* the stack needs
  // to be dynamically re-aligned, the base pointer is the only reliable way
  // to reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a basepointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
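
// Worked instance of the 256-byte heuristic above: the unscaled LDUR/STUR
// immediates are 9-bit signed, covering [-256, 255] bytes, so once the local
// frame reaches 256 bytes a purely FP-relative negative offset is assumed
// (conservatively) not to reach every local, and a base pointer is requested.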

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::WebKit_JS:
    return HasReg(CC_AArch64_WebKit_JS_ArgRegs, Reg);
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows() && IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
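
// Schematically, the instruction built above is (a sketch; the virtual
// register name is illustrative):
//   %base:gpr64sp = ADDXri <fi#FrameIdx>, Offset, 0
// i.e. the base register becomes the address of the frame index plus Offset,
// with an LSL #0 shifter; the frame index operand is resolved during frame
// index elimination.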

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // ARM doesn't need the general 64-bit offsets
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing
  // modes are predicates, which are 2 scalable bytes in size. So the scalable
  // byte offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
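
// A sketch of the expression emitted above, assuming a frame offset of 16
// fixed bytes plus 32 scalable bytes (so VGSized == 16):
//   DW_OP_plus_uconst 16,                              // fixed part
//   DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul, DW_OP_plus
// i.e. address = base + 16 + 16 * VG, where the VG pseudo-register holds the
// number of 64-bit granules in a scalable vector.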

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}
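
// For instance (a sketch; the exact opcode depends on the instruction being
// rewritten): a load such as
//   %x = LDRXui %stack.0, 0
// is rewritten in place when the final offset fits the instruction's scaled
// 12-bit unsigned immediate; otherwise the tail of the function above
// materializes FrameReg + Offset into a scratch register and the access
// becomes LDRXui %scratch, 0.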

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true.
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;
  return true;
}
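
// A sketch of the pattern being protected above (register names are
// illustrative): AArch64 implements a 32-to-64-bit zero extension as a copy
// between 32-bit sub-registers of 64-bit registers whose upper halves are
// known to be zero, e.g.
//   %1.sub_32:gpr64 = COPY %0.sub_32:gpr64
// Coalescing such a copy away could substitute a 64-bit register whose upper
// 32 bits are not zero, so it is rejected.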