//===- AArch64RegisterInfo.cpp - AArch64 Register Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
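
// Usage sketch (hypothetical caller, not part of this file): when emitting
// CFI for a callee-saved SVE register, only the base-ABI D-subregister is
// described.
//
//   unsigned RegForCFI;
//   if (TRI->regNeedsCFI(AArch64::Z8, RegForCFI))
//     emitCalleeSaveCFI(RegForCFI); // RegForCFI == AArch64::D8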

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows()) {
    if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
            ->supportSwiftError() &&
        MF->getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_Win_AArch64_AAPCS_SaveList;
  }
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}
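
// Illustrative note (my reading, not from this file): the custom entries come
// from per-register subtarget features that mark individual X registers as
// callee-saved; clang exposes these as -fcall-saved-x8 ... -fcall-saved-x18.
// The updated list keeps the usual zero terminator, so callers can still walk
// it with the "for (I = CSRs; *I; ++I)" idiom used above.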

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // edge case for GPR/FPR register classes
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  else if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;
  else
    return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}
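
// Usage sketch (hypothetical caller, not part of this file): call lowering
// attaches the convention's preserved-register mask to the call instruction
// so the register allocator knows which values survive the call.
//
//   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
//   MIB.addRegMask(Mask);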

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
                                                        const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
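
// Interpretation of the mask updated above (see the comment in the loop): bit
// (Reg % 32) of word (Reg / 32) is set iff Reg is preserved by the call, e.g.
//
//   bool IsPreserved = Mask[Reg / 32] & (1u << (Reg % 32));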

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
    for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    markSuperRegs(Reserved, AArch64::X27);
    markSuperRegs(Reserved, AArch64::X28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W28);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
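
// Illustrative note: the isXRegisterReserved() bits consulted above come from
// subtarget features along the lines of "reserve-x20" (clang's -ffixed-x20),
// so code built with such flags never allocates those registers.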

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("AArch64 doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
524
526 const MachineFrameInfo &MFI = MF.getFrameInfo();
527
528 // In the presence of variable sized objects or funclets, if the fixed stack
529 // size is large enough that referencing from the FP won't result in things
530 // being in range relatively often, we can use a base pointer to allow access
531 // from the other direction like the SP normally works.
532 //
533 // Furthermore, if both variable sized objects are present, and the
534 // stack needs to be dynamically re-aligned, the base pointer is the only
535 // reliable way to reference the locals.
536 if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
537 if (hasStackRealignment(MF))
538 return true;
539
540 if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
542 // Frames that have variable sized objects and scalable SVE objects,
543 // should always use a basepointer.
544 if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
545 return true;
546 }
547
548 // Conservatively estimate whether the negative offset from the frame
549 // pointer will be sufficient to reach. If a function has a smallish
550 // frame, it's less likely to have lots of spills and callee saved
551 // space, so it's all more likely to be within range of the frame pointer.
552 // If it's wrong, we'll materialize the constant and still get to the
553 // object; it's just suboptimal. Negative offsets use the unscaled
554 // load/store instructions, which have a 9-bit signed immediate.
555 return MFI.getLocalFrameSize() >= 256;
556 }
557
558 return false;
559}
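
// Worked detail for the 256-byte threshold above: the unscaled LDUR/STUR
// forms take a 9-bit signed immediate, covering offsets in [-256, 255], so
// once the local frame reaches 256 bytes an FP-relative negative offset can
// already be out of range and a base pointer starts to pay off.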

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows()) {
      if (IsVarArg)
        return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
      switch (CC) {
      default:
        return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
               HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      }
    }
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    if (STI.isTargetWindows())
      return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
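
// The instruction built above is "BaseReg = ADDXri <fi#FrameIdx>, Offset,
// lsl #0"; after frame-index elimination it becomes a plain ADD from SP/FP,
// giving later references a reusable pointer into the local area.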

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets.
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing
  // modes are predicates, which are 2 scalable bytes in size. So the scalable
  // byte offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
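
// Worked example (illustrative): for StackOffset::get(16, 32), i.e. 16 fixed
// and 32 scalable bytes, VGSized = 32 / 2 = 16 and the expression emitted is
//   DW_OP_plus_uconst 16, DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus
// which debuggers evaluate as 16 + 16 * VG, VG being the vector granule
// (vector length in 64-bit units).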

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  case AArch64::FPR128_0to7RegClassID:
    return 8;
  }
}
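
// Worked example for the GPR case above (illustrative): with a frame pointer,
// no base pointer, and no reserved X registers, the limit is
// 32 - 1 (XZR/SP) - 1 (FP) - 0 - 0 = 30 allocatable 64-bit GPRs.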

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;
  return true;
}
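
// Illustrative MIR for the pattern guarded against above (register names are
// made up): the implicit zero-extension is only sound while the subregister
// copy remains a separate instruction, so coalescing it away could retain
// garbage in the upper 32 bits.
//
//   %w:gpr32 = COPY $w0
//   %x:gpr64 = SUBREG_TO_REG 0, %w:gpr32, %subreg.sub_32  ; upper bits zero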