AArch64RegisterInfo.cpp
1//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 implementation of the TargetRegisterInfo
10// class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64RegisterInfo.h"
16#include "AArch64InstrInfo.h"
18#include "AArch64Subtarget.h"
22#include "llvm/ADT/BitVector.h"
32#include "llvm/IR/Function.h"
35
36using namespace llvm;
37
38#define GET_CC_REGISTER_LISTS
39#include "AArch64GenCallingConv.inc"
40#define GET_REGINFO_TARGET_DESC
41#include "AArch64GenRegisterInfo.inc"
42
44 : AArch64GenRegisterInfo(AArch64::LR, 0, 0, 0, HwMode), TT(TT) {
46}
47
48/// Return whether the register needs a CFI entry. Not all unwinders may know
49/// about SVE registers, so we assume the lowest common denominator, i.e. the
50/// callee-saves required by the base ABI. For the SVE registers z8-z15 only the
51/// lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
52/// returned in \p RegToUseForCFI.
54 MCRegister &RegToUseForCFI) const {
55 if (AArch64::PPRRegClass.contains(Reg))
56 return false;
57
58 if (AArch64::ZPRRegClass.contains(Reg)) {
59 RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
60 for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
61 if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
62 return true;
63 }
64 return false;
65 }
66
67 RegToUseForCFI = Reg;
68 return true;
69}
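// Illustrative example (not from the upstream source): for Reg == z8 the
// dsub subregister is d8, which appears in CSR_AArch64_AAPCS_SaveList, so the
// function returns true with RegToUseForCFI == d8. For z24 the dsub
// subregister d24 is not a base-ABI callee-save, so no CFI entry is emitted.
// Predicate registers (p0-p15) never get CFI entries here.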
70
71const MCPhysReg *
73 assert(MF && "Invalid MachineFunction pointer.");
74 auto &AFI = *MF->getInfo<AArch64FunctionInfo>();
75
77 // The GHC set of callee-saved regs is empty, as all those regs are
78 // used for passing STG regs around.
79 return CSR_AArch64_NoRegs_SaveList;
81 return CSR_AArch64_NoneRegs_SaveList;
83 return CSR_AArch64_AllRegs_SaveList;
84
86 return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;
87
88 // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
89 // lists that depend on it will need a Darwin variant as well.
91 return getDarwinCalleeSavedRegs(MF);
92
94 return CSR_Win_AArch64_CFGuard_Check_SaveList;
98 MF->getFunction().getAttributes().hasAttrSomewhere(
99 Attribute::SwiftError))
100 return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
102 return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
104 return CSR_Win_AArch64_AAVPCS_SaveList;
105 if (AFI.hasSVE_AAPCS(*MF))
106 return CSR_Win_AArch64_SVE_AAPCS_SaveList;
107 return CSR_Win_AArch64_AAPCS_SaveList;
108 }
110 return CSR_AArch64_AAVPCS_SaveList;
112 return CSR_AArch64_SVE_AAPCS_SaveList;
113 if (MF->getFunction().getCallingConv() ==
116 "Calling convention "
117 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
118 "supported to improve calls to SME ACLE save/restore/disable-za "
119 "functions, and is not intended to be used beyond that scope.");
120 if (MF->getFunction().getCallingConv() ==
123 "Calling convention "
124 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
125 "only supported to improve calls to SME ACLE __arm_get_current_vg "
126 "function, and is not intended to be used beyond that scope.");
127 if (MF->getFunction().getCallingConv() ==
130 "Calling convention "
131 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
132 "only supported to improve calls to SME ACLE __arm_sme_state "
133 "and is not intended to be used beyond that scope.");
135 ->supportSwiftError() &&
136 MF->getFunction().getAttributes().hasAttrSomewhere(
137 Attribute::SwiftError))
138 return CSR_AArch64_AAPCS_SwiftError_SaveList;
140 return CSR_AArch64_AAPCS_SwiftTail_SaveList;
142 return CSR_AArch64_RT_MostRegs_SaveList;
144 return CSR_AArch64_RT_AllRegs_SaveList;
146 // This is for OSes other than Windows; Windows is a separate case further
147 // above.
148 return CSR_AArch64_AAPCS_X18_SaveList;
149 if (AFI.hasSVE_AAPCS(*MF))
150 return CSR_AArch64_SVE_AAPCS_SaveList;
151 return CSR_AArch64_AAPCS_SaveList;
152}
153
154const MCPhysReg *
156 assert(MF && "Invalid MachineFunction pointer.");
158 "Invalid subtarget for getDarwinCalleeSavedRegs");
159 auto &AFI = *MF->getInfo<AArch64FunctionInfo>();
160
163 "Calling convention CFGuard_Check is unsupported on Darwin.");
165 return CSR_Darwin_AArch64_AAVPCS_SaveList;
168 "Calling convention SVE_VectorCall is unsupported on Darwin.");
169 if (MF->getFunction().getCallingConv() ==
172 "Calling convention "
173 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
174 "only supported to improve calls to SME ACLE save/restore/disable-za "
175 "functions, and is not intended to be used beyond that scope.");
176 if (MF->getFunction().getCallingConv() ==
179 "Calling convention "
180 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
181 "only supported to improve calls to SME ACLE __arm_get_current_vg "
182 "function, and is not intended to be used beyond that scope.");
183 if (MF->getFunction().getCallingConv() ==
186 "Calling convention "
187 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
188 "only supported to improve calls to SME ACLE __arm_sme_state "
189 "and is not intended to be used beyond that scope.");
191 return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
192 ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
193 : CSR_Darwin_AArch64_CXX_TLS_SaveList;
195 ->supportSwiftError() &&
196 MF->getFunction().getAttributes().hasAttrSomewhere(
197 Attribute::SwiftError))
198 return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
200 return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
202 return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
204 return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
206 return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
207 if (AFI.hasSVE_AAPCS(*MF))
208 return CSR_Darwin_AArch64_SVE_AAPCS_SaveList;
209 return CSR_Darwin_AArch64_AAPCS_SaveList;
210}
211
213 const MachineFunction *MF) const {
214 assert(MF && "Invalid MachineFunction pointer.");
217 return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
218 return nullptr;
219}
220
222 MachineFunction &MF) const {
223 const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
224 SmallVector<MCPhysReg, 32> UpdatedCSRs;
225 for (const MCPhysReg *I = CSRs; *I; ++I)
226 UpdatedCSRs.push_back(*I);
227
228 for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
230 UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
231 }
232 }
233 // Register lists are zero-terminated.
234 UpdatedCSRs.push_back(0);
235 MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
236}
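// Illustrative note (assumption, not upstream text): isXRegCustomCalleeSaved
// reflects subtarget features of the form +call-saved-x<n> (e.g.
// -mattr=+call-saved-x18), which append the corresponding X register to the
// callee-saved list built above; the list remains zero-terminated.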
237
240 unsigned Idx) const {
241 // edge case for GPR/FPR register classes
242 if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
243 return &AArch64::FPR32RegClass;
244 else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
245 return &AArch64::FPR64RegClass;
246
247 // Forward to TableGen's default version.
248 return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
249}
250
251const uint32_t *
253 CallingConv::ID CC) const {
255 "Invalid subtarget for getDarwinCallPreservedMask");
256
258 return CSR_Darwin_AArch64_CXX_TLS_RegMask;
260 return CSR_Darwin_AArch64_AAVPCS_RegMask;
262 return CSR_Darwin_AArch64_SVE_AAPCS_RegMask;
264 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
266 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
268 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
271 "Calling convention CFGuard_Check is unsupported on Darwin.");
274 ->supportSwiftError() &&
275 MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
276 return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
277 if (CC == CallingConv::SwiftTail)
278 return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
280 return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
281 if (CC == CallingConv::PreserveAll)
282 return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
283 return CSR_Darwin_AArch64_AAPCS_RegMask;
284}
285
286const uint32_t *
288 CallingConv::ID CC) const {
289 bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
290 if (CC == CallingConv::GHC)
291 // This is academic because all GHC calls are (supposed to be) tail calls
292 return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
294 return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
295 : CSR_AArch64_NoneRegs_RegMask;
296 if (CC == CallingConv::AnyReg)
297 return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
298
299 // All the following calling conventions are handled differently on Darwin.
301 if (SCS)
302 report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
303 return getDarwinCallPreservedMask(MF, CC);
304 }
305
307 return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
309 return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
310 : CSR_AArch64_SVE_AAPCS_RegMask;
312 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
314 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
316 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
318 return CSR_Win_AArch64_CFGuard_Check_RegMask;
320 ->supportSwiftError() &&
321 MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
322 return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
323 : CSR_AArch64_AAPCS_SwiftError_RegMask;
324 if (CC == CallingConv::SwiftTail) {
325 if (SCS)
326 report_fatal_error("ShadowCallStack attribute not supported with swifttail");
327 return CSR_AArch64_AAPCS_SwiftTail_RegMask;
328 }
330 return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
331 : CSR_AArch64_RT_MostRegs_RegMask;
332 if (CC == CallingConv::PreserveAll)
333 return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
334 : CSR_AArch64_RT_AllRegs_RegMask;
335
336 return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
337}
338
340 const MachineFunction &MF) const {
342 return CSR_AArch64_AAPCS_RegMask;
343
344 return nullptr;
345}
346
348 if (TT.isOSDarwin())
349 return CSR_Darwin_AArch64_TLS_RegMask;
350
351 assert(TT.isOSBinFormatELF() && "Invalid target");
352 return CSR_AArch64_TLS_ELF_RegMask;
353}
354
356 const uint32_t **Mask) const {
357 uint32_t *UpdatedMask = MF.allocateRegMask();
358 unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
359 memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
360
361 for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
363 for (MCPhysReg SubReg :
364 subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
365 // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
366 // register mask.
367 UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
368 }
369 }
370 }
371 *Mask = UpdatedMask;
372}
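// Sketch of the regmask encoding used above (illustrative): the mask is an
// array of uint32_t where bit (PhysReg % 32) of word (PhysReg / 32) being set
// means PhysReg is preserved across the call. For example, a register whose
// enum value is 75 lives in word 2, bit 11 (75 == 2 * 32 + 11).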
373
375 return CSR_AArch64_SMStartStop_RegMask;
376}
377
378const uint32_t *
380 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
381}
382
384 return CSR_AArch64_NoRegs_RegMask;
385}
386
387const uint32_t *
389 CallingConv::ID CC) const {
390 // This should return a register mask that is the same as that returned by
391 // getCallPreservedMask but that additionally preserves the register used for
392 // the first i64 argument (which must also be the register used to return a
393 // single i64 return value)
394 //
395 // In case that the calling convention does not use the same register for
396 // both, the function should return NULL (does not currently apply)
397 assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
399 return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
400 return CSR_AArch64_AAPCS_ThisReturn_RegMask;
401}
402
404 return CSR_AArch64_StackProbe_Windows_RegMask;
405}
406
407std::optional<std::string>
409 MCRegister PhysReg) const {
410 if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
411 return std::string("X19 is used as the frame base pointer register.");
412
414 bool warn = false;
415 if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
416 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
417 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
418 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
419 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
420 warn = true;
421
422 for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
423 if (MCRegisterInfo::regsOverlap(PhysReg, i))
424 warn = true;
425
426 if (warn)
427 return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
428 " is clobbered by asynchronous signals when using Arm64EC.";
429 }
430
431 return {};
432}
433
436 const AArch64FrameLowering *TFI = getFrameLowering(MF);
437
438 // FIXME: avoid re-calculating this every time.
439 BitVector Reserved(getNumRegs());
440 markSuperRegs(Reserved, AArch64::WSP);
441 markSuperRegs(Reserved, AArch64::WZR);
442
443 if (TFI->isFPReserved(MF))
444 markSuperRegs(Reserved, AArch64::W29);
445
447 // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
448 // signals, so we can't ever use them.
449 markSuperRegs(Reserved, AArch64::W13);
450 markSuperRegs(Reserved, AArch64::W14);
451 markSuperRegs(Reserved, AArch64::W23);
452 markSuperRegs(Reserved, AArch64::W24);
453 markSuperRegs(Reserved, AArch64::W28);
454 for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
455 markSuperRegs(Reserved, i);
456 }
457
458 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
460 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
461 }
462
463 if (hasBasePointer(MF))
464 markSuperRegs(Reserved, AArch64::W19);
465
466 // SLH uses register W16/X16 as the taint register.
467 if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
468 markSuperRegs(Reserved, AArch64::W16);
469
470 // FFR is modelled as global state that cannot be allocated.
471 if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
472 Reserved.set(AArch64::FFR);
473
474 // SME tiles are not allocatable.
475 if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
476 for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
477 Reserved.set(SubReg);
478 }
479
480 // VG cannot be allocated
481 Reserved.set(AArch64::VG);
482
483 if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
484 for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
485 SubReg.isValid(); ++SubReg)
486 Reserved.set(*SubReg);
487 }
488
489 markSuperRegs(Reserved, AArch64::FPCR);
490 markSuperRegs(Reserved, AArch64::FPMR);
491 markSuperRegs(Reserved, AArch64::FPSR);
492
494 markSuperRegs(Reserved, AArch64::X27);
495 markSuperRegs(Reserved, AArch64::X28);
496 markSuperRegs(Reserved, AArch64::W27);
497 markSuperRegs(Reserved, AArch64::W28);
498 }
499
500 assert(checkAllSuperRegsMarked(Reserved));
501
502 // Add _HI registers after checkAllSuperRegsMarked as this check otherwise
503 // becomes considerably more expensive.
504 Reserved.set(AArch64::WSP_HI);
505 Reserved.set(AArch64::WZR_HI);
506 static_assert(AArch64::W30_HI - AArch64::W0_HI == 30,
507 "Unexpected order of registers");
508 Reserved.set(AArch64::W0_HI, AArch64::W30_HI);
509 static_assert(AArch64::B31_HI - AArch64::B0_HI == 31,
510 "Unexpected order of registers");
511 Reserved.set(AArch64::B0_HI, AArch64::B31_HI);
512 static_assert(AArch64::H31_HI - AArch64::H0_HI == 31,
513 "Unexpected order of registers");
514 Reserved.set(AArch64::H0_HI, AArch64::H31_HI);
515 static_assert(AArch64::S31_HI - AArch64::S0_HI == 31,
516 "Unexpected order of registers");
517 Reserved.set(AArch64::S0_HI, AArch64::S31_HI);
518 static_assert(AArch64::D31_HI - AArch64::D0_HI == 31,
519 "Unexpected order of registers");
520 Reserved.set(AArch64::D0_HI, AArch64::D31_HI);
521 static_assert(AArch64::Q31_HI - AArch64::Q0_HI == 31,
522 "Unexpected order of registers");
523 Reserved.set(AArch64::Q0_HI, AArch64::Q31_HI);
524
525 return Reserved;
526}
527
530 BitVector Reserved(getNumRegs());
531 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
532 // ReserveXRegister is set for registers manually reserved
533 // through +reserve-x#i.
535 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
536 }
537 return Reserved;
538}
539
542 BitVector Reserved(getNumRegs());
543 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
545 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
546 }
547
549 // In order to prevent the register allocator from using LR, we need to
550 // mark it as reserved. However, we don't want to keep it reserved throughout
551 // the pipeline since it prevents other infrastructure from reasoning about
552 // its liveness. We use the NoVRegs property instead of IsSSA because
553 // IsSSA is removed before VirtRegRewriter runs.
554 if (!MF.getProperties().hasNoVRegs())
555 markSuperRegs(Reserved, AArch64::LR);
556 }
557
558 assert(checkAllSuperRegsMarked(Reserved));
559
560 // Handle strictlyReservedRegs separately to avoid re-evaluating the assert,
561 // which becomes considerably more expensive when considering the _HI registers.
563
564 return Reserved;
565}
566
568 MCRegister Reg) const {
569 return getReservedRegs(MF)[Reg];
570}
571
573 MCRegister Reg) const {
574 return getUserReservedRegs(MF)[Reg];
575}
576
578 MCRegister Reg) const {
579 return getStrictlyReservedRegs(MF)[Reg];
580}
581
583 return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
584 return isStrictlyReservedReg(MF, r);
585 });
586}
587
589 const MachineFunction &MF) const {
590 const Function &F = MF.getFunction();
591 F.getContext().diagnose(DiagnosticInfoUnsupported{F, ("AArch64 doesn't support"
592 " function calls if any of the argument registers is reserved.")});
593}
594
596 MCRegister PhysReg) const {
597 // SLH uses register X16 as the taint register, but it will fall back to a different
598 // method if the user clobbers it. So X16 is not reserved for inline asm but is
599 // for normal codegen.
600 if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
601 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
602 return true;
603
604 // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
605 if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
606 return true;
607
608 return !isReservedReg(MF, PhysReg);
609}
610
613 return &AArch64::GPR64spRegClass;
614}
615
618 if (RC == &AArch64::CCRRegClass)
619 return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
620 return RC;
621}
622
623unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
624
626 const MachineFrameInfo &MFI = MF.getFrameInfo();
627
628 // In the presence of variable sized objects or funclets, if the fixed stack
629 // size is large enough that referencing from the FP won't result in things
630 // being in range relatively often, we can use a base pointer to allow access
631 // from the other direction like the SP normally works.
632 //
633 // Furthermore, if variable sized objects are present and the
634 // stack needs to be dynamically re-aligned, the base pointer is the only
635 // reliable way to reference the locals.
636 if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
637 if (hasStackRealignment(MF))
638 return true;
639
640 auto &ST = MF.getSubtarget<AArch64Subtarget>();
642 if (ST.hasSVE() || ST.isStreaming()) {
643 // Frames that have variable sized objects and scalable SVE objects,
644 // should always use a basepointer.
645 if (!AFI->hasCalculatedStackSizeSVE() || AFI->hasSVEStackSize())
646 return true;
647 }
648
649 // Frames with hazard padding can have a large offset between the frame
650 // pointer and GPR locals, which includes the emergency spill slot. If the
651 // emergency spill slot is not within range of the load/store instructions
652 // (which have a signed 9-bit range), we will fail to compile if it is used.
653 // Since hasBasePointer() is called before we know if we have hazard padding
654 // or an emergency spill slot, we need to enable the base pointer
655 // conservatively.
656 if (ST.getStreamingHazardSize() &&
657 !AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody()) {
658 return true;
659 }
660
661 // Conservatively estimate whether the negative offset from the frame
662 // pointer will be sufficient to reach. If a function has a smallish
663 // frame, it's less likely to have lots of spills and callee saved
664 // space, so it's all more likely to be within range of the frame pointer.
665 // If it's wrong, we'll materialize the constant and still get to the
666 // object; it's just suboptimal. Negative offsets use the unscaled
667 // load/store instructions, which have a 9-bit signed immediate.
668 return MFI.getLocalFrameSize() >= 256;
669 }
670
671 return false;
672}
673
675 MCRegister Reg) const {
678 bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv(),
679 MF.getFunction().isVarArg());
680
681 auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
682 return llvm::is_contained(RegList, Reg);
683 };
684
685 switch (CC) {
686 default:
687 report_fatal_error("Unsupported calling convention.");
688 case CallingConv::GHC:
689 return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
691 if (!MF.getFunction().isVarArg())
692 return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
693 [[fallthrough]];
694 case CallingConv::C:
702 if (STI.isTargetWindows()) {
703 if (IsVarArg)
704 return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
705 switch (CC) {
706 default:
707 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
710 return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
711 HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
712 }
713 }
714 if (!STI.isTargetDarwin()) {
715 switch (CC) {
716 default:
717 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
720 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
721 HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
722 }
723 }
724 if (!IsVarArg) {
725 switch (CC) {
726 default:
727 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
730 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
731 HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
732 }
733 }
734 if (STI.isTargetILP32())
735 return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
736 return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
738 if (IsVarArg)
739 return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
740 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
742 return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
748 if (STI.isTargetWindows())
749 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
750 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
751 }
752}
753
756 const AArch64FrameLowering *TFI = getFrameLowering(MF);
757 return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
758}
759
761 const MachineFunction &MF) const {
762 return true;
763}
764
766 const MachineFunction &MF) const {
767 return true;
768}
769
770bool
772 // This function indicates whether the emergency spill slot should be placed
773 // close to the beginning of the stack frame (closer to FP) or the end
774 // (closer to SP).
775 //
776 // The beginning works most reliably if we have a frame pointer.
777 // In the presence of any non-constant space between FP and locals
778 // (e.g. in case of stack realignment or a scalable SVE area), it is
779 // better to use SP or BP.
780 const AArch64FrameLowering &TFI = *getFrameLowering(MF);
782 assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
784 "Expected SVE area to be calculated by this point");
785 return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->hasSVEStackSize() &&
787}
788
790 const MachineFunction &MF) const {
791 return true;
792}
793
794bool
796 const MachineFrameInfo &MFI = MF.getFrameInfo();
798 return true;
799 return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
800}
801
802/// needsFrameBaseReg - Returns true if the instruction's frame index
803/// reference would be better served by a base register other than FP
804/// or SP. Used by LocalStackFrameAllocation to determine which frame index
805/// references it should create new base registers for.
807 int64_t Offset) const {
808 for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
809 assert(i < MI->getNumOperands() &&
810 "Instr doesn't have FrameIndex operand!");
811
812 // It's the load/store FI references that cause issues, as it can be difficult
813 // to materialize the offset if it won't fit in the literal field. Estimate
814 // based on the size of the local frame and some conservative assumptions
815 // about the rest of the stack frame (note, this is pre-regalloc, so
816 // we don't know everything for certain yet) whether this offset is likely
817 // to be out of range of the immediate. Return true if so.
818
819 // We only generate virtual base registers for loads and stores, so
820 // return false for everything else.
821 if (!MI->mayLoad() && !MI->mayStore())
822 return false;
823
824 // Without a virtual base register, if the function has variable sized
825 // objects, all fixed-size local references will be via the frame pointer.
826 // Approximate the offset and see if it's legal for the instruction.
827 // Note that the incoming offset is based on the SP value at function entry,
828 // so it'll be negative.
829 MachineFunction &MF = *MI->getParent()->getParent();
830 const AArch64FrameLowering *TFI = getFrameLowering(MF);
831 MachineFrameInfo &MFI = MF.getFrameInfo();
832
833 // Estimate an offset from the frame pointer.
834 // Conservatively assume all GPR callee-saved registers get pushed.
835 // FP, LR, X19-X28, D8-D15. 64-bits each.
836 int64_t FPOffset = Offset - 16 * 20;
837 // Estimate an offset from the stack pointer.
838 // The incoming offset is relating to the SP at the start of the function,
839 // but when we access the local it'll be relative to the SP after local
840 // allocation, so adjust our SP-relative offset by that allocation size.
841 Offset += MFI.getLocalFrameSize();
842 // Assume that we'll have at least some spill slots allocated.
843 // FIXME: This is a total SWAG number. We should run some statistics
844 // and pick a real one.
845 Offset += 128; // 128 bytes of spill slots
846
847 // If there is a frame pointer, try using it.
848 // The FP is only available if there is no dynamic realignment. We
849 // don't know for sure yet whether we'll need that, so we guess based
850 // on whether there are any local variables that would trigger it.
851 if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
852 return false;
853
854 // If we can reference via the stack pointer or base pointer, try that.
855 // FIXME: This (and the code that resolves the references) can be improved
856 // to only disallow SP relative references in the live range of
857 // the VLA(s). In practice, it's unclear how much difference that
858 // would make, but it may be worth doing.
859 if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
860 return false;
861
862 // If even offset 0 is illegal, we don't want a virtual base register.
863 if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
864 return false;
865
866 // The offset likely isn't legal; we want to allocate a virtual base register.
867 return true;
868}
869
871 Register BaseReg,
872 int64_t Offset) const {
873 assert(MI && "Unable to get the legal offset for nil instruction.");
876}
877
878/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
879/// at the beginning of the basic block.
882 int FrameIdx,
883 int64_t Offset) const {
884 MachineBasicBlock::iterator Ins = MBB->begin();
885 DebugLoc DL; // Defaults to "unknown"
886 if (Ins != MBB->end())
887 DL = Ins->getDebugLoc();
888 const MachineFunction &MF = *MBB->getParent();
889 const AArch64InstrInfo *TII =
890 MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
891 const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
892 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
893 Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
894 MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this));
895 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
896
897 BuildMI(*MBB, Ins, DL, MCID, BaseReg)
898 .addFrameIndex(FrameIdx)
899 .addImm(Offset)
900 .addImm(Shifter);
901
902 return BaseReg;
903}
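// Illustrative result of the code above: it emits "%base:gpr64sp = ADDXri
// <fi#N>, Offset, 0", which frame index elimination later rewrites into
// something like "add x<n>, sp, #<resolved offset>" once the frame is laid out.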
904
906 int64_t Offset) const {
907 // AArch64 doesn't need the general 64-bit offsets.
909
910 unsigned i = 0;
911 while (!MI.getOperand(i).isFI()) {
912 ++i;
913 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
914 }
915
916 const MachineFunction *MF = MI.getParent()->getParent();
917 const AArch64InstrInfo *TII =
918 MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
919 bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
920 assert(Done && "Unable to resolve frame index!");
921 (void)Done;
922}
923
924// Create a scratch register for the frame index elimination in an instruction.
925// This function has special handling of stack tagging loop pseudos, in which
926// case it can also change the instruction opcode.
927static Register
929 const AArch64InstrInfo *TII) {
930 // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
931 // replace the instruction with the writeback variant because it will now
932 // satisfy the operand constraints for it.
933 Register ScratchReg;
934 if (MI.getOpcode() == AArch64::STGloop ||
935 MI.getOpcode() == AArch64::STZGloop) {
936 assert(FIOperandNum == 3 &&
937 "Wrong frame index operand for STGloop/STZGloop");
938 unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
939 : AArch64::STZGloop_wback;
940 ScratchReg = MI.getOperand(1).getReg();
941 MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
942 MI.setDesc(TII->get(Op));
943 MI.tieOperands(1, 3);
944 } else {
945 ScratchReg =
946 MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
947 MI.getOperand(FIOperandNum)
948 .ChangeToRegister(ScratchReg, false, false, true);
949 }
950 return ScratchReg;
951}
952
955 // The smallest scalable element supported by scaled SVE addressing
956 // modes is the predicate, which is 2 scalable bytes in size. So the scalable
957 // byte offset must always be a multiple of 2.
958 assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
959
960 // Add fixed-sized offset using existing DIExpression interface.
962
963 unsigned VG = getDwarfRegNum(AArch64::VG, true);
964 int64_t VGSized = Offset.getScalable() / 2;
965 if (VGSized > 0) {
966 Ops.push_back(dwarf::DW_OP_constu);
967 Ops.push_back(VGSized);
968 Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
969 Ops.push_back(dwarf::DW_OP_mul);
970 Ops.push_back(dwarf::DW_OP_plus);
971 } else if (VGSized < 0) {
972 Ops.push_back(dwarf::DW_OP_constu);
973 Ops.push_back(-VGSized);
974 Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
975 Ops.push_back(dwarf::DW_OP_mul);
976 Ops.push_back(dwarf::DW_OP_minus);
977 }
978}
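// Worked example (illustrative): for Offset = 16 fixed + 32 scalable bytes,
// VGSized = 32 / 2 = 16 and the appended expression is roughly
//   DW_OP_plus_uconst 16, DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus
// i.e. base + 16 + 16 * VG, where VG is the vector length in 64-bit granules.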
979
981 int SPAdj, unsigned FIOperandNum,
982 RegScavenger *RS) const {
983 assert(SPAdj == 0 && "Unexpected");
984
985 MachineInstr &MI = *II;
986 MachineBasicBlock &MBB = *MI.getParent();
987 MachineFunction &MF = *MBB.getParent();
988 const MachineFrameInfo &MFI = MF.getFrameInfo();
989 const AArch64InstrInfo *TII =
990 MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
991 const AArch64FrameLowering *TFI = getFrameLowering(MF);
992 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
993 bool Tagged =
994 MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
995 Register FrameReg;
996
997 // Special handling of dbg_value, stackmap, patchpoint, and statepoint instructions.
998 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
999 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
1000 MI.getOpcode() == TargetOpcode::STATEPOINT) {
1002 TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
1003 /*PreferFP=*/true,
1004 /*ForSimm=*/false);
1005 Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
1006 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
1007 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
1008 return false;
1009 }
1010
1011 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
1012 MachineOperand &FI = MI.getOperand(FIOperandNum);
1013 StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
1014 assert(!Offset.getScalable() &&
1015 "Frame offsets with a scalable component are not supported");
1016 FI.ChangeToImmediate(Offset.getFixed());
1017 return false;
1018 }
1019
1021 if (MI.getOpcode() == AArch64::TAGPstack) {
1022 // TAGPstack must use the virtual frame register in its 3rd operand.
1024 FrameReg = MI.getOperand(3).getReg();
1025 Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
1027 } else if (Tagged) {
1029 MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
1030 if (MFI.hasVarSizedObjects() ||
1031 isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
1033 // Can't update to SP + offset in place. Precalculate the tagged pointer
1034 // in a scratch register.
1036 MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
1037 Register ScratchReg =
1038 MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
1039 emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
1040 TII);
1041 BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
1042 .addReg(ScratchReg)
1043 .addReg(ScratchReg)
1044 .addImm(0);
1045 MI.getOperand(FIOperandNum)
1046 .ChangeToRegister(ScratchReg, false, false, true);
1047 return false;
1048 }
1049 FrameReg = AArch64::SP;
1050 Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
1051 (int64_t)MFI.getStackSize());
1052 } else {
1054 MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
1055 }
1056
1057 // Modify MI as necessary to handle as much of 'Offset' as possible
1058 if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
1059 return true;
1060
1061 assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
1062 "Emergency spill slot is out of reach");
1063
1064 // If we get here, the immediate doesn't fit into the instruction. We folded
1065 // as much as possible above. Handle the rest, providing a register that is
1066 // SP+LargeImm.
1067 Register ScratchReg =
1069 emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
1070 return false;
1071}
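// Illustrative flow of the function above: a load such as
// "LDRXui %x0, <fi#2>, 0" first has its frame index resolved to FrameReg +
// Offset; if the scaled immediate fits, rewriteAArch64FrameIndex folds it in
// place (e.g. "ldr x0, [sp, #16]"), otherwise the remainder is materialized
// into a scratch register via emitFrameOffset and used as the base.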
1072
1074 MachineFunction &MF) const {
1075 const AArch64FrameLowering *TFI = getFrameLowering(MF);
1076
1077 switch (RC->getID()) {
1078 default:
1079 return 0;
1080 case AArch64::GPR32RegClassID:
1081 case AArch64::GPR32spRegClassID:
1082 case AArch64::GPR32allRegClassID:
1083 case AArch64::GPR64spRegClassID:
1084 case AArch64::GPR64allRegClassID:
1085 case AArch64::GPR64RegClassID:
1086 case AArch64::GPR32commonRegClassID:
1087 case AArch64::GPR64commonRegClassID:
1088 return 32 - 1 // XZR/SP
1089 - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
1090 - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
1091 - hasBasePointer(MF); // X19
1092 case AArch64::FPR8RegClassID:
1093 case AArch64::FPR16RegClassID:
1094 case AArch64::FPR32RegClassID:
1095 case AArch64::FPR64RegClassID:
1096 case AArch64::FPR128RegClassID:
1097 return 32;
1098
1099 case AArch64::MatrixIndexGPR32_8_11RegClassID:
1100 case AArch64::MatrixIndexGPR32_12_15RegClassID:
1101 return 4;
1102
1103 case AArch64::DDRegClassID:
1104 case AArch64::DDDRegClassID:
1105 case AArch64::DDDDRegClassID:
1106 case AArch64::QQRegClassID:
1107 case AArch64::QQQRegClassID:
1108 case AArch64::QQQQRegClassID:
1109 return 32;
1110
1111 case AArch64::FPR128_loRegClassID:
1112 case AArch64::FPR64_loRegClassID:
1113 case AArch64::FPR16_loRegClassID:
1114 return 16;
1115 case AArch64::FPR128_0to7RegClassID:
1116 return 8;
1117 }
1118}
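// Worked example for the GPR case above (illustrative): on Darwin with a
// frame pointer, a base pointer, and no +reserve-x<n> registers, the limit is
// 32 - 1 (XZR/SP) - 1 (FP) - 0 - 1 (X19) = 29 allocatable X registers.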
1119
1120// FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register allocation
1121// where a consecutive multi-vector tuple is constructed from the same indices
1122// of multiple strided loads. This may still result in unnecessary copies
1123// between the loads and the tuple. Here we try to return a hint to assign the
1124// contiguous ZPRMulReg starting at the same register as the first operand of
1125// the pseudo, which should be a subregister of the first strided load.
1126//
1127// For example, if the first strided load has been assigned $z16_z20_z24_z28
1128// and the operands of the pseudo are each accessing subregister zsub2, we
1129 // should look through Order to find a contiguous register which
1130// begins with $z24 (i.e. $z24_z25_z26_z27).
1131//
1133 Register VirtReg, ArrayRef<MCPhysReg> Order,
1135 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
1136
1137 auto &ST = MF.getSubtarget<AArch64Subtarget>();
1138 if (!ST.hasSME() || !ST.isStreaming())
1139 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
1140 VRM);
1141
1142 // The SVE calling convention preserves registers Z8-Z23. As a result, there
1143 // are no ZPR2Strided or ZPR4Strided registers that do not overlap with the
1144 // callee-saved registers and so by default these will be pushed to the back
1145 // of the allocation order for the ZPRStridedOrContiguous classes.
1146 // If any of the instructions which define VirtReg are used by the
1147 // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
1148 // instructions over reducing the number of clobbered callee-save registers,
1149 // so we add the strided registers as a hint.
1150 const MachineRegisterInfo &MRI = MF.getRegInfo();
1151 unsigned RegID = MRI.getRegClass(VirtReg)->getID();
1152 if (RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
1153 RegID == AArch64::ZPR4StridedOrContiguousRegClassID) {
1154
1155 // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
1156 for (const MachineInstr &Use : MRI.use_nodbg_instructions(VirtReg)) {
1157 if (Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
1158 Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
1159 continue;
1160
1161 unsigned UseOps = Use.getNumOperands() - 1;
1162 const TargetRegisterClass *StridedRC;
1163 switch (RegID) {
1164 case AArch64::ZPR2StridedOrContiguousRegClassID:
1165 StridedRC = &AArch64::ZPR2StridedRegClass;
1166 break;
1167 case AArch64::ZPR4StridedOrContiguousRegClassID:
1168 StridedRC = &AArch64::ZPR4StridedRegClass;
1169 break;
1170 default:
1171 llvm_unreachable("Unexpected RegID");
1172 }
1173
1174 SmallVector<MCPhysReg, 4> StridedOrder;
1175 for (MCPhysReg Reg : Order)
1176 if (StridedRC->contains(Reg))
1177 StridedOrder.push_back(Reg);
1178
1179 int OpIdx = Use.findRegisterUseOperandIdx(VirtReg, this);
1180 assert(OpIdx != -1 && "Expected operand index from register use.");
1181
1182 unsigned TupleID = MRI.getRegClass(Use.getOperand(0).getReg())->getID();
1183 bool IsMulZPR = TupleID == AArch64::ZPR2Mul2RegClassID ||
1184 TupleID == AArch64::ZPR4Mul4RegClassID;
1185
1186 const MachineOperand *AssignedRegOp = llvm::find_if(
1187 make_range(Use.operands_begin() + 1, Use.operands_end()),
1188 [&VRM](const MachineOperand &Op) {
1189 return VRM->hasPhys(Op.getReg());
1190 });
1191
1192 // Example:
1193 //
1194 // When trying to find a suitable register allocation for VirtReg %v2 in:
1195 //
1196 // %v0:zpr2stridedorcontiguous = ld1 p0/z, [...]
1197 // %v1:zpr2stridedorcontiguous = ld1 p0/z, [...]
1198 // %v2:zpr2stridedorcontiguous = ld1 p0/z, [...]
1199 // %v3:zpr2stridedorcontiguous = ld1 p0/z, [...]
1200 // %v4:zpr4mul4 = FORM_TRANSPOSED_X4 %v0:0, %v1:0, %v2:0, %v3:0
1201 //
1202 // One such suitable allocation would be:
1203 //
1204 // { z0, z8 } = ld1 p0/z, [...]
1205 // { z1, z9 } = ld1 p0/z, [...]
1206 // { z2, z10 } = ld1 p0/z, [...]
1207 // { z3, z11 } = ld1 p0/z, [...]
1208 // { z0, z1, z2, z3 } =
1209 // FORM_TRANSPOSED_X4 {z0, z8}:0, {z1, z9}:0, {z2, z10}:0, {z3, z11}:0
1210 //
1211 // Below we distinguish two cases when trying to find a register:
1212 // * None of the registers used by FORM_TRANSPOSED_X4 have been assigned
1213 // yet. In this case the code must ensure that there are at least UseOps
1214 // free consecutive registers. If IsMulZPR is true, then the first of the
1215 // registers must also be a multiple of UseOps, e.g. { z0, z1, z2, z3 }
1216 // is valid but { z1, z2, z3, z5 } is not.
1217 // * One or more of the registers used by FORM_TRANSPOSED_X4 is already
1218 // assigned a physical register, which means only checking that a
1219 // consecutive range of free tuple registers exists which includes
1220 // the assigned register.
1221 // e.g. in the example above, if { z0, z8 } is already allocated for
1222 // %v0, we just need to ensure that { z1, z9 }, { z2, z10 } and
1223 // { z3, z11 } are also free. If so, we add { z2, z10 }.
1224
1225 if (AssignedRegOp == Use.operands_end()) {
1226 // There are no registers already assigned to any of the pseudo
1227 // operands. Look for a valid starting register for the group.
1228 for (unsigned I = 0; I < StridedOrder.size(); ++I) {
1229 MCPhysReg Reg = StridedOrder[I];
1230
1231 // If the FORM_TRANSPOSE nodes use the ZPRMul classes, the starting
1232 // register of the first load should be a multiple of 2 or 4.
1233 unsigned SubRegIdx = Use.getOperand(OpIdx).getSubReg();
1234 if (IsMulZPR && (getSubReg(Reg, SubRegIdx) - AArch64::Z0) % UseOps !=
1235 ((unsigned)OpIdx - 1))
1236 continue;
1237
1238 // In the example above, if VirtReg is the third operand of the
1239 // tuple (%v2) and Reg == Z2_Z10, then we need to make sure that
1240 // Z0_Z8, Z1_Z9 and Z3_Z11 are also available.
1241 auto IsFreeConsecutiveReg = [&](unsigned UseOp) {
1242 unsigned R = Reg - (OpIdx - 1) + UseOp;
1243 return StridedRC->contains(R) &&
1244 (UseOp == 0 ||
1245 ((getSubReg(R, AArch64::zsub0) - AArch64::Z0) ==
1246 (getSubReg(R - 1, AArch64::zsub0) - AArch64::Z0) + 1)) &&
1247 !Matrix->isPhysRegUsed(R);
1248 };
1249 if (all_of(iota_range<unsigned>(0U, UseOps, /*Inclusive=*/false),
1250 IsFreeConsecutiveReg))
1251 Hints.push_back(Reg);
1252 }
1253 } else {
1254 // At least one operand already has a physical register assigned.
1255 // Find the starting sub-register of this and use it to work out the
1256 // correct strided register to suggest based on the current op index.
1257 MCPhysReg TargetStartReg =
1258 getSubReg(VRM->getPhys(AssignedRegOp->getReg()), AArch64::zsub0) +
1259 (OpIdx - AssignedRegOp->getOperandNo());
1260
1261 for (unsigned I = 0; I < StridedOrder.size(); ++I)
1262 if (getSubReg(StridedOrder[I], AArch64::zsub0) == TargetStartReg)
1263 Hints.push_back(StridedOrder[I]);
1264 }
1265
1266 if (!Hints.empty())
1267 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
1268 MF, VRM);
1269 }
1270 }
1271
1272 for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
1273 if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
1274 MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
1275 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
1276 MF, VRM);
1277
1278 unsigned FirstOpSubReg = MI.getOperand(1).getSubReg();
1279 switch (FirstOpSubReg) {
1280 case AArch64::zsub0:
1281 case AArch64::zsub1:
1282 case AArch64::zsub2:
1283 case AArch64::zsub3:
1284 break;
1285 default:
1286 continue;
1287 }
1288
1289 // Look up the physical register mapped to the first operand of the pseudo.
1290 Register FirstOpVirtReg = MI.getOperand(1).getReg();
1291 if (!VRM->hasPhys(FirstOpVirtReg))
1292 continue;
1293
1294 MCRegister TupleStartReg =
1295 getSubReg(VRM->getPhys(FirstOpVirtReg), FirstOpSubReg);
1296 for (unsigned I = 0; I < Order.size(); ++I)
1297 if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
1298 if (R == TupleStartReg)
1299 Hints.push_back(Order[I]);
1300 }
1301
1302 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
1303 VRM);
1304}
1305
1307 const MachineFunction &MF) const {
1308 const auto &MFI = MF.getFrameInfo();
1309 if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
1310 return AArch64::SP;
1311 else if (hasStackRealignment(MF))
1312 return getBaseRegister();
1313 return getFrameRegister(MF);
1314}
1315
1316/// SrcRC and DstRC will be morphed into NewRC if this returns true
1318 MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
1319 const TargetRegisterClass *DstRC, unsigned DstSubReg,
1320 const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
1321 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
1322
1323 if (MI->isCopy() &&
1324 ((DstRC->getID() == AArch64::GPR64RegClassID) ||
1325 (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
1326 MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
1327 // Do not coalesce in the case of a 32-bit subregister copy that
1328 // implements a 32-to-64-bit zero extension, which relies on the
1329 // upper 32 bits being zeroed.
1330 return false;
1331
1332 auto IsCoalescerBarrier = [](const MachineInstr &MI) {
1333 switch (MI.getOpcode()) {
1334 case AArch64::COALESCER_BARRIER_FPR16:
1335 case AArch64::COALESCER_BARRIER_FPR32:
1336 case AArch64::COALESCER_BARRIER_FPR64:
1337 case AArch64::COALESCER_BARRIER_FPR128:
1338 return true;
1339 default:
1340 return false;
1341 }
1342 };
1343
1344 // For calls that temporarily have to toggle streaming mode as part of the
1345 // call-sequence, we need to be more careful when coalescing copy instructions
1346 // so that we don't end up coalescing the NEON/FP result or argument register
1347 // with a whole Z-register, such that after coalescing the register allocator
1348 // will try to spill/reload the entire Z register.
1349 //
1350 // We do this by checking if the node has any defs/uses that are
1351 // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist to
1352 // instruct the coalescer to avoid coalescing the copy.
1353 if (MI->isCopy() && SubReg != DstSubReg &&
1354 (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
1355 AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
1356 unsigned SrcReg = MI->getOperand(1).getReg();
1357 if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
1358 return false;
1359 unsigned DstReg = MI->getOperand(0).getReg();
1360 if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
1361 return false;
1362 }
1363
1364 return true;
1365}
1366
1368 MCRegister R) const {
1369 return R == AArch64::VG;
1370}
1371
1373 return (LLVMReg >= AArch64::Z0 && LLVMReg <= AArch64::Z31) ||
1374 (LLVMReg >= AArch64::P0 && LLVMReg <= AArch64::P15);
1375}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Live Register Matrix
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
static unsigned getDwarfRegNum(MCRegister Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
virtual bool isIgnoredCVReg(MCRegister LLVMReg) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
const TargetRegisterClass * getPointerRegClass(unsigned Kind=0) const override
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
bool isUserReservedReg(const MachineFunction &MF, MCRegister Reg) const
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
bool requiresRegisterScavenging(const MachineFunction &MF) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
BitVector getUserReservedRegs(const MachineFunction &MF) const
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
bool regNeedsCFI(MCRegister Reg, MCRegister &RegToUseForCFI) const
Return whether the register needs a CFI entry.
bool isAnyArgRegReserved(const MachineFunction &MF) const
void emitReservedArgRegCallError(const MachineFunction &MF) const
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
const uint32_t * getTLSCallPreservedMask() const
const uint32_t * getNoPreservedMask() const override
Register getFrameRegister(const MachineFunction &MF) const override
bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const override
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
const MCPhysReg * getDarwinCalleeSavedRegs(const MachineFunction *MF) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
unsigned getLocalAddressRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
const uint32_t * getSMStartStopCallPreservedMask() const
bool useFPForScavengingIndex(const MachineFunction &MF) const override
bool cannotEliminateFrame(const MachineFunction &MF) const
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
std::optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
bool isXRegisterReservedForRA(size_t i) const
const AArch64TargetLowering * getTargetLowering() const override
bool isXRegCustomCalleeSaved(size_t i) const
bool isXRegisterReserved(size_t i) const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition DebugLoc.h:124
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:352
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Describe properties that are true of each instruction in the target description file.
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
MCSubRegIterator enumerates all sub-registers of Reg.
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
int64_t getLocalFrameSize() const
Get the size of the local object blob.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns the number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
LLVM_ABI void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
bool isScavengingFrameIndex(int FI) const
Query whether a frame index is a scavenging frame index.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:31
int64_t getFixed() const
Returns the fixed component of the stack.
Definition TypeSize.h:47
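A minimal sketch (not code from this file) of constructing and reading StackOffset values; the numbers are arbitrary:

#include "llvm/Support/TypeSize.h"

void stackOffsetExample() {
  // One purely fixed offset and one that also carries a scalable
  // (vector-length dependent) component.
  llvm::StackOffset A = llvm::StackOffset::getFixed(16);
  llvm::StackOffset B = llvm::StackOffset::get(/*Fixed=*/16, /*Scalable=*/32);
  (void)A.getFixed();    // 16
  (void)B.getScalable(); // 32
}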
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetOptions Options
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disabled for the given machine function.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
MCRegister getPhys(Register virtReg) const
Returns the physical register mapped to the specified virtual register.
Definition VirtRegMap.h:91
bool hasPhys(Register virtReg) const
Returns true if the specified virtual register is mapped to a physical register.
Definition VirtRegMap.h:87
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount. imm is a 6-bit shift amount; shifter encodes the shift type: 000 ==> lsl, 001 ==> lsr, 010 ==> asr, 011 ==> ror, 100 ==> msl.
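A hedged one-line usage sketch; the include path is the usual location of this helper inside the AArch64 target and is an assumption here:

#include "MCTargetDesc/AArch64AddressingModes.h"

// Encode an "LSL #0" shifter field, e.g. for the immediate form of ADD,
// which frame-lowering code would then pass along via addImm().
unsigned shifterLSL0() {
  return AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
}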
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
@ Swift
Calling convention for Swift.
Definition CallingConv.h:69
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Flow Guard check ICall function.
Definition CallingConv.h:82
@ PreserveMost
Used for runtime calls that preserve most registers.
Definition CallingConv.h:63
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition CallingConv.h:60
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition CallingConv.h:41
@ PreserveNone
Used for runtime calls that preserve no general registers.
Definition CallingConv.h:90
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.
Definition CallingConv.h:87
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
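A minimal sketch (not code from this file) of the range form, which avoids spelling out begin()/end(); the data and predicate are invented:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

bool allPositive(const llvm::SmallVectorImpl<int> &Vals) {
  // Equivalent to std::all_of(Vals.begin(), Vals.end(), ...).
  return llvm::all_of(Vals, [](int V) { return V > 0; });
}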
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
@ Done
Definition Threading.h:60
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1712
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
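A hedged, non-runnable fragment illustrating the call shape only; MBB, MBBI, DL and TII are assumed to exist in a surrounding frame-lowering context, and the registers and offsets are invented:

// Hypothetical fragment, not a call site from this file: set X15 to SP plus
// a fixed 16 bytes plus a scalable component.
emitFrameOffset(MBB, MBBI, DL, AArch64::X15, AArch64::SP,
                StackOffset::get(/*Fixed=*/16, /*Scalable=*/16), TII,
                MachineInstr::NoFlags);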
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1877
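To close the index, a minimal sketch (not code from this file) exercising the remaining range helpers (any_of, find_if, is_contained) on invented data:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

void rangeHelpers() {
  llvm::SmallVector<unsigned, 4> Regs = {3, 7, 11};
  bool HasSeven = llvm::is_contained(Regs, 7u);                          // true
  auto It = llvm::find_if(Regs, [](unsigned R) { return R > 5; });       // -> 7
  bool AnyLarge = llvm::any_of(Regs, [](unsigned R) { return R > 10; }); // true
  (void)HasSeven; (void)It; (void)AnyLarge;
}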