AArch64RegisterInfo.cpp
1//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the AArch64 implementation of the TargetRegisterInfo
10// class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64RegisterInfo.h"
16#include "AArch64InstrInfo.h"
18#include "AArch64Subtarget.h"
22#include "llvm/ADT/BitVector.h"
32#include "llvm/IR/Function.h"
35
36using namespace llvm;
37
38#define GET_CC_REGISTER_LISTS
39#include "AArch64GenCallingConv.inc"
40#define GET_REGINFO_TARGET_DESC
41#include "AArch64GenRegisterInfo.inc"
42
43AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
44 : AArch64GenRegisterInfo(AArch64::LR, 0, 0, 0, HwMode), TT(TT) {
45 AArch64_MC::initLLVMToCVRegMapping(this);
46}
47
48/// Return whether the register needs a CFI entry. Not all unwinders may know
49/// about SVE registers, so we assume the lowest common denominator, i.e. the
50/// callee-saves required by the base ABI. For the SVE registers z8-z15 only the
51/// lower 64-bits (d8-d15) need to be saved. The lower 64-bits subreg is
52/// returned in \p RegToUseForCFI.
53bool AArch64RegisterInfo::regNeedsCFI(MCRegister Reg,
54 MCRegister &RegToUseForCFI) const {
55 if (AArch64::PPRRegClass.contains(Reg))
56 return false;
57
58 if (AArch64::ZPRRegClass.contains(Reg)) {
59 RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
60 for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
61 if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
62 return true;
63 }
64 return false;
65 }
66
67 RegToUseForCFI = Reg;
68 return true;
69}
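// Illustrative note (not in the upstream source): as a concrete example of the
// mapping above, if Reg is z8 the CFI entry is emitted against d8 (its 64-bit
// subregister, which is in CSR_AArch64_AAPCS_SaveList), z24 gets no CFI entry
// because d24 is not an AAPCS callee-save, and predicate registers (p0-p15)
// never get a CFI entry at all.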
70
71const MCPhysReg *
72AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
73 assert(MF && "Invalid MachineFunction pointer.");
74
75 if (MF->getFunction().getCallingConv() == CallingConv::GHC)
76 // GHC set of callee saved regs is empty as all those regs are
77 // used for passing STG regs around
78 return CSR_AArch64_NoRegs_SaveList;
79 if (MF->getFunction().getCallingConv() == CallingConv::PreserveNone)
80 return CSR_AArch64_NoneRegs_SaveList;
81 if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
82 return CSR_AArch64_AllRegs_SaveList;
83
84 if (MF->getFunction().getCallingConv() == CallingConv::ARM64EC_Thunk_X64)
85 return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;
86
87 // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
88 // lists depending on that will need to have their Darwin variant as well.
89 if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
90 return getDarwinCalleeSavedRegs(MF);
91
92 if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
93 return CSR_Win_AArch64_CFGuard_Check_SaveList;
94 if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows()) {
95 if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
96 ->supportSwiftError() &&
97 MF->getFunction().getAttributes().hasAttrSomewhere(
98 Attribute::SwiftError))
99 return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
100 if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
101 return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
102 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
103 return CSR_Win_AArch64_AAVPCS_SaveList;
104 if (MF->getFunction().getCallingConv() ==
105 CallingConv::AArch64_SVE_VectorCall)
106 return CSR_Win_AArch64_SVE_AAPCS_SaveList;
107 if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
108 return CSR_Win_AArch64_SVE_AAPCS_SaveList;
109 return CSR_Win_AArch64_AAPCS_SaveList;
110 }
111 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
112 return CSR_AArch64_AAVPCS_SaveList;
113 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
114 return CSR_AArch64_SVE_AAPCS_SaveList;
115 if (MF->getFunction().getCallingConv() ==
116 CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
117 report_fatal_error(
118 "Calling convention "
119 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
120 "supported to improve calls to SME ACLE save/restore/disable-za "
121 "functions, and is not intended to be used beyond that scope.");
122 if (MF->getFunction().getCallingConv() ==
123 CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
124 report_fatal_error(
125 "Calling convention "
126 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
127 "only supported to improve calls to SME ACLE __arm_get_current_vg "
128 "function, and is not intended to be used beyond that scope.");
129 if (MF->getFunction().getCallingConv() ==
130 CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
131 report_fatal_error(
132 "Calling convention "
133 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
134 "only supported to improve calls to SME ACLE __arm_sme_state "
135 "and is not intended to be used beyond that scope.");
136 if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
137 ->supportSwiftError() &&
138 MF->getFunction().getAttributes().hasAttrSomewhere(
139 Attribute::SwiftError))
140 return CSR_AArch64_AAPCS_SwiftError_SaveList;
141 if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
142 return CSR_AArch64_AAPCS_SwiftTail_SaveList;
143 if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
144 return CSR_AArch64_RT_MostRegs_SaveList;
145 if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
146 return CSR_AArch64_RT_AllRegs_SaveList;
147 if (MF->getFunction().getCallingConv() == CallingConv::Win64)
148 // This is for OSes other than Windows; Windows is a separate case further
149 // above.
150 return CSR_AArch64_AAPCS_X18_SaveList;
151 if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
152 return CSR_AArch64_SVE_AAPCS_SaveList;
153 return CSR_AArch64_AAPCS_SaveList;
154}
155
156const MCPhysReg *
157AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
158 assert(MF && "Invalid MachineFunction pointer.");
159 assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
160 "Invalid subtarget for getDarwinCalleeSavedRegs");
161
162 if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
163 report_fatal_error(
164 "Calling convention CFGuard_Check is unsupported on Darwin.");
165 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
166 return CSR_Darwin_AArch64_AAVPCS_SaveList;
167 if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
168 report_fatal_error(
169 "Calling convention SVE_VectorCall is unsupported on Darwin.");
170 if (MF->getFunction().getCallingConv() ==
171 CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
172 report_fatal_error(
173 "Calling convention "
174 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
175 "only supported to improve calls to SME ACLE save/restore/disable-za "
176 "functions, and is not intended to be used beyond that scope.");
177 if (MF->getFunction().getCallingConv() ==
178 CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
179 report_fatal_error(
180 "Calling convention "
181 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
182 "only supported to improve calls to SME ACLE __arm_get_current_vg "
183 "function, and is not intended to be used beyond that scope.");
184 if (MF->getFunction().getCallingConv() ==
185 CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
186 report_fatal_error(
187 "Calling convention "
188 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
189 "only supported to improve calls to SME ACLE __arm_sme_state "
190 "and is not intended to be used beyond that scope.");
191 if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
192 return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
193 ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
194 : CSR_Darwin_AArch64_CXX_TLS_SaveList;
195 if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
196 ->supportSwiftError() &&
197 MF->getFunction().getAttributes().hasAttrSomewhere(
198 Attribute::SwiftError))
199 return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
200 if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
201 return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
202 if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
203 return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
204 if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
205 return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
206 if (MF->getFunction().getCallingConv() == CallingConv::Win64)
207 return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
208 if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
209 return CSR_Darwin_AArch64_SVE_AAPCS_SaveList;
210 return CSR_Darwin_AArch64_AAPCS_SaveList;
211}
212
213const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
214 const MachineFunction *MF) const {
215 assert(MF && "Invalid MachineFunction pointer.");
216 if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
217 MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
218 return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
219 return nullptr;
220}
221
222void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
223 MachineFunction &MF) const {
224 const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
225 SmallVector<MCPhysReg, 32> UpdatedCSRs;
226 for (const MCPhysReg *I = CSRs; *I; ++I)
227 UpdatedCSRs.push_back(*I);
228
229 for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
230 if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
231 UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
232 }
233 }
234 // Register lists are zero-terminated.
235 UpdatedCSRs.push_back(0);
236 MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
237}
238
239const TargetRegisterClass *
240AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
241 unsigned Idx) const {
242 // edge case for GPR/FPR register classes
243 if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
244 return &AArch64::FPR32RegClass;
245 else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
246 return &AArch64::FPR64RegClass;
247
248 // Forward to TableGen's default version.
249 return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
250}
251
252const uint32_t *
253AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
254 CallingConv::ID CC) const {
255 assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
256 "Invalid subtarget for getDarwinCallPreservedMask");
257
258 if (CC == CallingConv::CXX_FAST_TLS)
259 return CSR_Darwin_AArch64_CXX_TLS_RegMask;
260 if (CC == CallingConv::AArch64_VectorCall)
261 return CSR_Darwin_AArch64_AAVPCS_RegMask;
262 if (CC == CallingConv::AArch64_SVE_VectorCall)
263 return CSR_Darwin_AArch64_SVE_AAPCS_RegMask;
264 if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
265 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
266 if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
267 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
268 if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
269 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
270 if (CC == CallingConv::CFGuard_Check)
271 report_fatal_error(
272 "Calling convention CFGuard_Check is unsupported on Darwin.");
273
274 if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
275 ->supportSwiftError() &&
276 MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
277 return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
278 if (CC == CallingConv::SwiftTail)
279 return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
280 if (CC == CallingConv::PreserveMost)
281 return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
282 if (CC == CallingConv::PreserveAll)
283 return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
284 return CSR_Darwin_AArch64_AAPCS_RegMask;
285}
286
287const uint32_t *
288AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
289 CallingConv::ID CC) const {
290 bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
291 if (CC == CallingConv::GHC)
292 // This is academic because all GHC calls are (supposed to be) tail calls
293 return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
294 if (CC == CallingConv::PreserveNone)
295 return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
296 : CSR_AArch64_NoneRegs_RegMask;
297 if (CC == CallingConv::AnyReg)
298 return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
299
300 // All the following calling conventions are handled differently on Darwin.
301 if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
302 if (SCS)
303 report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
304 return getDarwinCallPreservedMask(MF, CC);
305 }
306
307 if (CC == CallingConv::AArch64_VectorCall)
308 return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
309 if (CC == CallingConv::AArch64_SVE_VectorCall)
310 return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
311 : CSR_AArch64_SVE_AAPCS_RegMask;
312 if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
313 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
314 if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
315 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
316 if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
317 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
318 if (CC == CallingConv::CFGuard_Check)
319 return CSR_Win_AArch64_CFGuard_Check_RegMask;
320 if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
321 ->supportSwiftError() &&
322 MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
323 return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
324 : CSR_AArch64_AAPCS_SwiftError_RegMask;
325 if (CC == CallingConv::SwiftTail) {
326 if (SCS)
327 report_fatal_error("ShadowCallStack attribute not supported with swifttail");
328 return CSR_AArch64_AAPCS_SwiftTail_RegMask;
329 }
330 if (CC == CallingConv::PreserveMost)
331 return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
332 : CSR_AArch64_RT_MostRegs_RegMask;
333 if (CC == CallingConv::PreserveAll)
334 return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
335 : CSR_AArch64_RT_AllRegs_RegMask;
336
337 return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
338}
339
340const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
341 const MachineFunction &MF) const {
342 if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
343 return CSR_AArch64_AAPCS_RegMask;
344
345 return nullptr;
346}
347
348const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
349 if (TT.isOSDarwin())
350 return CSR_Darwin_AArch64_TLS_RegMask;
351
352 assert(TT.isOSBinFormatELF() && "Invalid target");
353 return CSR_AArch64_TLS_ELF_RegMask;
354}
355
356void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
357 const uint32_t **Mask) const {
358 uint32_t *UpdatedMask = MF.allocateRegMask();
359 unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
360 memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
361
362 for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
363 if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
364 for (MCPhysReg SubReg :
365 subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
366 // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
367 // register mask.
368 UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
369 }
370 }
371 }
372 *Mask = UpdatedMask;
373}
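// Illustrative note (not in the upstream source): a call-preserved register
// mask is a bit vector with one bit per physical register, where a set bit
// means the register is preserved across the call (see
// TargetRegisterInfo::getCallPreservedMask). The update above sets the bit for
// every subregister of each custom callee-saved X register; e.g. a register
// with encoding 70 lands in word 70 / 32 = 2, bit 70 % 32 = 6.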
374
375const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
376 return CSR_AArch64_SMStartStop_RegMask;
377}
378
379const uint32_t *
380AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
381 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
382}
383
384const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
385 return CSR_AArch64_NoRegs_RegMask;
386}
387
388const uint32_t *
389AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
390 CallingConv::ID CC) const {
391 // This should return a register mask that is the same as that returned by
392 // getCallPreservedMask but that additionally preserves the register used for
393 // the first i64 argument (which must also be the register used to return a
394 // single i64 return value)
395 //
396 // In case that the calling convention does not use the same register for
397 // both, the function should return NULL (does not currently apply)
398 assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
399 if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
400 return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
401 return CSR_AArch64_AAPCS_ThisReturn_RegMask;
402}
403
404const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
405 return CSR_AArch64_StackProbe_Windows_RegMask;
406}
407
408std::optional<std::string>
409AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
410 MCRegister PhysReg) const {
411 if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
412 return std::string("X19 is used as the frame base pointer register.");
413
414 if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
415 bool warn = false;
416 if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
417 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
418 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
419 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
420 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
421 warn = true;
422
423 for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
424 if (MCRegisterInfo::regsOverlap(PhysReg, i))
425 warn = true;
426
427 if (warn)
428 return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
429 " is clobbered by asynchronous signals when using Arm64EC.";
430 }
431
432 return {};
433}
434
435BitVector
436AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
437 const AArch64FrameLowering *TFI = getFrameLowering(MF);
438
439 // FIXME: avoid re-calculating this every time.
440 BitVector Reserved(getNumRegs());
441 markSuperRegs(Reserved, AArch64::WSP);
442 markSuperRegs(Reserved, AArch64::WZR);
443
444 if (TFI->isFPReserved(MF))
445 markSuperRegs(Reserved, AArch64::W29);
446
447 if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
448 // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
449 // signals, so we can't ever use them.
450 markSuperRegs(Reserved, AArch64::W13);
451 markSuperRegs(Reserved, AArch64::W14);
452 markSuperRegs(Reserved, AArch64::W23);
453 markSuperRegs(Reserved, AArch64::W24);
454 markSuperRegs(Reserved, AArch64::W28);
455 for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
456 markSuperRegs(Reserved, i);
457 }
458
459 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
460 if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
461 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
462 }
463
464 if (hasBasePointer(MF))
465 markSuperRegs(Reserved, AArch64::W19);
466
467 // SLH uses register W16/X16 as the taint register.
468 if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
469 markSuperRegs(Reserved, AArch64::W16);
470
471 // FFR is modelled as global state that cannot be allocated.
472 if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
473 Reserved.set(AArch64::FFR);
474
475 // SME tiles are not allocatable.
476 if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
477 for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
478 Reserved.set(SubReg);
479 }
480
481 // VG cannot be allocated
482 Reserved.set(AArch64::VG);
483
484 if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
485 for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
486 SubReg.isValid(); ++SubReg)
487 Reserved.set(*SubReg);
488 }
489
490 markSuperRegs(Reserved, AArch64::FPCR);
491 markSuperRegs(Reserved, AArch64::FPMR);
492 markSuperRegs(Reserved, AArch64::FPSR);
493
494 if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
495 markSuperRegs(Reserved, AArch64::X27);
496 markSuperRegs(Reserved, AArch64::X28);
497 markSuperRegs(Reserved, AArch64::W27);
498 markSuperRegs(Reserved, AArch64::W28);
499 }
500
501 assert(checkAllSuperRegsMarked(Reserved));
502
503 // Add _HI registers after checkAllSuperRegsMarked as this check otherwise
504 // becomes considerably more expensive.
505 Reserved.set(AArch64::WSP_HI);
506 Reserved.set(AArch64::WZR_HI);
507 static_assert(AArch64::W30_HI - AArch64::W0_HI == 30,
508 "Unexpected order of registers");
509 Reserved.set(AArch64::W0_HI, AArch64::W30_HI);
510 static_assert(AArch64::B31_HI - AArch64::B0_HI == 31,
511 "Unexpected order of registers");
512 Reserved.set(AArch64::B0_HI, AArch64::B31_HI);
513 static_assert(AArch64::H31_HI - AArch64::H0_HI == 31,
514 "Unexpected order of registers");
515 Reserved.set(AArch64::H0_HI, AArch64::H31_HI);
516 static_assert(AArch64::S31_HI - AArch64::S0_HI == 31,
517 "Unexpected order of registers");
518 Reserved.set(AArch64::S0_HI, AArch64::S31_HI);
519 static_assert(AArch64::D31_HI - AArch64::D0_HI == 31,
520 "Unexpected order of registers");
521 Reserved.set(AArch64::D0_HI, AArch64::D31_HI);
522 static_assert(AArch64::Q31_HI - AArch64::Q0_HI == 31,
523 "Unexpected order of registers");
524 Reserved.set(AArch64::Q0_HI, AArch64::Q31_HI);
525
526 return Reserved;
527}
528
529BitVector
530AArch64RegisterInfo::getUserReservedRegs(const MachineFunction &MF) const {
531 BitVector Reserved(getNumRegs());
532 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
533 // ReserveXRegister is set for registers manually reserved
534 // through +reserve-x#i.
535 if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
536 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
537 }
538 return Reserved;
539}
540
541BitVector
542AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
543 BitVector Reserved(getNumRegs());
544 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
545 if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
546 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
547 }
548
549 if (MF.getSubtarget<AArch64Subtarget>().isLRReservedForRA()) {
550 // In order to prevent the register allocator from using LR, we need to
551 // mark it as reserved. However we don't want to keep it reserved throughout
552 // the pipeline since it prevents other infrastructure from reasoning about
553 // its liveness. We use the NoVRegs property instead of IsSSA because
554 // IsSSA is removed before VirtRegRewriter runs.
555 if (!MF.getProperties().hasNoVRegs())
556 markSuperRegs(Reserved, AArch64::LR);
557 }
558
559 assert(checkAllSuperRegsMarked(Reserved));
560
561 // Handle strictlyReservedRegs separately to avoid re-evaluating the assert,
562 // which becomes considerably expensive when considering the _HI registers.
563 Reserved |= getStrictlyReservedRegs(MF);
564
565 return Reserved;
566}
567
568bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
569 MCRegister Reg) const {
570 return getReservedRegs(MF)[Reg];
571}
572
573bool AArch64RegisterInfo::isUserReservedReg(const MachineFunction &MF,
574 MCRegister Reg) const {
575 return getUserReservedRegs(MF)[Reg];
576}
577
578bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
579 MCRegister Reg) const {
580 return getStrictlyReservedRegs(MF)[Reg];
581}
582
583bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
584 return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
585 return isStrictlyReservedReg(MF, r);
586 });
587}
588
589void AArch64RegisterInfo::emitReservedArgRegCallError(
590 const MachineFunction &MF) const {
591 const Function &F = MF.getFunction();
592 F.getContext().diagnose(DiagnosticInfoUnsupported{F, ("AArch64 doesn't support"
593 " function calls if any of the argument registers is reserved.")});
594}
595
596bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
597 MCRegister PhysReg) const {
598 // SLH uses register X16 as the taint register but it will fall back to a different
599 // method if the user clobbers it. So X16 is not reserved for inline asm but is
600 // for normal codegen.
601 if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
602 MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
603 return true;
604
605 // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
606 if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
607 return true;
608
609 return !isReservedReg(MF, PhysReg);
610}
611
612const TargetRegisterClass *
613AArch64RegisterInfo::getPointerRegClass(unsigned Kind) const {
614 return &AArch64::GPR64spRegClass;
615}
616
617const TargetRegisterClass *
618AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
619 if (RC == &AArch64::CCRRegClass)
620 return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
621 return RC;
622}
623
624unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
625
626bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
627 const MachineFrameInfo &MFI = MF.getFrameInfo();
628
629 // In the presence of variable sized objects or funclets, if the fixed stack
630 // size is large enough that referencing from the FP won't result in things
631 // being in range relatively often, we can use a base pointer to allow access
632 // from the other direction like the SP normally works.
633 //
634 // Furthermore, if both variable sized objects and dynamic stack
635 // re-alignment are present, the base pointer is the only reliable way
636 // to reference the locals.
637 if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
638 if (hasStackRealignment(MF))
639 return true;
640
641 auto &ST = MF.getSubtarget<AArch64Subtarget>();
642 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
643 if (ST.hasSVE() || ST.isStreaming()) {
644 // Frames that have variable sized objects and scalable SVE objects,
645 // should always use a basepointer.
646 if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
647 return true;
648 }
649
650 // Frames with hazard padding can have a large offset between the frame
651 // pointer and GPR locals, which includes the emergency spill slot. If the
652 // emergency spill slot is not within range of the load/store instructions
653 // (which have a signed 9-bit range), we will fail to compile if it is used.
654 // Since hasBasePointer() is called before we know if we have hazard padding
655 // or an emergency spill slot we need to enable the basepointer
656 // conservatively.
657 if (ST.getStreamingHazardSize() &&
658 !AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody()) {
659 return true;
660 }
661
662 // Conservatively estimate whether the negative offset from the frame
663 // pointer will be sufficient to reach. If a function has a smallish
664 // frame, it's less likely to have lots of spills and callee saved
665 // space, so it's all more likely to be within range of the frame pointer.
666 // If it's wrong, we'll materialize the constant and still get to the
667 // object; it's just suboptimal. Negative offsets use the unscaled
668 // load/store instructions, which have a 9-bit signed immediate.
669 return MFI.getLocalFrameSize() >= 256;
670 }
671
672 return false;
673}
674
675bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
676 MCRegister Reg) const {
677 CallingConv::ID CC = MF.getFunction().getCallingConv();
678 const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
679 bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv(),
680 MF.getFunction().isVarArg());
681
682 auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
683 return llvm::is_contained(RegList, Reg);
684 };
685
686 switch (CC) {
687 default:
688 report_fatal_error("Unsupported calling convention.");
689 case CallingConv::GHC:
690 return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
691 case CallingConv::PreserveNone:
692 if (!MF.getFunction().isVarArg())
693 return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
694 [[fallthrough]];
695 case CallingConv::C:
696 case CallingConv::Fast:
697 case CallingConv::PreserveMost:
698 case CallingConv::PreserveAll:
699 case CallingConv::CXX_FAST_TLS:
700 case CallingConv::Swift:
701 case CallingConv::SwiftTail:
702 case CallingConv::Tail:
703 if (STI.isTargetWindows()) {
704 if (IsVarArg)
705 return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
706 switch (CC) {
707 default:
708 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
709 case CallingConv::Swift:
710 case CallingConv::SwiftTail:
711 return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
712 HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
713 }
714 }
715 if (!STI.isTargetDarwin()) {
716 switch (CC) {
717 default:
718 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
719 case CallingConv::Swift:
720 case CallingConv::SwiftTail:
721 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
722 HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
723 }
724 }
725 if (!IsVarArg) {
726 switch (CC) {
727 default:
728 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
729 case CallingConv::Swift:
730 case CallingConv::SwiftTail:
731 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
732 HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
733 }
734 }
735 if (STI.isTargetILP32())
736 return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
737 return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
738 case CallingConv::Win64:
739 if (IsVarArg)
740 return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
741 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
742 case CallingConv::CFGuard_Check:
743 return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
744 case CallingConv::AArch64_VectorCall:
745 case CallingConv::AArch64_SVE_VectorCall:
746 case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
747 case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
748 case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
749 if (STI.isTargetWindows())
750 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
751 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
752 }
753}
754
755Register
756AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
757 const AArch64FrameLowering *TFI = getFrameLowering(MF);
758 return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
759}
760
761bool AArch64RegisterInfo::requiresRegisterScavenging(
762 const MachineFunction &MF) const {
763 return true;
764}
765
766bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
767 const MachineFunction &MF) const {
768 return true;
769}
770
771bool
772AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
773 // This function indicates whether the emergency spillslot should be placed
774 // close to the beginning of the stackframe (closer to FP) or the end
775 // (closer to SP).
776 //
777 // The beginning works most reliably if we have a frame pointer.
778 // In the presence of any non-constant space between FP and locals,
779 // (e.g. in case of stack realignment or a scalable SVE area), it is
780 // better to use SP or BP.
781 const AArch64FrameLowering &TFI = *getFrameLowering(MF);
782 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
783 assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
785 "Expected SVE area to be calculated by this point");
786 return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE() &&
787 !AFI->hasStackHazardSlotIndex();
788}
789
790bool AArch64RegisterInfo::requiresFrameIndexScavenging(
791 const MachineFunction &MF) const {
792 return true;
793}
794
795bool
796AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
797 const MachineFrameInfo &MFI = MF.getFrameInfo();
798 if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
799 return true;
800 return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
801}
802
803/// needsFrameBaseReg - Returns true if the instruction's frame index
804/// reference would be better served by a base register other than FP
805/// or SP. Used by LocalStackFrameAllocation to determine which frame index
806/// references it should create new base registers for.
807bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
808 int64_t Offset) const {
809 for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
810 assert(i < MI->getNumOperands() &&
811 "Instr doesn't have FrameIndex operand!");
812
813 // It's the load/store FI references that cause issues, as it can be difficult
814 // to materialize the offset if it won't fit in the literal field. Estimate
815 // based on the size of the local frame and some conservative assumptions
816 // about the rest of the stack frame (note, this is pre-regalloc, so
817 // we don't know everything for certain yet) whether this offset is likely
818 // to be out of range of the immediate. Return true if so.
819
820 // We only generate virtual base registers for loads and stores, so
821 // return false for everything else.
822 if (!MI->mayLoad() && !MI->mayStore())
823 return false;
824
825 // Without a virtual base register, if the function has variable sized
826 // objects, all fixed-size local references will be via the frame pointer,
827 // Approximate the offset and see if it's legal for the instruction.
828 // Note that the incoming offset is based on the SP value at function entry,
829 // so it'll be negative.
830 MachineFunction &MF = *MI->getParent()->getParent();
831 const AArch64FrameLowering *TFI = getFrameLowering(MF);
832 MachineFrameInfo &MFI = MF.getFrameInfo();
833
834 // Estimate an offset from the frame pointer.
835 // Conservatively assume all GPR callee-saved registers get pushed.
836 // FP, LR, X19-X28, D8-D15. 64-bits each.
837 int64_t FPOffset = Offset - 16 * 20;
838 // Estimate an offset from the stack pointer.
839 // The incoming offset is relating to the SP at the start of the function,
840 // but when we access the local it'll be relative to the SP after local
841 // allocation, so adjust our SP-relative offset by that allocation size.
842 Offset += MFI.getLocalFrameSize();
843 // Assume that we'll have at least some spill slots allocated.
844 // FIXME: This is a total SWAG number. We should run some statistics
845 // and pick a real one.
846 Offset += 128; // 128 bytes of spill slots
847
848 // If there is a frame pointer, try using it.
849 // The FP is only available if there is no dynamic realignment. We
850 // don't know for sure yet whether we'll need that, so we guess based
851 // on whether there are any local variables that would trigger it.
852 if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
853 return false;
854
855 // If we can reference via the stack pointer or base pointer, try that.
856 // FIXME: This (and the code that resolves the references) can be improved
857 // to only disallow SP relative references in the live range of
858 // the VLA(s). In practice, it's unclear how much difference that
859 // would make, but it may be worth doing.
860 if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
861 return false;
862
863 // If even offset 0 is illegal, we don't want a virtual base register.
864 if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
865 return false;
866
867 // The offset likely isn't legal; we want to allocate a virtual base register.
868 return true;
869}
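// Illustrative note (not in the upstream source): a worked example of the
// estimate above, assuming an incoming SP-relative offset of -64 and a
// 1024-byte local frame: the FP-relative guess is -64 - 320 = -384 (16 * 20
// bytes assumed for FP, LR, X19-X28 and D8-D15), and the SP-relative guess is
// -64 + 1024 + 128 = 1088; isFrameOffsetLegal then decides whether the
// load/store instruction can actually encode those offsets.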
870
871bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
872 Register BaseReg,
873 int64_t Offset) const {
874 assert(MI && "Unable to get the legal offset for nil instruction.");
875 StackOffset SaveOffset = StackOffset::getFixed(Offset);
876 return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
877}
878
879/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
880/// at the beginning of the basic block.
881Register
882AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
883 int FrameIdx,
884 int64_t Offset) const {
885 MachineBasicBlock::iterator Ins = MBB->begin();
886 DebugLoc DL; // Defaults to "unknown"
887 if (Ins != MBB->end())
888 DL = Ins->getDebugLoc();
889 const MachineFunction &MF = *MBB->getParent();
890 const AArch64InstrInfo *TII =
891 MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
892 const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
893 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
894 Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
895 MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this));
896 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
897
898 BuildMI(*MBB, Ins, DL, MCID, BaseReg)
899 .addFrameIndex(FrameIdx)
900 .addImm(Offset)
901 .addImm(Shifter);
902
903 return BaseReg;
904}
905
906void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
907 int64_t Offset) const {
908 // ARM doesn't need the general 64-bit offsets
909 StackOffset Off = StackOffset::getFixed(Offset);
910
911 unsigned i = 0;
912 while (!MI.getOperand(i).isFI()) {
913 ++i;
914 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
915 }
916
917 const MachineFunction *MF = MI.getParent()->getParent();
918 const AArch64InstrInfo *TII =
919 MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
920 bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
921 assert(Done && "Unable to resolve frame index!");
922 (void)Done;
923}
924
925// Create a scratch register for the frame index elimination in an instruction.
926// This function has special handling of stack tagging loop pseudos, in which
927// case it can also change the instruction opcode.
928static Register
929createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
930 const AArch64InstrInfo *TII) {
931 // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
932 // replace the instruction with the writeback variant because it will now
933 // satisfy the operand constraints for it.
934 Register ScratchReg;
935 if (MI.getOpcode() == AArch64::STGloop ||
936 MI.getOpcode() == AArch64::STZGloop) {
937 assert(FIOperandNum == 3 &&
938 "Wrong frame index operand for STGloop/STZGloop");
939 unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
940 : AArch64::STZGloop_wback;
941 ScratchReg = MI.getOperand(1).getReg();
942 MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
943 MI.setDesc(TII->get(Op));
944 MI.tieOperands(1, 3);
945 } else {
946 ScratchReg =
947 MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
948 MI.getOperand(FIOperandNum)
949 .ChangeToRegister(ScratchReg, false, false, true);
950 }
951 return ScratchReg;
952}
953
954void AArch64RegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
955 SmallVectorImpl<uint64_t> &Ops) const {
956 // The smallest scalable element supported by scaled SVE addressing
957 // modes are predicates, which are 2 scalable bytes in size. So the scalable
958 // byte offset must always be a multiple of 2.
959 assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
960
961 // Add fixed-sized offset using existing DIExpression interface.
962 DIExpression::appendOffset(Ops, Offset.getFixed());
963
964 unsigned VG = getDwarfRegNum(AArch64::VG, true);
965 int64_t VGSized = Offset.getScalable() / 2;
966 if (VGSized > 0) {
967 Ops.push_back(dwarf::DW_OP_constu);
968 Ops.push_back(VGSized);
969 Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
970 Ops.push_back(dwarf::DW_OP_mul);
971 Ops.push_back(dwarf::DW_OP_plus);
972 } else if (VGSized < 0) {
973 Ops.push_back(dwarf::DW_OP_constu);
974 Ops.push_back(-VGSized);
975 Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
976 Ops.push_back(dwarf::DW_OP_mul);
977 Ops.push_back(dwarf::DW_OP_minus);
978 }
979}
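// Illustrative note (not in the upstream source): for example, a StackOffset
// of 16 fixed bytes and 32 scalable bytes becomes the DWARF expression
//   DW_OP_plus_uconst 16, DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus
// i.e. 16 + 16 * VG bytes, since VG (the number of 64-bit granules in a
// scalable vector) is twice vscale, so 32 scalable bytes equal 16 * VG.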
980
981bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
982 int SPAdj, unsigned FIOperandNum,
983 RegScavenger *RS) const {
984 assert(SPAdj == 0 && "Unexpected");
985
986 MachineInstr &MI = *II;
987 MachineBasicBlock &MBB = *MI.getParent();
988 MachineFunction &MF = *MBB.getParent();
989 const MachineFrameInfo &MFI = MF.getFrameInfo();
990 const AArch64InstrInfo *TII =
991 MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
992 const AArch64FrameLowering *TFI = getFrameLowering(MF);
993 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
994 bool Tagged =
995 MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
996 Register FrameReg;
997
998 // Special handling of dbg_value, stackmap, patchpoint and statepoint instructions.
999 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
1000 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
1001 MI.getOpcode() == TargetOpcode::STATEPOINT) {
1002 StackOffset Offset =
1003 TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
1004 /*PreferFP=*/true,
1005 /*ForSimm=*/false);
1006 Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
1007 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
1008 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
1009 return false;
1010 }
1011
1012 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
1013 MachineOperand &FI = MI.getOperand(FIOperandNum);
1014 StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
1015 assert(!Offset.getScalable() &&
1016 "Frame offsets with a scalable component are not supported");
1017 FI.ChangeToImmediate(Offset.getFixed());
1018 return false;
1019 }
1020
1021 StackOffset Offset;
1022 if (MI.getOpcode() == AArch64::TAGPstack) {
1023 // TAGPstack must use the virtual frame register in its 3rd operand.
1024 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1025 FrameReg = MI.getOperand(3).getReg();
1026 Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
1027 AFI->getTaggedBasePointerOffset());
1028 } else if (Tagged) {
1029 StackOffset SPOffset = StackOffset::getFixed(
1030 MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
1031 if (MFI.hasVarSizedObjects() ||
1032 isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
1033 (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
1034 // Can't update to SP + offset in place. Precalculate the tagged pointer
1035 // in a scratch register.
1036 Offset = TFI->resolveFrameIndexReference(
1037 MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
1038 Register ScratchReg =
1039 MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
1040 emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
1041 TII);
1042 BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
1043 .addReg(ScratchReg)
1044 .addReg(ScratchReg)
1045 .addImm(0);
1046 MI.getOperand(FIOperandNum)
1047 .ChangeToRegister(ScratchReg, false, false, true);
1048 return false;
1049 }
1050 FrameReg = AArch64::SP;
1051 Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
1052 (int64_t)MFI.getStackSize());
1053 } else {
1054 Offset = TFI->resolveFrameIndexReference(
1055 MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
1056 }
1057
1058 // Modify MI as necessary to handle as much of 'Offset' as possible
1059 if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
1060 return true;
1061
1062 assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
1063 "Emergency spill slot is out of reach");
1064
1065 // If we get here, the immediate doesn't fit into the instruction. We folded
1066 // as much as possible above. Handle the rest, providing a register that is
1067 // SP+LargeImm.
1068 Register ScratchReg =
1069 createScratchRegisterForInstruction(MI, FIOperandNum, TII);
1070 emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
1071 return false;
1072}
1073
1074unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
1075 MachineFunction &MF) const {
1076 const AArch64FrameLowering *TFI = getFrameLowering(MF);
1077
1078 switch (RC->getID()) {
1079 default:
1080 return 0;
1081 case AArch64::GPR32RegClassID:
1082 case AArch64::GPR32spRegClassID:
1083 case AArch64::GPR32allRegClassID:
1084 case AArch64::GPR64spRegClassID:
1085 case AArch64::GPR64allRegClassID:
1086 case AArch64::GPR64RegClassID:
1087 case AArch64::GPR32commonRegClassID:
1088 case AArch64::GPR64commonRegClassID:
1089 return 32 - 1 // XZR/SP
1090 - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
1091 - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
1092 - hasBasePointer(MF); // X19
1093 case AArch64::FPR8RegClassID:
1094 case AArch64::FPR16RegClassID:
1095 case AArch64::FPR32RegClassID:
1096 case AArch64::FPR64RegClassID:
1097 case AArch64::FPR128RegClassID:
1098 return 32;
1099
1100 case AArch64::MatrixIndexGPR32_8_11RegClassID:
1101 case AArch64::MatrixIndexGPR32_12_15RegClassID:
1102 return 4;
1103
1104 case AArch64::DDRegClassID:
1105 case AArch64::DDDRegClassID:
1106 case AArch64::DDDDRegClassID:
1107 case AArch64::QQRegClassID:
1108 case AArch64::QQQRegClassID:
1109 case AArch64::QQQQRegClassID:
1110 return 32;
1111
1112 case AArch64::FPR128_loRegClassID:
1113 case AArch64::FPR64_loRegClassID:
1114 case AArch64::FPR16_loRegClassID:
1115 return 16;
1116 case AArch64::FPR128_0to7RegClassID:
1117 return 8;
1118 }
1119}
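// Illustrative note (not in the upstream source): for the GPR64 class on a
// non-Darwin target with a frame pointer, no -ffixed-x* registers and a base
// pointer, the limit above works out to 32 - 1 (XZR/SP) - 1 (FP) - 0 - 1 (X19)
// = 29 allocatable registers.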
1120
1121// FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register allocation
1122// where a consecutive multi-vector tuple is constructed from the same indices
1123// of multiple strided loads. This may still result in unnecessary copies
1124// between the loads and the tuple. Here we try to return a hint to assign the
1125// contiguous ZPRMulReg starting at the same register as the first operand of
1126// the pseudo, which should be a subregister of the first strided load.
1127//
1128// For example, if the first strided load has been assigned $z16_z20_z24_z28
1129// and the operands of the pseudo are each accessing subregister zsub2, we
1130 // should look through Order to find a contiguous register which
1131// begins with $z24 (i.e. $z24_z25_z26_z27).
1132//
1133bool AArch64RegisterInfo::getRegAllocationHints(
1134 Register VirtReg, ArrayRef<MCPhysReg> Order,
1135 SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
1136 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
1137
1138 auto &ST = MF.getSubtarget<AArch64Subtarget>();
1139 if (!ST.hasSME() || !ST.isStreaming())
1140 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
1141 VRM);
1142
1143 // The SVE calling convention preserves registers Z8-Z23. As a result, there
1144 // are no ZPR2Strided or ZPR4Strided registers that do not overlap with the
1145 // callee-saved registers and so by default these will be pushed to the back
1146 // of the allocation order for the ZPRStridedOrContiguous classes.
1147 // If any of the instructions which define VirtReg are used by the
1148 // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
1149 // instructions over reducing the number of clobbered callee-save registers,
1150 // so we add the strided registers as a hint.
1151 const MachineRegisterInfo &MRI = MF.getRegInfo();
1152 unsigned RegID = MRI.getRegClass(VirtReg)->getID();
1153 if (RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
1154 RegID == AArch64::ZPR4StridedOrContiguousRegClassID) {
1155
1156 // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
1157 for (const MachineInstr &Use : MRI.use_nodbg_instructions(VirtReg)) {
1158 if (Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
1159 Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
1160 continue;
1161
1162 unsigned UseOps = Use.getNumOperands() - 1;
1163 const TargetRegisterClass *StridedRC;
1164 switch (RegID) {
1165 case AArch64::ZPR2StridedOrContiguousRegClassID:
1166 StridedRC = &AArch64::ZPR2StridedRegClass;
1167 break;
1168 case AArch64::ZPR4StridedOrContiguousRegClassID:
1169 StridedRC = &AArch64::ZPR4StridedRegClass;
1170 break;
1171 default:
1172 llvm_unreachable("Unexpected RegID");
1173 }
1174
1175 SmallVector<MCPhysReg, 4> StridedOrder;
1176 for (MCPhysReg Reg : Order)
1177 if (StridedRC->contains(Reg))
1178 StridedOrder.push_back(Reg);
1179
1180 int OpIdx = Use.findRegisterUseOperandIdx(VirtReg, this);
1181 assert(OpIdx != -1 && "Expected operand index from register use.");
1182
1183 unsigned TupleID = MRI.getRegClass(Use.getOperand(0).getReg())->getID();
1184 bool IsMulZPR = TupleID == AArch64::ZPR2Mul2RegClassID ||
1185 TupleID == AArch64::ZPR4Mul4RegClassID;
1186
1187 const MachineOperand *AssignedRegOp = llvm::find_if(
1188 make_range(Use.operands_begin() + 1, Use.operands_end()),
1189 [&VRM](const MachineOperand &Op) {
1190 return VRM->hasPhys(Op.getReg());
1191 });
1192
1193 // Example:
1194 //
1195 // When trying to find a suitable register allocation for VirtReg %v2 in:
1196 //
1197 // %v0:zpr2stridedorcontiguous = ld1 p0/z, [...]
1198 // %v1:zpr2stridedorcontiguous = ld1 p0/z, [...]
1199 // %v2:zpr2stridedorcontiguous = ld1 p0/z, [...]
1200 // %v3:zpr2stridedorcontiguous = ld1 p0/z, [...]
1201 // %v4:zpr4mul4 = FORM_TRANSPOSED_X4 %v0:0, %v1:0, %v2:0, %v3:0
1202 //
1203 // One such suitable allocation would be:
1204 //
1205 // { z0, z8 } = ld1 p0/z, [...]
1206 // { z1, z9 } = ld1 p0/z, [...]
1207 // { z2, z10 } = ld1 p0/z, [...]
1208 // { z3, z11 } = ld1 p0/z, [...]
1209 // { z0, z1, z2, z3 } =
1210 // FORM_TRANSPOSED_X4 {z0, z8}:0, {z1, z9}:0, {z2, z10}:0, {z3, z11}:0
1211 //
1212 // Below we distinguish two cases when trying to find a register:
1213 // * None of the registers used by FORM_TRANSPOSED_X4 have been assigned
1214 // yet. In this case the code must ensure that there are at least UseOps
1215 // free consecutive registers. If IsMulZPR is true, then the first of
1216 // registers must also be a multiple of UseOps, e.g. { z0, z1, z2, z3 }
1217 // is valid but { z1, z2, z3, z5 } is not.
1218 // * One or more of the registers used by FORM_TRANSPOSED_X4 is already
1219 // assigned a physical register, which means only checking that a
1220 // consecutive range of free tuple registers exists which includes
1221 // the assigned register.
1222 // e.g. in the example above, if { z0, z8 } is already allocated for
1223 // %v0, we just need to ensure that { z1, z9 }, { z2, z10 } and
1224 // { z3, z11 } are also free. If so, we add { z2, z10 }.
1225
1226 if (AssignedRegOp == Use.operands_end()) {
1227 // There are no registers already assigned to any of the pseudo
1228 // operands. Look for a valid starting register for the group.
1229 for (unsigned I = 0; I < StridedOrder.size(); ++I) {
1230 MCPhysReg Reg = StridedOrder[I];
1231
1232 // If the FORM_TRANSPOSE nodes use the ZPRMul classes, the starting
1233 // register of the first load should be a multiple of 2 or 4.
1234 unsigned SubRegIdx = Use.getOperand(OpIdx).getSubReg();
1235 if (IsMulZPR && (getSubReg(Reg, SubRegIdx) - AArch64::Z0) % UseOps !=
1236 ((unsigned)OpIdx - 1))
1237 continue;
1238
1239 // In the example above, if VirtReg is the third operand of the
1240 // tuple (%v2) and Reg == Z2_Z10, then we need to make sure that
1241 // Z0_Z8, Z1_Z9 and Z3_Z11 are also available.
1242 auto IsFreeConsecutiveReg = [&](unsigned UseOp) {
1243 unsigned R = Reg - (OpIdx - 1) + UseOp;
1244 return StridedRC->contains(R) &&
1245 (UseOp == 0 ||
1246 ((getSubReg(R, AArch64::zsub0) - AArch64::Z0) ==
1247 (getSubReg(R - 1, AArch64::zsub0) - AArch64::Z0) + 1)) &&
1248 !Matrix->isPhysRegUsed(R);
1249 };
1250 if (all_of(iota_range<unsigned>(0U, UseOps, /*Inclusive=*/false),
1251 IsFreeConsecutiveReg))
1252 Hints.push_back(Reg);
1253 }
1254 } else {
1255 // At least one operand already has a physical register assigned.
1256 // Find the starting sub-register of this and use it to work out the
1257 // correct strided register to suggest based on the current op index.
1258 MCPhysReg TargetStartReg =
1259 getSubReg(VRM->getPhys(AssignedRegOp->getReg()), AArch64::zsub0) +
1260 (OpIdx - AssignedRegOp->getOperandNo());
1261
1262 for (unsigned I = 0; I < StridedOrder.size(); ++I)
1263 if (getSubReg(StridedOrder[I], AArch64::zsub0) == TargetStartReg)
1264 Hints.push_back(StridedOrder[I]);
1265 }
1266
1267 if (!Hints.empty())
1268 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
1269 MF, VRM);
1270 }
1271 }
1272
1273 for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
1274 if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
1275 MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
1276 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
1277 MF, VRM);
1278
1279 unsigned FirstOpSubReg = MI.getOperand(1).getSubReg();
1280 switch (FirstOpSubReg) {
1281 case AArch64::zsub0:
1282 case AArch64::zsub1:
1283 case AArch64::zsub2:
1284 case AArch64::zsub3:
1285 break;
1286 default:
1287 continue;
1288 }
1289
1290 // Look up the physical register mapped to the first operand of the pseudo.
1291 Register FirstOpVirtReg = MI.getOperand(1).getReg();
1292 if (!VRM->hasPhys(FirstOpVirtReg))
1293 continue;
1294
1295 MCRegister TupleStartReg =
1296 getSubReg(VRM->getPhys(FirstOpVirtReg), FirstOpSubReg);
1297 for (unsigned I = 0; I < Order.size(); ++I)
1298 if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
1299 if (R == TupleStartReg)
1300 Hints.push_back(Order[I]);
1301 }
1302
1303 return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
1304 VRM);
1305}
1306
1307unsigned AArch64RegisterInfo::getLocalAddressRegister(
1308 const MachineFunction &MF) const {
1309 const auto &MFI = MF.getFrameInfo();
1310 if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
1311 return AArch64::SP;
1312 else if (hasStackRealignment(MF))
1313 return getBaseRegister();
1314 return getFrameRegister(MF);
1315}
1316
1317/// SrcRC and DstRC will be morphed into NewRC if this returns true
1318bool AArch64RegisterInfo::shouldCoalesce(
1319 MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
1320 const TargetRegisterClass *DstRC, unsigned DstSubReg,
1321 const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
1322 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
1323
1324 if (MI->isCopy() &&
1325 ((DstRC->getID() == AArch64::GPR64RegClassID) ||
1326 (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
1327 MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
1328 // Do not coalesce in the case of a 32-bit subregister copy
1329 // which implements a 32 to 64 bit zero extension
1330 // which relies on the upper 32 bits being zeroed.
1331 return false;
1332
1333 auto IsCoalescerBarrier = [](const MachineInstr &MI) {
1334 switch (MI.getOpcode()) {
1335 case AArch64::COALESCER_BARRIER_FPR16:
1336 case AArch64::COALESCER_BARRIER_FPR32:
1337 case AArch64::COALESCER_BARRIER_FPR64:
1338 case AArch64::COALESCER_BARRIER_FPR128:
1339 return true;
1340 default:
1341 return false;
1342 }
1343 };
1344
1345 // For calls that temporarily have to toggle streaming mode as part of the
1346 // call-sequence, we need to be more careful when coalescing copy instructions
1347 // so that we don't end up coalescing the NEON/FP result or argument register
1348 // with a whole Z-register, such that after coalescing the register allocator
1349 // will try to spill/reload the entire Z register.
1350 //
1351 // We do this by checking if the node has any defs/uses that are
1352 // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist to
1353 // instruct the coalescer to avoid coalescing the copy.
1354 if (MI->isCopy() && SubReg != DstSubReg &&
1355 (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
1356 AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
1357 unsigned SrcReg = MI->getOperand(1).getReg();
1358 if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
1359 return false;
1360 unsigned DstReg = MI->getOperand(0).getReg();
1361 if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
1362 return false;
1363 }
1364
1365 return true;
1366}
1367
1368bool AArch64RegisterInfo::shouldAnalyzePhysregInMachineLoopInfo(
1369 MCRegister R) const {
1370 return R == AArch64::VG;
1371}
1372
1373bool AArch64RegisterInfo::isIgnoredCVReg(MCRegister LLVMReg) const {
1374 return (LLVMReg >= AArch64::Z0 && LLVMReg <= AArch64::Z31) ||
1375 (LLVMReg >= AArch64::P0 && LLVMReg <= AArch64::P15);
1376}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Live Register Matrix
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
static unsigned getDwarfRegNum(MCRegister Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
virtual bool isIgnoredCVReg(MCRegister LLVMReg) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
const TargetRegisterClass * getPointerRegClass(unsigned Kind=0) const override
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
bool isUserReservedReg(const MachineFunction &MF, MCRegister Reg) const
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
bool requiresRegisterScavenging(const MachineFunction &MF) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
BitVector getUserReservedRegs(const MachineFunction &MF) const
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
bool regNeedsCFI(MCRegister Reg, MCRegister &RegToUseForCFI) const
Return whether the register needs a CFI entry.
bool isAnyArgRegReserved(const MachineFunction &MF) const
void emitReservedArgRegCallError(const MachineFunction &MF) const
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
const uint32_t * getTLSCallPreservedMask() const
const uint32_t * getNoPreservedMask() const override
Register getFrameRegister(const MachineFunction &MF) const override
bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const override
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
const MCPhysReg * getDarwinCalleeSavedRegs(const MachineFunction *MF) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
unsigned getLocalAddressRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
const uint32_t * getSMStartStopCallPreservedMask() const
bool useFPForScavengingIndex(const MachineFunction &MF) const override
bool cannotEliminateFrame(const MachineFunction &MF) const
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
std::optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
bool isXRegisterReservedForRA(size_t i) const
const AArch64TargetLowering * getTargetLowering() const override
bool isXRegCustomCalleeSaved(size_t i) const
bool isXRegisterReserved(size_t i) const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition DebugLoc.h:124
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:352
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:727
Describe properties that are true of each instruction in the target description file.
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
MCSubRegIterator enumerates all sub-registers of Reg.
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
int64_t getLocalFrameSize() const
Get the size of the local object blob.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
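A hedged sketch of how allocateRegMask and getRegMaskSize are usually paired when a backend builds a custom clobber mask for a call; MF and TRI are assumed to be in scope:
uint32_t *Mask = MF.allocateRegMask(); // sized for all physical registers, zero-initialised
unsigned MaskWords = MachineOperand::getRegMaskSize(TRI->getNumRegs());
// One bit per physical register; a set bit marks the register as preserved
// across the call.
(void)MaskWords;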
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
LLVM_ABI void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
bool isScavengingFrameIndex(int FI) const
Query whether a frame index is a scavenging frame index.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:31
int64_t getFixed() const
Returns the fixed component of the stack.
Definition TypeSize.h:47
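A brief sketch of StackOffset, which pairs a fixed byte offset with a scalable (vscale-multiplied) one, as used for SVE frame layout:
StackOffset Off = StackOffset::get(/*Fixed=*/16, /*Scalable=*/32);
int64_t FixedPart = Off.getFixed();       // 16 bytes
int64_t ScalablePart = Off.getScalable(); // 32 bytes, scaled by vscale at runtime
StackOffset FixedOnly = StackOffset::getFixed(8);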
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetOptions Options
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disabled for the given machine function.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physical register for the virtual register VirtReg.
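As an illustrative sketch, a target's register-info subclass can seed Hints before deferring to the base implementation; MyTargetRegisterInfo is a hypothetical class name, not something defined in this file:
bool MyTargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order, SmallVectorImpl<MCPhysReg> &Hints,
    const MachineFunction &MF, const VirtRegMap *VRM,
    const LiveRegMatrix *Matrix) const {
  if (!Order.empty())
    Hints.push_back(Order[0]); // purely illustrative preference
  return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                   VRM, Matrix);
}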
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition VirtRegMap.h:91
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
Definition VirtRegMap.h:87
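A small sketch of the hasPhys/getPhys pairing shown above; VRM and VirtReg are assumed to be in scope:
if (VRM && VRM->hasPhys(VirtReg)) {
  MCRegister PhysReg = VRM->getPhys(VirtReg);
  // PhysReg is the assigned physical register; it can be compared against
  // others or checked for aliasing, e.g. with regsOverlap().
}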
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount; shifter: 000 ==> lsl, 001 ==> lsr, 010 ==> asr, 011 ==> ror, 100 ==> msl.
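For example (a sketch, not code from this file), encoding an "LSL #12" shifter for an add-immediate instruction looks like:
unsigned ShiftTwelve = AArch64_AM::getShifterImm(AArch64_AM::LSL, 12);
// Attached to an ADDXri via .addImm(ShiftTwelve) when the offset needs the
// shifted 12-bit immediate slot.
unsigned NoShift = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);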
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
@ Swift
Calling convention for Swift.
Definition CallingConv.h:69
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
Definition CallingConv.h:82
@ PreserveMost
Used for runtime calls that preserve most registers.
Definition CallingConv.h:63
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition CallingConv.h:60
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition CallingConv.h:41
@ PreserveNone
Used for runtime calls that preserve none of the general-purpose registers.
Definition CallingConv.h:90
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.
Definition CallingConv.h:87
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1707
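A minimal sketch of the range wrapper, assuming a small list of callee-saved candidates:
SmallVector<MCPhysReg, 4> Regs = {AArch64::X19, AArch64::X20};
bool AllGPR64 = all_of(Regs, [](MCPhysReg R) {
  return AArch64::GPR64RegClass.contains(R); // every entry is a 64-bit GPR?
});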
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
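A sketch modelled on the common pattern in register-info code: one of the block-inserting BuildMI overloads chained with the addFrameIndex/addImm builders listed earlier. MBB, Ins, DL, TII, BaseReg and FrameIdx are assumed to be in scope:
BuildMI(MBB, Ins, DL, TII->get(AArch64::ADDXri), BaseReg)
    .addFrameIndex(FrameIdx)
    .addImm(0)                                               // immediate offset
    .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)); // no shift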
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
@ Done
Definition Threading.h:60
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
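For instance (a generic sketch, with MBB assumed to be a MachineBasicBlock in scope), wrapping two iterators so they work in a range-based for loop:
for (MachineInstr &MI : make_range(MBB.begin(), MBB.getFirstTerminator()))
  (void)MI; // visit every instruction before the first terminator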
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1714
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
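A minimal sketch of a call matching the signature above, bumping SP by a purely fixed 16 bytes; MBB, MBBI, DL and TII are assumed to be in scope:
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                StackOffset::getFixed(16), TII, MachineInstr::NoFlags);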
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1740
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
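A sketch of how the two helpers above are typically paired when eliminating a frame index; MI, FIOperandNum, FrameReg, Offset and TII are assumed to be in scope:
int Status = isAArch64FrameOffsetLegal(MI, Offset);
if (Status & AArch64FrameOffsetCanUpdate) {
  // Fold as much of Offset into MI as its immediate field allows.
  bool Folded = rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  if (!Folded) {
    // Offset now holds the residue that still has to be materialised,
    // e.g. into a scratch register via emitFrameOffset.
  }
}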
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1879