//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
    : AArch64GenRegisterInfo(AArch64::LR, 0, 0, 0, HwMode), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(MCRegister Reg,
                                      MCRegister &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
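
// Worked example (illustrative, not part of the original source): unwinding
// past a function that spills z8 needs only the base-ABI half of the
// register, so the CFI entry is emitted against d8:
//   regNeedsCFI(Z8, R)  -> true,  R = D8  (d8 is in the AAPCS save list)
//   regNeedsCFI(Z16, R) -> false          (d16 is not a base-ABI callee-save)
//   regNeedsCFI(P9, R)  -> false          (predicates never get CFI entries)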

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  auto &AFI = *MF->getInfo<AArch64FunctionInfo>();
  const auto &F = MF->getFunction();
  const auto *TLI = MF->getSubtarget<AArch64Subtarget>().getTargetLowering();
  const bool Darwin = MF->getSubtarget<AArch64Subtarget>().isTargetDarwin();
  const bool Windows = MF->getSubtarget<AArch64Subtarget>().isTargetWindows();

  if (TLI->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (Darwin)
      return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  }

  switch (F.getCallingConv()) {
  case CallingConv::GHC:
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;

  case CallingConv::PreserveNone:
    // FIXME: Windows likely needs this to be altered for proper unwinding.
    return CSR_AArch64_NoneRegs_SaveList;

  case CallingConv::AnyReg:
    return CSR_AArch64_AllRegs_SaveList;

  case CallingConv::ARM64EC_Thunk_X64:
    return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;

  case CallingConv::PreserveMost:
    if (Darwin)
      return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
    if (Windows)
      return CSR_Win_AArch64_RT_MostRegs_SaveList;
    return CSR_AArch64_RT_MostRegs_SaveList;

  case CallingConv::PreserveAll:
    if (Darwin)
      return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
    if (Windows)
      return CSR_Win_AArch64_RT_AllRegs_SaveList;
    return CSR_AArch64_RT_AllRegs_SaveList;

  case CallingConv::CFGuard_Check:
    if (Darwin)
      report_fatal_error(
          "Calling convention CFGuard_Check is unsupported on Darwin.");
    return CSR_Win_AArch64_CFGuard_Check_SaveList;

  case CallingConv::SwiftTail:
    if (Darwin)
      return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;

  case CallingConv::AArch64_VectorCall:
    if (Darwin)
      return CSR_Darwin_AArch64_AAVPCS_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAVPCS_SaveList;
    return CSR_AArch64_AAVPCS_SaveList;

  case CallingConv::AArch64_SVE_VectorCall:
    if (Darwin)
      report_fatal_error(
          "Calling convention SVE_VectorCall is unsupported on Darwin.");
    if (Windows)
      return CSR_Win_AArch64_SVE_AAPCS_SaveList;
    return CSR_AArch64_SVE_AAPCS_SaveList;

  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
        "supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");

  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
        "only supported to improve calls to SME ACLE __arm_get_current_vg "
        "function, and is not intended to be used beyond that scope.");

  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");

  case CallingConv::Win64:
    if (Darwin)
      return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAPCS_SaveList;
    return CSR_AArch64_AAPCS_X18_SaveList;

  case CallingConv::CXX_FAST_TLS:
    if (Darwin)
      return AFI.isSplitCSR() ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
                              : CSR_Darwin_AArch64_CXX_TLS_SaveList;
    // FIXME: this likely should be a `report_fatal_error` condition, however,
    // that would be a departure from the previously implemented behaviour.
    [[fallthrough]];

  default:
    if (Darwin)
      return AFI.hasSVE_AAPCS(*MF) ? CSR_Darwin_AArch64_SVE_AAPCS_SaveList
                                   : CSR_Darwin_AArch64_AAPCS_SaveList;
    if (Windows)
      return AFI.hasSVE_AAPCS(*MF) ? CSR_Win_AArch64_SVE_AAPCS_SaveList
                                   : CSR_Win_AArch64_AAPCS_SaveList;
    return AFI.hasSVE_AAPCS(*MF) ? CSR_AArch64_SVE_AAPCS_SaveList
                                 : CSR_AArch64_AAPCS_SaveList;
  }
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // edge case for GPR/FPR register classes
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return CSR_Darwin_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::PreserveNone)
    return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
               : CSR_AArch64_NoneRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;

  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
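        // Illustrative aside (not from the original source): each 32-bit mask
        // word covers 32 register numbers, so a register numbered 37 would
        // set bit 5 of word 1.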
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // In case the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  if (MF.getSubtarget<AArch64Subtarget>().isLFI()) {
    markSuperRegs(Reserved, AArch64::W28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W26);
    markSuperRegs(Reserved, AArch64::W25);
    if (!MF.getProperties().hasNoVRegs()) {
      markSuperRegs(Reserved, AArch64::LR);
      markSuperRegs(Reserved, AArch64::W30);
    }
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // FFR is modelled as global state that cannot be allocated.
  if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
    Reserved.set(AArch64::FFR);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  // VG cannot be allocated.
  Reserved.set(AArch64::VG);

  if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
    for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);
  markSuperRegs(Reserved, AArch64::FPMR);
  markSuperRegs(Reserved, AArch64::FPSR);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    markSuperRegs(Reserved, AArch64::X27);
    markSuperRegs(Reserved, AArch64::X28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W28);
  }

  assert(checkAllSuperRegsMarked(Reserved));

  // Add _HI registers after checkAllSuperRegsMarked as this check otherwise
  // becomes considerably more expensive.
  Reserved.set(AArch64::WSP_HI);
  Reserved.set(AArch64::WZR_HI);
  static_assert(AArch64::W30_HI - AArch64::W0_HI == 30,
                "Unexpected order of registers");
  Reserved.set(AArch64::W0_HI, AArch64::W30_HI);
  static_assert(AArch64::B31_HI - AArch64::B0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::B0_HI, AArch64::B31_HI);
  static_assert(AArch64::H31_HI - AArch64::H0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::H0_HI, AArch64::H31_HI);
  static_assert(AArch64::S31_HI - AArch64::S0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::S0_HI, AArch64::S31_HI);
  static_assert(AArch64::D31_HI - AArch64::D0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::D0_HI, AArch64::D31_HI);
  static_assert(AArch64::Q31_HI - AArch64::Q0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::Q0_HI, AArch64::Q31_HI);

  return Reserved;
}

BitVector
AArch64RegisterInfo::getUserReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    // ReserveXRegister is set for registers manually reserved
    // through +reserve-x#i.
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (MF.getSubtarget<AArch64Subtarget>().isLRReservedForRA()) {
    // In order to prevent the register allocator from using LR, we need to
    // mark it as reserved. However, we don't want to keep it reserved
    // throughout the pipeline, since that prevents other infrastructure from
    // reasoning about its liveness. We use the NoVRegs property instead of
    // IsSSA because IsSSA is removed before VirtRegRewriter runs.
    if (!MF.getProperties().hasNoVRegs())
      // Reserve LR (X30) by marking from its subregister W30 because otherwise
      // the register allocator could clobber the subregister.
      markSuperRegs(Reserved, AArch64::W30);
  }

  assert(checkAllSuperRegsMarked(Reserved));

  // Handle strictlyReservedRegs separately to avoid re-evaluating the assert,
  // which becomes considerably more expensive when considering the _HI
  // registers.
  Reserved |= getStrictlyReservedRegs(MF);

  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isUserReservedReg(const MachineFunction &MF,
                                            MCRegister Reg) const {
  return getUserReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("AArch64 doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register, but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
  if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

MCRegister AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    auto &ST = MF.getSubtarget<AArch64Subtarget>();
    const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
    if (ST.hasSVE() || ST.isStreaming()) {
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->hasSVEStackSize())
        return true;
    }

    // Frames with hazard padding can have a large offset between the frame
    // pointer and GPR locals, which includes the emergency spill slot. If the
    // emergency spill slot is not within range of the load/store instructions
    // (which have a signed 9-bit range), we will fail to compile if it is
    // used. Since hasBasePointer() is called before we know if we have hazard
    // padding or an emergency spill slot, we need to enable the base pointer
    // conservatively.
    if (ST.getStreamingHazardSize() &&
        !AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody()) {
      return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate
    // (-256 to 255 bytes).
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                         MF.getFunction().isVarArg());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::PreserveNone:
    if (!MF.getFunction().isVarArg())
      return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
    [[fallthrough]];
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows()) {
      if (IsVarArg)
        return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
      switch (CC) {
      default:
        return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
               HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      }
    }
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    if (STI.isTargetWindows())
      return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spill slot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->hasSVEStackSize() &&
         !AFI->hasStackHazardSlotIndex();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots
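
  // Worked example (illustrative numbers, not from the original source): for
  // an incoming SP-relative offset of -300 and a 64-byte local frame, the
  // estimates above give FPOffset = -300 - 320 = -620 and an SP-relative
  // offset of -300 + 64 + 128 = -108.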

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
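
// Illustrative MIR for the instruction built above (register and frame-index
// names are hypothetical, not from a real compilation):
//   %base:gpr64sp = ADDXri %stack.3, 0, 0
// Frame references are later rewritten against %base by resolveFrameIndex().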

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable elements supported by scaled SVE addressing
  // modes are predicates, which are 2 scalable bytes in size. So the scalable
  // byte offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
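
  // Illustrative example (not from the original source): a StackOffset with
  // 16 scalable bytes appends DW_OP_constu 8, DW_OP_bregx VG 0, DW_OP_mul,
  // DW_OP_plus below, i.e. it adds 8 * VG to the location expression.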

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  case AArch64::FPR128_0to7RegClassID:
    return 8;
  }
}
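
// Worked example (illustrative, not part of the original source): for a
// Darwin function with a frame pointer, a base pointer, and no other reserved
// X registers, the GPR limit above evaluates to
//   32 - 1 /*XZR,SP*/ - 1 /*FP*/ - 0 /*reserved*/ - 1 /*X19*/ = 29.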

// We add regalloc hints for different cases:
// * Choosing a better destination operand for predicated SVE instructions
//   where the inactive lanes are undef, by choosing a register that is not
//   unique to the other operands of the instruction.
//
// * Improving register allocation for SME multi-vector instructions where we
//   can benefit from the strided- and contiguous-register multi-vector tuples.
//
// Here FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register
// allocation where a consecutive multi-vector tuple is constructed from the
// same indices of multiple strided loads. This may still result in
// unnecessary copies between the loads and the tuple. Here we try to return a
// hint to assign the contiguous ZPRMulReg starting at the same register as
// the first operand of the pseudo, which should be a subregister of the first
// strided load.
//
// For example, if the first strided load has been assigned $z16_z20_z24_z28
// and the operands of the pseudo are each accessing subregister zsub2, we
// should look through Order to find a contiguous register which begins with
// $z24 (i.e. $z24_z25_z26_z27).
bool AArch64RegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  auto &ST = MF.getSubtarget<AArch64Subtarget>();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // For predicated SVE instructions where the inactive lanes are undef,
  // pick a destination register that is not unique to avoid introducing
  // a movprfx.
  const TargetRegisterClass *RegRC = MRI.getRegClass(VirtReg);
  if (AArch64::ZPRRegClass.hasSubClassEq(RegRC)) {
    bool ConsiderOnlyHints = TargetRegisterInfo::getRegAllocationHints(
        VirtReg, Order, Hints, MF, VRM);

    for (const MachineOperand &DefOp : MRI.def_operands(VirtReg)) {
      const MachineInstr &Def = *DefOp.getParent();
      if (DefOp.isImplicit() ||
          (TII->get(Def.getOpcode()).TSFlags & AArch64::FalseLanesMask) !=
              AArch64::FalseLanesUndef)
        continue;

      unsigned InstFlags =
          TII->get(AArch64::getSVEPseudoMap(Def.getOpcode())).TSFlags;

      for (MCPhysReg R : Order) {
        auto AddHintIfSuitable = [&](MCPhysReg R,
                                     const MachineOperand &MO) -> bool {
          // R is a suitable register hint if R can reuse one of the other
          // source operands.
          if (VRM->getPhys(MO.getReg()) != R)
            return false;
          Hints.push_back(R);
          return true;
        };

        switch (InstFlags & AArch64::DestructiveInstTypeMask) {
        default:
          break;
        case AArch64::DestructiveTernaryCommWithRev:
          AddHintIfSuitable(R, Def.getOperand(2)) ||
              AddHintIfSuitable(R, Def.getOperand(3)) ||
              AddHintIfSuitable(R, Def.getOperand(4));
          break;
        case AArch64::DestructiveBinaryComm:
        case AArch64::DestructiveBinaryCommWithRev:
          AddHintIfSuitable(R, Def.getOperand(2)) ||
              AddHintIfSuitable(R, Def.getOperand(3));
          break;
        case AArch64::DestructiveBinary:
        case AArch64::DestructiveBinaryImm:
          AddHintIfSuitable(R, Def.getOperand(2));
          break;
        case AArch64::DestructiveUnaryPassthru:
          AddHintIfSuitable(R, Def.getOperand(3));
          break;
        }
      }
    }

    if (Hints.size())
      return ConsiderOnlyHints;
  }

  if (!ST.hasSME() || !ST.isStreaming())
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                     VRM);

  // The SVE calling convention preserves registers Z8-Z23. As a result, there
  // are no ZPR2Strided or ZPR4Strided registers that do not overlap with the
  // callee-saved registers and so by default these will be pushed to the back
  // of the allocation order for the ZPRStridedOrContiguous classes.
  // If any of the instructions which define VirtReg are used by the
  // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
  // instructions over reducing the number of clobbered callee-save registers,
  // so we add the strided registers as a hint.
  unsigned RegID = RegRC->getID();
  if (RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
      RegID == AArch64::ZPR4StridedOrContiguousRegClassID) {

    // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
    for (const MachineInstr &Use : MRI.use_nodbg_instructions(VirtReg)) {
      if (Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
          Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
        continue;

      unsigned UseOps = Use.getNumOperands() - 1;
      const TargetRegisterClass *StridedRC;
      switch (RegID) {
      case AArch64::ZPR2StridedOrContiguousRegClassID:
        StridedRC = &AArch64::ZPR2StridedRegClass;
        break;
      case AArch64::ZPR4StridedOrContiguousRegClassID:
        StridedRC = &AArch64::ZPR4StridedRegClass;
        break;
      default:
        llvm_unreachable("Unexpected RegID");
      }

      SmallVector<MCPhysReg, 4> StridedOrder;
      for (MCPhysReg Reg : Order)
        if (StridedRC->contains(Reg))
          StridedOrder.push_back(Reg);

      int OpIdx = Use.findRegisterUseOperandIdx(VirtReg, this);
      assert(OpIdx != -1 && "Expected operand index from register use.");

      unsigned TupleID = MRI.getRegClass(Use.getOperand(0).getReg())->getID();
      bool IsMulZPR = TupleID == AArch64::ZPR2Mul2RegClassID ||
                      TupleID == AArch64::ZPR4Mul4RegClassID;

      const MachineOperand *AssignedRegOp = llvm::find_if(
          make_range(Use.operands_begin() + 1, Use.operands_end()),
          [&VRM](const MachineOperand &Op) {
            return VRM->hasPhys(Op.getReg());
          });

      // Example:
      //
      // When trying to find a suitable register allocation for VirtReg %v2 in:
      //
      // %v0:zpr2stridedorcontiguous = ld1 p0/z, [...]
      // %v1:zpr2stridedorcontiguous = ld1 p0/z, [...]
      // %v2:zpr2stridedorcontiguous = ld1 p0/z, [...]
      // %v3:zpr2stridedorcontiguous = ld1 p0/z, [...]
      // %v4:zpr4mul4 = FORM_TRANSPOSED_X4 %v0:0, %v1:0, %v2:0, %v3:0
      //
      // One such suitable allocation would be:
      //
      // { z0, z8 } = ld1 p0/z, [...]
      // { z1, z9 } = ld1 p0/z, [...]
      // { z2, z10 } = ld1 p0/z, [...]
      // { z3, z11 } = ld1 p0/z, [...]
      // { z0, z1, z2, z3 } =
      //     FORM_TRANSPOSED_X4 {z0, z8}:0, {z1, z9}:0, {z2, z10}:0, {z3, z11}:0
      //
      // Below we distinguish two cases when trying to find a register:
      // * None of the registers used by FORM_TRANSPOSED_X4 have been assigned
      //   yet. In this case the code must ensure that there are at least
      //   UseOps free consecutive registers. If IsMulZPR is true, then the
      //   first of the registers must also be a multiple of UseOps, e.g.
      //   { z0, z1, z2, z3 } is valid but { z1, z2, z3, z5 } is not.
      // * One or more of the registers used by FORM_TRANSPOSED_X4 is already
      //   assigned a physical register, which means only checking that a
      //   consecutive range of free tuple registers exists which includes
      //   the assigned register.
      //   e.g. in the example above, if { z0, z8 } is already allocated for
      //   %v0, we just need to ensure that { z1, z9 }, { z2, z10 } and
      //   { z3, z11 } are also free. If so, we add { z2, z10 }.

      if (AssignedRegOp == Use.operands_end()) {
        // There are no registers already assigned to any of the pseudo
        // operands. Look for a valid starting register for the group.
        for (unsigned I = 0; I < StridedOrder.size(); ++I) {
          MCPhysReg Reg = StridedOrder[I];

          // If the FORM_TRANSPOSE nodes use the ZPRMul classes, the starting
          // register of the first load should be a multiple of 2 or 4.
          unsigned SubRegIdx = Use.getOperand(OpIdx).getSubReg();
          if (IsMulZPR && (getSubReg(Reg, SubRegIdx) - AArch64::Z0) % UseOps !=
                              ((unsigned)OpIdx - 1))
            continue;

          // In the example above, if VirtReg is the third operand of the
          // tuple (%v2) and Reg == Z2_Z10, then we need to make sure that
          // Z0_Z8, Z1_Z9 and Z3_Z11 are also available.
          auto IsFreeConsecutiveReg = [&](unsigned UseOp) {
            unsigned R = Reg - (OpIdx - 1) + UseOp;
            return StridedRC->contains(R) &&
                   (UseOp == 0 ||
                    ((getSubReg(R, AArch64::zsub0) - AArch64::Z0) ==
                     (getSubReg(R - 1, AArch64::zsub0) - AArch64::Z0) + 1)) &&
                   !Matrix->isPhysRegUsed(R);
          };
          if (all_of(iota_range<unsigned>(0U, UseOps, /*Inclusive=*/false),
                     IsFreeConsecutiveReg))
            Hints.push_back(Reg);
        }
      } else {
        // At least one operand already has a physical register assigned.
        // Find the starting sub-register of this and use it to work out the
        // correct strided register to suggest based on the current op index.
        MCPhysReg TargetStartReg =
            getSubReg(VRM->getPhys(AssignedRegOp->getReg()), AArch64::zsub0) +
            (OpIdx - AssignedRegOp->getOperandNo());

        for (unsigned I = 0; I < StridedOrder.size(); ++I)
          if (getSubReg(StridedOrder[I], AArch64::zsub0) == TargetStartReg)
            Hints.push_back(StridedOrder[I]);
      }

      if (!Hints.empty())
        return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                         MF, VRM);
    }
  }

  for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
    if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
        MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
      return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                       MF, VRM);

    unsigned FirstOpSubReg = MI.getOperand(1).getSubReg();
    switch (FirstOpSubReg) {
    case AArch64::zsub0:
    case AArch64::zsub1:
    case AArch64::zsub2:
    case AArch64::zsub3:
      break;
    default:
      continue;
    }

    // Look up the physical register mapped to the first operand of the pseudo.
    Register FirstOpVirtReg = MI.getOperand(1).getReg();
    if (!VRM->hasPhys(FirstOpVirtReg))
      continue;

    MCRegister TupleStartReg =
        getSubReg(VRM->getPhys(FirstOpVirtReg), FirstOpSubReg);
    for (unsigned I = 0; I < Order.size(); ++I)
      if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
        if (R == TupleStartReg)
          Hints.push_back(Order[I]);
  }

  return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                   VRM);
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();

  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;

  auto IsCoalescerBarrier = [](const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case AArch64::COALESCER_BARRIER_FPR16:
    case AArch64::COALESCER_BARRIER_FPR32:
    case AArch64::COALESCER_BARRIER_FPR64:
    case AArch64::COALESCER_BARRIER_FPR128:
      return true;
    default:
      return false;
    }
  };

  // For calls that temporarily have to toggle streaming mode as part of the
  // call-sequence, we need to be more careful when coalescing copy
  // instructions so that we don't end up coalescing the NEON/FP result or
  // argument register with a whole Z-register, such that after coalescing the
  // register allocator will try to spill/reload the entire Z register.
  //
  // We do this by checking if the node has any defs/uses that are
  // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist to
  // instruct the coalescer to avoid coalescing the copy.
  if (MI->isCopy() && SubReg != DstSubReg &&
      (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
       AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
      return false;
    unsigned DstReg = MI->getOperand(0).getReg();
    if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
      return false;
  }

  return true;
}

bool AArch64RegisterInfo::shouldAnalyzePhysregInMachineLoopInfo(
    MCRegister R) const {
  return R == AArch64::VG;
}

bool AArch64RegisterInfo::isIgnoredCVReg(MCRegister LLVMReg) const {
  return (LLVMReg >= AArch64::Z0 && LLVMReg <= AArch64::Z31) ||
         (LLVMReg >= AArch64::P0 && LLVMReg <= AArch64::P15);
}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
static bool isTargetWindows(const MachineFunction &MF)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition Compiler.h:404
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Live Register Matrix
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:487
static unsigned getDwarfRegNum(MCRegister Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
bool isIgnoredCVReg(MCRegister LLVMReg) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
const TargetRegisterClass * getPointerRegClass(unsigned Kind=0) const override
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
bool isUserReservedReg(const MachineFunction &MF, MCRegister Reg) const
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
bool requiresRegisterScavenging(const MachineFunction &MF) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
BitVector getUserReservedRegs(const MachineFunction &MF) const
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
bool regNeedsCFI(MCRegister Reg, MCRegister &RegToUseForCFI) const
Return whether the register needs a CFI entry.
bool isAnyArgRegReserved(const MachineFunction &MF) const
void emitReservedArgRegCallError(const MachineFunction &MF) const
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
const uint32_t * getTLSCallPreservedMask() const
const uint32_t * getNoPreservedMask() const override
Register getFrameRegister(const MachineFunction &MF) const override
bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const override
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
unsigned getLocalAddressRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
const uint32_t * getSMStartStopCallPreservedMask() const
bool useFPForScavengingIndex(const MachineFunction &MF) const override
bool cannotEliminateFrame(const MachineFunction &MF) const
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
std::optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
bool isXRegisterReservedForRA(size_t i) const
const AArch64TargetLowering * getTargetLowering() const override
bool isXRegCustomCalleeSaved(size_t i) const
bool isXRegisterReserved(size_t i) const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:352
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:227
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:730
Describe properties that are true of each instruction in the target description file.
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSubRegIterator enumerates all sub-registers of Reg.
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack frame for this function contains any variable sized objects.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a call to @llvm.frameaddress in this function.
int64_t getLocalFrameSize() const
Get the size of the local object blob.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
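These MachineFrameInfo queries feed decisions such as hasBasePointer and cannotEliminateFrame; a sketch of the usual shape (the size threshold is invented):

  #include "llvm/CodeGen/MachineFrameInfo.h"
  #include "llvm/CodeGen/MachineFunction.h"

  using namespace llvm;

  // Illustrative heuristic only; real targets also weigh ABI and
  // stack-realignment rules.
  static bool mightNeedBasePointer(const MachineFunction &MF) {
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    return MFI.hasVarSizedObjects() ||
           (MFI.adjustsStack() && MFI.getStackSize() > 256);
  }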
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
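ChangeToImmediate is the classic endpoint of frame-index elimination when an address folds to a constant; pattern sketch (the resolved offset is assumed to be computed elsewhere):

  #include "llvm/CodeGen/MachineInstr.h"

  using namespace llvm;

  // Rewrite the frame-index operand of MI in place with a known constant.
  static void foldFrameIndexToImm(MachineInstr &MI, unsigned FIOperandNum,
                                  int64_t ResolvedOffset) {
    MI.getOperand(FIOperandNum).ChangeToImmediate(ResolvedOffset);
  }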
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
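Code like eliminateFrameIndex materializes out-of-range offsets through a scratch virtual register created this way; sketch:

  #include "AArch64RegisterInfo.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"

  using namespace llvm;

  static Register makeScratchGPR(MachineRegisterInfo &MRI) {
    // A fresh 64-bit GPR vreg; the allocator or scavenger assigns the
    // physical register later.
    return MRI.createVirtualRegister(&AArch64::GPR64RegClass);
  }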
LLVM_ABI void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
int64_t getFixed() const
Returns the fixed component of the stack.
Definition TypeSize.h:46
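StackOffset carries the fixed and scalable (SVE) components separately so each can be lowered with the right instructions; sketch:

  #include "llvm/Support/TypeSize.h"

  using namespace llvm;

  static void demoStackOffset() {
    StackOffset Off =
        StackOffset::getFixed(16) + StackOffset::getScalable(32);
    int64_t Fixed = Off.getFixed();       // 16 bytes
    int64_t Scalable = Off.getScalable(); // 32 bytes, scaled by vscale
    (void)Fixed;
    (void)Scalable;
  }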
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetOptions Options
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disabled for the given machine function.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physical register for the virtual register VirtReg.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
MCRegister getPhys(Register virtReg) const
Returns the physical register mapped to the specified virtual register.
Definition VirtRegMap.h:91
bool hasPhys(Register virtReg) const
Returns true if the specified virtual register is mapped to a physical register.
Definition VirtRegMap.h:87
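Hint-computation code (see getRegAllocationHints above) consults the VirtRegMap like this; minimal sketch:

  #include "llvm/CodeGen/VirtRegMap.h"

  using namespace llvm;

  static MCRegister physOrInvalid(const VirtRegMap &VRM, Register VirtReg) {
    if (VRM.hasPhys(VirtReg))     // only assigned vregs map to a phys reg
      return VRM.getPhys(VirtReg);
    return MCRegister();          // default-constructed == invalid
  }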
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
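llvm_unreachable typically terminates a fully-covered switch; a minimal sketch:

  #include "llvm/Support/ErrorHandling.h"

  static unsigned demoKindToBits(int Kind) {
    switch (Kind) {
    case 0: return 32;
    case 1: return 64;
    default:
      llvm_unreachable("unexpected Kind"); // traps in asserts builds
    }
  }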
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm is a 6-bit shift amount; shifter: 000 ==> lsl, 001 ==> lsr, 010 ==> asr, 011 ==> ror, 100 ==> msl.
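A sketch of getShifterImm in its usual role, encoding the shift operand of an AArch64 arithmetic instruction (the amount 12 is illustrative):

  #include "MCTargetDesc/AArch64AddressingModes.h"

  using namespace llvm;

  static unsigned demoShifterImm() {
    // Encode "LSL #12", e.g. for the shifted-immediate form of ADD.
    return AArch64_AM::getShifterImm(AArch64_AM::LSL, 12);
  }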
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
int getSVEPseudoMap(uint16_t Opcode)
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
@ Swift
Calling convention for Swift.
Definition CallingConv.h:69
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Flow Guard Check ICall function.
Definition CallingConv.h:82
@ PreserveMost
Used for runtime calls that preserve most registers.
Definition CallingConv.h:63
@ AnyReg
OBSOLETED - Used for stack-based JavaScript calls.
Definition CallingConv.h:60
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition CallingConv.h:41
@ PreserveNone
Used for runtime calls that preserve no general-purpose registers.
Definition CallingConv.h:90
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.
Definition CallingConv.h:87
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
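all_of and the related any_of below replace explicit begin/end loops over register lists; sketch:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/MC/MCRegister.h"

  using namespace llvm;

  static bool allValid(ArrayRef<MCPhysReg> Regs) {
    // True when every entry is a valid (non-zero) physical register.
    return all_of(Regs, [](MCPhysReg R) { return R != 0; });
  }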
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
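BuildMI together with the addReg/addImm chain above is how new machine instructions are spliced in; an illustrative ADDXri (assumes Imm fits the 12-bit immediate field):

  #include "AArch64InstrInfo.h"
  #include "MCTargetDesc/AArch64AddressingModes.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"

  using namespace llvm;

  // DestReg = SP + Imm, with no shift on the immediate.
  static void emitAddFromSP(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, const TargetInstrInfo &TII,
                            Register DestReg, unsigned Imm) {
    BuildMI(MBB, MBBI, DL, TII.get(AArch64::ADDXri), DestReg)
        .addReg(AArch64::SP)
        .addImm(Imm)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  }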
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
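A pattern sketch around isAArch64FrameOffsetLegal, using the status flags documented further down; whether a partial fold is acceptable is the caller's decision:

  #include "AArch64InstrInfo.h"

  using namespace llvm;

  static bool canAbsorbOffset(const MachineInstr &MI, StackOffset &Offset) {
    int Status = isAArch64FrameOffsetLegal(MI, Offset);
    if (Status & AArch64FrameOffsetIsLegal)
      return true;                                      // fully representable
    return (Status & AArch64FrameOffsetCanUpdate) != 0; // partial fold
  }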
@ Done
Definition Threading.h:60
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1744
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
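A typical emitFrameOffset call, passing only the leading parameters from the signature above and leaving the rest defaulted (X9 as the scratch destination is illustrative):

  #include "AArch64InstrInfo.h"

  using namespace llvm;

  // X9 = SP + 16 fixed bytes + 32 scalable (SVE) bytes.
  static void demoEmitFrameOffset(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  const DebugLoc &DL,
                                  const TargetInstrInfo *TII) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::X9, AArch64::SP,
                    StackOffset::getFixed(16) + StackOffset::getScalable(32),
                    TII);
  }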
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1770
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
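The usual frame-index-elimination pattern around rewriteAArch64FrameIndex: try the in-place fold first, then fall back to materializing the address (e.g. via emitFrameOffset above) if it fails; sketch:

  #include "AArch64InstrInfo.h"

  using namespace llvm;

  // Returns true if MI was rewritten; Offset is updated in place with
  // whatever part of the offset could not be folded.
  static bool tryRewriteFI(MachineInstr &MI, unsigned FIOperandNum,
                           unsigned FrameReg, StackOffset &Offset,
                           const AArch64InstrInfo *TII) {
    return rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }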
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1945
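is_contained and find_if round out the range helpers above; for example, a membership test against a callee-saved list reduces to one call:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/MC/MCRegister.h"

  using namespace llvm;

  static bool isInList(ArrayRef<MCPhysReg> CSRs, MCPhysReg Reg) {
    // Equivalent to find_if with an equality predicate, but shorter.
    return is_contained(CSRs, Reg);
  }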