#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool> ReverseCSRRestoreSeq(
    "reverse-csr-restore-seq",
    cl::desc("reverse the CSR restore sequence"),
    cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
static int64_t getArgumentStackToRestore(MachineFunction &MF,
                                         MachineBasicBlock &MBB) {
  // ...
  bool IsTailCallReturn = false;
  // ...
  unsigned RetOpcode = MBBI->getOpcode();
  IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                     RetOpcode == AArch64::TCRETURNri ||
                     RetOpcode == AArch64::TCRETURNriBTI;
  // ...
  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    // ...
    ArgumentPopSize = StackAdjust.getImm();
  }
  // ...
  return ArgumentPopSize;
}
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  // ...
}

bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
  // ...
}
// In estimateRSStackSizeLimit(): skip instructions that do not constrain the
// reachable offset range.
if (MI.isDebugInstr() || MI.isPseudo() ||
    MI.getOpcode() == AArch64::ADDXri ||
    MI.getOpcode() == AArch64::ADDSXri)
  continue;
// In getFixedObjectSize(): on Win64, vararg and EH unwind-help space lives in
// the fixed object area next to SP.
if (!IsWin64 || IsFunclet) {
  // ...
}
// ...
const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
return alignTo(VarArgsArea + UnwindHelpObject, 16);
// In canUseRedZone():
const unsigned RedZoneSize =
    Subtarget.getRedZoneSize(MF.getFunction());
// ...
return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
         /* ... */);
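// The next fragment is from eliminateCallFramePseudoInstr(), which lowers the
// call frame setup/destroy pseudos; the asserts bound the outgoing call frame
// to what the SP adjustments emitted here can encode (|Amount| < 0xffffff).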
unsigned Opc = I->getOpcode();
bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
// ...
int64_t Amount = I->getOperand(0).getImm();
// ...
if (CalleePopAmount == 0) {
  // ...
  assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
  // ...
} else if (CalleePopAmount != 0) {
  // ...
  assert(CalleePopAmount < 0xffffff && "call frame too large");
  // ...
}
void AArch64FrameLowering::emitCalleeSavedGPRLocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  // ...
  for (const auto &Info : CSI) {
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    unsigned DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);
    // ...
  }
}
void AArch64FrameLowering::emitCalleeSavedSVELocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  // ...
  for (const auto &Info : CSI) {
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    // ...
  }
}
// In resetCFIToInitialState():
const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION);
// ...
// Reset the CFA to SP + 0.
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
    nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0));
// ...
if (MFI.shouldSignReturnAddress(MF)) {
  // ...
}
// Shadow call stack uses X18; reset it.
insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                   TRI.getDwarfRegNum(AArch64::X18, true));

const std::vector<CalleeSavedInfo> &CSI =
    MF.getFrameInfo().getCalleeSavedInfo();
for (const auto &Info : CSI) {
  unsigned Reg = Info.getReg();
  if (!TRI.regNeedsCFI(Reg, Reg))
    continue;
  insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                     TRI.getDwarfRegNum(Reg, true));
}
// In emitCalleeSavedRestores():
for (const auto &Info : CSI) {
  // ...
  unsigned Reg = Info.getReg();
  // ...
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
      nullptr, TRI.getDwarfRegNum(Info.getReg(), true)));
  // ...
}
void AArch64FrameLowering::emitCalleeSavedGPRRestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/false);
}

void AArch64FrameLowering::emitCalleeSavedSVERestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/true);
}
// In getRegisterOrZero():
#define CASE(n)                                                                \
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
    return AArch64::X##n
// ...
#define CASE(n)                                                                \
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
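// The CASE(n) macro above folds every architectural view of register n (W/X
// for GPRs; B/H/S/D/Q for vector registers) into the one register that must
// actually be zeroed: the 64-bit X register, or the Z register when SVE is
// available (otherwise Q).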
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  // ...
  bool HasSVE = STI.hasSVE();
  // ...
  if (TRI.isGeneralPurposeRegister(MF, Reg)) {
    // ...
    GPRsToZero.set(XReg);
  } else if (AArch64::FPR128RegClass.contains(Reg) ||
             AArch64::FPR64RegClass.contains(Reg) ||
             AArch64::FPR32RegClass.contains(Reg) ||
             AArch64::FPR16RegClass.contains(Reg) ||
             AArch64::FPR8RegClass.contains(Reg)) {
    // ...
    FPRsToZero.set(XReg);
  }
  // ...
  for (MCRegister PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg])
      // ...
  }
// In findScratchNonCalleeSaveRegister():
for (unsigned i = 0; CSRegs[i]; ++i)
  LiveRegs.addReg(CSRegs[i]);
// ...
for (unsigned Reg : AArch64::GPR64RegClass) {
  // ...
}
return AArch64::NoRegister;
859 if (!RegInfo->hasStackRealignment(*MF))
// In windowsRequiresStackProbe():
unsigned StackProbeSize =
    F.getFnAttributeAsParsedInteger("stack-probe-size", 4096);
return (StackSizeInBytes >= StackProbeSize) &&
       !F.hasFnAttribute("no-stack-arg-probe");

// In needsWinCFI():
return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
       F.needsUnwindTableEntry();
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, uint64_t StackBumpBytes) const {
  // ...
  if (homogeneousPrologEpilog(MF))
    return false;
  // ...
  if (MFI.hasVarSizedObjects())
    return false;
  if (RegInfo->hasStackRealignment(MF))
    return false;
  // ...
}
bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
    MachineBasicBlock &MBB, unsigned StackBumpBytes) const {
  if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes))
    return false;
  // ...
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    // ...
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  // ...
  case AArch64::STZ2Gi:
    return false;
  // ...
  }
}
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  // ...
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  MachineInstrBuilder MIB;
  // ...
  switch (Opc) {
  // ...
  case AArch64::LDPDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
    // ...
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPXpre: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
      // ...
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
      // ...
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
    // ...
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      // ...
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    break;
  }
  }
  // ...
}
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  // ...
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI,
    MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup,
    int CFAOffset = 0) {
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
  // ...
  if (NeedsWinCFI) {
    auto SEH = std::next(MBBI);
    if (AArch64InstrInfo::isSEHInstruction(*SEH))
      SEH->eraseFromParent();
  }
  // ...
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  // ...
  // If the first store isn't right where we want SP then we can't fold the
  // update in so create a normal arithmetic instruction instead.
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag,
                    false, false, nullptr, EmitCFI,
                    StackOffset::getFixed(/* ... */));
    return std::prev(MBBI);
  }
  // ...
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);
  // ...
}
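// fixupCalleeSaveRestoreStackOffset() performs the matching bookkeeping: when
// the callee-save spills and the local-stack SP bump are combined, every
// SP-relative callee-save load/store must have the local stack size added to
// its scaled immediate offset.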
// In fixupCalleeSaveRestoreStackOffset():
unsigned Opc = MI.getOpcode();
// ...
unsigned Scale;
switch (Opc) {
case AArch64::STPXi:
case AArch64::STRXui:
case AArch64::STPDi:
case AArch64::STRDui:
case AArch64::LDPXi:
case AArch64::LDRXui:
case AArch64::LDPDi:
case AArch64::LDRDui:
  Scale = 8;
  break;
case AArch64::STPQi:
case AArch64::STRQui:
case AArch64::LDPQi:
case AArch64::LDRQui:
  Scale = 16;
  break;
// ...
}

unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
       "Unexpected base register in callee-save save/restore instruction!");
// ...
assert(LocalStackSize % Scale == 0);
OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);
// ...
assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
       "Expecting a SEH instruction");
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::STR_ZXI:
  case AArch64::STR_PXI:
  case AArch64::LDR_ZXI:
  case AArch64::LDR_PXI:
    // ...
  }
}

// In needsShadowCallStackPrologueEpilogue(): the sequence is only needed when
// LR is among the callee saves.
static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) {
  if (!(llvm::any_of(
            MF.getFrameInfo().getCalleeSavedInfo(),
            [](const auto &Info) { return Info.getReg() == AArch64::LR; }) &&
        // ...
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII,
                                        MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, bool NeedsWinCFI,
                                        bool NeedsUnwindInfo) {
  // ...
  if (NeedsUnwindInfo) {
    // Emit a CFI instruction that causes 8 to be subtracted from the value of
    // x18 when unwinding past this frame.
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        // ...
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (sleb128)
    };
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
        nullptr, StringRef(CFIInst, sizeof(CFIInst))));
    // ...
  }
}
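// emitPrologue() below stitches the pieces together. Roughly, the emitted
// order is: return-address signing (PACIASP/PACIBSP), shadow call stack push,
// callee-save spills (possibly folded into the first SP decrement),
// frame-pointer setup, Windows stack probing when required, the SVE area, and
// finally the local-variable allocation plus any dynamic realignment.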
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  // ...
  bool HasFP = hasFP(MF);
  // ...
  bool HasWinCFI = false;
  // ...
  if (needsShadowCallStackPrologueEpilogue(MF))
    emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
                                MFnI.needsDwarfUnwindInfo(MF));

  if (MFnI.shouldSignReturnAddress(MF)) {
    if (MFnI.shouldSignWithBKey()) {
      // ...
    }
    // ...
    BuildMI(MBB, MBBI, DL,
            TII->get(MFnI.shouldSignWithBKey() ? AArch64::PACIBSP
                                               : AArch64::PACIASP))
        .setMIFlag(MachineInstr::FrameSetup);
    // ...
  } else if (NeedsWinCFI) {
    // ...
  }

  if (EmitCFI && MFnI.isMTETagged()) {
    // ...
  }
  // ...
  assert(!HasFP && "unexpected function without stack frame but with FP");
  assert(!SVEStackSize &&
         "unexpected function without stack frame but with SVE objects");
  // ...
  ++NumRedZoneFunctions;
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  bool HomPrologEpilog = homogeneousPrologEpilog(MF);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
  } else if (HomPrologEpilog) {
    // ...
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
        /* ... */);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // if we decided to combine the callee-save and local stack pointer bump
  // above.
  // ...
    fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                      NeedsWinCFI, &HasWinCFI);
1564 if (!IsFunclet && HasFP) {
1576 bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
1577 if (HaveInitialContext)
1580 .
addUse(HaveInitialContext ? AArch64::X22 : AArch64::XZR)
1586 if (HomPrologEpilog) {
1599 if (NeedsWinCFI && HasWinCFI) {
1604 NeedsWinCFI =
false;
1609 const int OffsetToFirstCalleeSaveFromFP =
1613 unsigned Reg = RegInfo->getDwarfRegNum(
FramePtr,
true);
1615 nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
1626 emitCalleeSavedGPRLocations(
MBB,
MBBI);
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
  int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
    uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
    // ...
    if (NumBytes >= (1 << 28))
      report_fatal_error("Stack size cannot exceed 256MB for stack "
                         "unwinding purposes");

    uint32_t LowNumWords = NumWords & 0xFFFF;
    // ...
    if ((NumWords & 0xFFFF0000) != 0) {
      // ...
          .addImm((NumWords & 0xFFFF0000) >> 16)
      // ...
    }
    // ...
  }

  if (RealignmentPadding > 0) {
    // ...
        .addImm(RealignmentPadding)
    // ...
  }
  // Process the SVE callee-saves to determine what space needs to be
  // allocated.
  StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {};
  MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI;
  // ...
  CalleeSavesBegin = MBBI;
  // ...
  CalleeSavesEnd = MBBI;
  // ...
  AllocateAfter = SVEStackSize - AllocateBefore;
  // ...
  // Allocate space for the callee saves (if any).
  emitFrameOffset(
      MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, -AllocateBefore, TII,
      MachineInstr::FrameSetup, false, NeedsWinCFI, nullptr,
      EmitCFI && !HasFP && AllocateBefore,
      /* ... */);
  // ...
  emitCalleeSavedSVELocations(MBB, CalleeSavesEnd);

  // Allocate the remaining SVE stack space.
  emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP,
                  -AllocateAfter, TII, MachineInstr::FrameSetup, false,
                  NeedsWinCFI, nullptr, EmitCFI && !HasFP && AllocateAfter,
                  /* ... */);
  unsigned scratchSPReg = AArch64::SP;

  if (NeedsRealignment) {
    scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
    assert(scratchSPReg != AArch64::NoRegister);
  }
  // ...
  emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP,
                  StackOffset::getFixed(-NumBytes), TII,
                  MachineInstr::FrameSetup,
                  false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
                  /* ... */);
  // ...
  if (NeedsRealignment) {
    // ...
    assert(scratchSPReg != AArch64::SP);
    // ...
  }
  // ...
  if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
    // ...
  }
  // ...
  if (NeedsWinCFI && HasWinCFI) {
    // ...
  }
  // ...
  if (IsFunclet && F.hasPersonalityFn()) {
    // ...
  }
static void InsertReturnAddressAuth(MachineFunction &MF,
                                    MachineBasicBlock &MBB,
                                    bool NeedsWinCFI, bool *HasWinCFI) {
  // ...
  if (!MFI.shouldSignReturnAddress(MF))
    return;
  // ...
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // ...
  if (Subtarget.hasPAuth() &&
      // ...
      MBBI != MBB.end() && MBBI->getOpcode() == AArch64::RET_ReallyLR &&
      // ...
    BuildMI(MBB, MBBI, DL,
            TII->get(MFI.shouldSignWithBKey() ? AArch64::RETAB
                                              : AArch64::RETAA))
        // ...
  } else {
    BuildMI(MBB, MBBI, DL,
            TII->get(MFI.shouldSignWithBKey() ? AArch64::AUTIBSP
                                              : AArch64::AUTIASP))
        // ...
  }
}
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}
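// emitEpilogue() mirrors the prologue in reverse: deallocate locals and the
// SVE area, restore callee saves (possibly folding the final load into a
// post-increment that also restores SP), then authenticate the return address
// via InsertReturnAddressAuth().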
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  // ...
  bool HasWinCFI = false;
  bool IsFunclet = false;
  // ...
  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    // ...
  }
  // ...
  BuildMI(MBB, MBB.getFirstTerminator(), DL,
          TII->get(AArch64::SEH_EpilogEnd))
      .setMIFlag(MachineInstr::FrameDestroy);
  // ...
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  // ...
  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    auto HomogeneousEpilog = std::prev(LastPopI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      LastPopI = HomogeneousEpilog;
    // ...
    assert(AfterCSRPopSize == 0);
    // ...
  }
  // ...
  bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
  // Assume we can't combine the last pop with the sp restore.
  bool CombineAfterCSRBump = false;
  if (!CombineSPBump && PrologueSaveSize != 0) {
    // ...
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // ...
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
          /* ... */);
      // ...
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }
  // ...
  while (LastPopI != Begin) {
    --LastPopI;
    if (/* ... */) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
    if (EmitCFI && hasFP(MF)) {
      // ...
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      // ...
    }
    // ...
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Process the SVE callee-saves to determine what space needs to be
  // deallocated.
  StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
  // ...
  RestoreBegin = std::prev(RestoreEnd);
  while (RestoreBegin != MBB.begin() &&
         IsSVECalleeSave(std::prev(RestoreBegin)))
    --RestoreBegin;
  // ...
  DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
  DeallocateAfter = CalleeSavedSizeAsOffset;
  // ...
  // Deallocate the SVE area.
  emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                  DeallocateBefore, TII, MachineInstr::FrameDestroy,
                  false, false, nullptr, EmitCFI && !hasFP(MF),
                  /* ... */);
  // ...
  emitFrameOffset(/* ... */,
                  false, nullptr, EmitCFI && !hasFP(MF),
                  /* ... */);
  // ...
  emitFrameOffset(/* ... */,
                  false, nullptr, EmitCFI && !hasFP(MF),
                  /* ... */);
  // ...
  emitCalleeSavedSVERestores(MBB, RestoreEnd);
  // ...
  if (RedZone && AfterCSRPopSize == 0)
    return;
  // ...
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;
  // ...
  emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(StackRestoreBytes), TII,
                  MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
  // ...
  if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
    // ...
    return;
  }
  // ...
  emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                  /* ... */);
  // ...
  } else if (NumBytes)
    // ...

  if (EmitCFI && hasFP(MF)) {
    // ...
    unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
    // ...
  }

  // This must be placed after the callee-save restore code, which assumes SP
  // is where it was after the callee-save saves in the prologue.
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    // ...
    emitFrameOffset(
        MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
        StackOffset::getFixed(-AfterCSRPopSize), TII,
        MachineInstr::FrameDestroy,
        false, NeedsWinCFI, &HasWinCFI, EmitCFI,
        /* ... */);
  }
}
static StackOffset getFPOffset(const MachineFunction &MF,
                               int64_t ObjectOffset) {
  // ...
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
  // ...
}

static StackOffset getStackOffset(const MachineFunction &MF,
                                  int64_t ObjectOffset) {
  // ...
}

// In getSEHFrameIndexOffset():
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();

StackOffset AArch64FrameLowering::resolveFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP,
    bool ForSimm) const {
  // ...
  bool isFixed = MFI.isFixedObjectIndex(FI);
  // ...
}

StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
    const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
    Register &FrameReg, bool PreferFP, bool ForSimm) const {
2319 PreferFP &= !SVEStackSize;
2327 }
else if (isCSR && RegInfo->hasStackRealignment(MF)) {
2331 assert(
hasFP(MF) &&
"Re-aligned stack must have frame pointer");
2333 }
else if (
hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
2338 bool FPOffsetFits = !ForSimm || FPOffset >= -256;
2339 PreferFP |=
Offset > -FPOffset && !SVEStackSize;
2341 if (MFI.hasVarSizedObjects()) {
2345 bool CanUseBP = RegInfo->hasBasePointer(MF);
2346 if (FPOffsetFits && CanUseBP)
2353 }
else if (FPOffset >= 0) {
2358 }
else if (MF.
hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
2365 "Funclets should only be present on Win64");
2369 if (FPOffsetFits && PreferFP)
2376 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
2377 "In the presence of dynamic stack pointer realignment, "
2378 "non-argument/CSR objects cannot be accessed through the frame pointer");
2390 RegInfo->hasStackRealignment(MF))) {
2391 FrameReg = RegInfo->getFrameRegister(MF);
2395 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
2401 if (UseFP && !(isFixed || isCSR))
2402 ScalableOffset = -SVEStackSize;
2403 if (!UseFP && (isFixed || isCSR))
2404 ScalableOffset = SVEStackSize;
2407 FrameReg = RegInfo->getFrameRegister(MF);
2412 if (RegInfo->hasBasePointer(MF))
2413 FrameReg = RegInfo->getBaseRegister();
2415 assert(!MFI.hasVarSizedObjects() &&
2416 "Can't use SP when we have var sized objects.");
2417 FrameReg = AArch64::SP;
static bool produceCompactUnwindFrame(MachineFunction &MF) {
  // ...
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
         MF.getFunction().getCallingConv() != CallingConv::SwiftTail;
}
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst,
                                             const TargetRegisterInfo *TRI) {
  // ...
  if (Reg2 == AArch64::FP)
    return true;
  // ...
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // ...
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  return true;
}
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst,
                                      const TargetRegisterInfo *TRI) {
  // ...
  // If we need to store the frame record, don't pair any register with LR
  // other than FP.
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;
  // ...
}
struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR } Type;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }

  unsigned getScale() const {
    // ...
  }

  bool isScalable() const { return Type == PPR || Type == ZPR; }
};
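// computeCalleeSaveRegisterPairs() walks the callee-saved list and greedily
// forms LDP/STP-able pairs, assigning each RegPairInfo a frame index and a
// scaled offset; the invalidate*Pairing() helpers above veto pairs that would
// break the frame record or Windows unwind-code rules.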
static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, ArrayRef<CalleeSavedInfo> CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
    bool NeedsFrameRecord) {
  // ...
  unsigned Count = CSI.size();
  // ...
  assert(/* ... */ (Count & 1) == 0 &&
         "Odd number of callee-saved regs to spill!");
  // ...
  int StackFillDir = -1;
  int RegInc = 1;
  unsigned FirstReg = 0;
  if (NeedsWinCFI) {
    // For WinCFI, fill the stack from the bottom up.
    // ...
    StackFillDir = 1;
    RegInc = -1;
    FirstReg = Count - 1;
  }
  // ...
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::GPR;
    else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR64;
    else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR128;
    else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::ZPR;
    else if (AArch64::PPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::PPR;
    // ...

    // Add the next reg to the pair if it is in the same register class.
    if (unsigned(i + RegInc) < Count) {
      Register NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, /* ... */,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst,
                                       TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            // ...
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
      case RegPairInfo::ZPR:
        break;
      }
    }
    // ...
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");

    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // Windows AAPCS has FP and LR reversed.
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    assert((!RPI.isPaired() || /* ... */ ||
            ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
             RPI.Reg1 + 1 == RPI.Reg2)) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    if (NeedsWinCFI &&
        RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();

    int Scale = RPI.getScale();

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);

    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * Scale;
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);

    // Swift's async context is directly before FP, so allocate an extra
    // 8 bytes for it.
    if (/* ... */ &&
        RPI.Reg2 == AArch64::FP)
      ByteOffset += StackFillDir * 8;

    assert(!(RPI.isScalable() && RPI.isPaired()) &&
           "Paired spill/fill instructions don't exist for SVE vectors");

    // Round up size of non-pair to pair size if we need to pad the
    // callee-save area to ensure 16-byte alignment.
    if (NeedGapToAlignStack && !NeedsWinCFI &&
        !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
        !RPI.isPaired() && ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
      // ...
      MFI.setObjectAlignment(RPI.FrameIdx, Align(16));
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // If filling top down (default), we want the offset after incrementing it.
    // If filling bottom up (WinCFI), we need the original offset.
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
    if (/* ... */ &&
        RPI.Reg2 == AArch64::FP)
      Offset += 8;
    RPI.Offset = Offset / Scale;

    assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");

    // Save the offset to frame record so that the FP register can point to
    // the innermost frame record (spilled FP and LR registers).
    if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
                              RPI.Reg2 == AArch64::FP) ||
                             (IsWindows && RPI.Reg1 == AArch64::FP &&
                              RPI.Reg2 == AArch64::LR)))
      AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);

    RegPairs.push_back(RPI);
    // ...
  }
  // ...
  MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16));
  // ...
  std::reverse(RegPairs.begin(), RegPairs.end());
}
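// spillCalleeSavedRegisters() then emits one STP/STR (or SVE STR_ZXI/STR_PXI)
// per computed pair, marking non-reserved registers live-in and tagging SVE
// slots with the ScalableVector stack ID.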
bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  // ...
  if (homogeneousPrologEpilog(MF)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (!MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
    return true;
  }
  for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;
    // ...
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      StrOpc = AArch64::STR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwdinding requires a consecutive (FP,LR) pair");
    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will save (x,x+1)
    // and not (x+1,x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (!MRI.isReserved(Reg1))
      MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      // ...
    }
    // ...
    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
      MFI.setStackID(RPI.FrameIdx, TargetStackID::ScalableVector);
  }
  return true;
}
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  // ...
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // ...
  auto EmitMI = [&](const RegPairInfo &RPI) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned LdrOpc;
    // ...
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      LdrOpc = AArch64::LDR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    // Windows unwind codes require consecutive registers if registers are
    // paired.
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (RPI.isPaired()) {
      // ...
    }
    // ...
  };

  // SVE objects are always restored in reverse order.
  for (const RegPairInfo &RPI : reverse(RegPairs))
    if (RPI.isScalable())
      EmitMI(RPI);

  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
    }
    return true;
  }

  if (ReverseCSRRestoreSeq) {
    // ...
    for (const RegPairInfo &RPI : reverse(RegPairs)) {
      if (RPI.isScalable())
        continue;
      // ...
    }
  } else {
    // ...
    for (const RegPairInfo &RPI : RegPairs) {
      if (RPI.isScalable())
        continue;
      // ...
    }
  }
  // ...
}
void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // ...
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
  // ...
  unsigned ExtraCSSpill = 0;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];
    // ...
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = AArch64::NoRegister;
    if (AArch64::GPR64RegClass.contains(Reg) ||
        AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg))
      PairedReg = CSRegs[i ^ 1];

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }
    // ...
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }
  // ...
  SavedRegs.set(AArch64::X18);
  // ...
  // Calculate the callee-saved stack size.
  unsigned CSStackSize = 0;
  unsigned SVECSStackSize = 0;
  // ...
  for (unsigned Reg : SavedRegs.set_bits()) {
    // ...
    if (AArch64::PPRRegClass.contains(Reg) ||
        AArch64::ZPRRegClass.contains(Reg))
      SVECSStackSize += RegSize;
    else
      CSStackSize += RegSize;
  }
  // ...
  // Save the number of saved regs, so we can easily update CSStackSize later.
  unsigned NumSavedRegs = SavedRegs.count();
  // ...
  SavedRegs.set(AArch64::FP);
  SavedRegs.set(AArch64::LR);
  // ...
  int64_t SVEStackSize =
      alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
  // ...
  int64_t CalleeStackUsed = 0;
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
    int64_t FixedOff = MFI.getObjectOffset(I);
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  }
  // ...
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);
  // ...
  if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
    LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                      << " to get a scratch register.\n");
    SavedRegs.set(UnspilledCSGPR);
    // ...
    if (producePairRegisters(MF))
      SavedRegs.set(UnspilledCSGPRPaired);
    ExtraCSSpill = UnspilledCSGPR;
  }
  // ...
  const TargetRegisterClass &RC = AArch64::GPR64RegClass;
  unsigned Size = TRI->getSpillSize(RC);
  Align Alignment = TRI->getSpillAlign(RC);
  int FI = MFI.CreateStackObject(Size, Alignment, false);
  RS->addScavengingFrameIndex(FI);
  LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                    << " as the emergency spill slot.\n");
  // ...
  // Adding the size of additional 64bit GPR saves.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);
  // ...
  if (hasFP(MF) && AFI->hasSwiftAsyncContext())
    CSStackSize += 8;
  // ...
  uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize
                    << " bytes.\n");

  assert((!MFI.isCalleeSavedInfoValid() ||
          AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
         "Should not invalidate callee saved info");

  // Round up to register-pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(AlignedCSStackSize);
  AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
  AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
}
3152 std::vector<CalleeSavedInfo> &CSI,
unsigned &MinCSFrameIndex,
3153 unsigned &MaxCSFrameIndex)
const {
3161 std::reverse(CSI.begin(), CSI.end());
3175 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3176 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3179 for (
auto &CS : CSI) {
3186 CS.setFrameIdx(FrameIdx);
3188 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3189 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3193 Reg == AArch64::FP) {
3196 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3197 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3211 int &Min,
int &Max) {
3212 Min = std::numeric_limits<int>::max();
3213 Max = std::numeric_limits<int>::min();
3219 for (
auto &CS : CSI) {
3220 if (AArch64::ZPRRegClass.
contains(CS.getReg()) ||
3221 AArch64::PPRRegClass.contains(CS.getReg())) {
3222 assert((Max == std::numeric_limits<int>::min() ||
3223 Max + 1 == CS.getFrameIdx()) &&
3224 "SVE CalleeSaves are not consecutive");
3226 Min = std::min(Min, CS.getFrameIdx());
3227 Max = std::max(Max, CS.getFrameIdx());
3230 return Min != std::numeric_limits<int>::max();
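// determineSVEStackObjectOffsets() lays out scalable-vector objects: SVE
// callee-save slots first, then the remaining SVE locals and spills, all with
// at most 16-byte alignment; when AssignOffsets is set it also writes the
// computed offsets back into MFI.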
3239 int &MinCSFrameIndex,
3240 int &MaxCSFrameIndex,
3241 bool AssignOffsets) {
3246 "SVE vectors should never be passed on the stack by value, only by "
3250 auto Assign = [&MFI](
int FI, int64_t
Offset) {
3260 for (
int I = MinCSFrameIndex;
I <= MaxCSFrameIndex; ++
I) {
3276 int StackProtectorFI = -1;
3280 ObjectsToAllocate.
push_back(StackProtectorFI);
3286 if (
I == StackProtectorFI)
3288 if (MaxCSFrameIndex >=
I &&
I >= MinCSFrameIndex)
3297 for (
unsigned FI : ObjectsToAllocate) {
3302 if (Alignment >
Align(16))
3304 "Alignment of scalable vectors > 16 bytes is not yet supported");
3314int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
3316 int MinCSFrameIndex, MaxCSFrameIndex;
3320int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
3331 "Upwards growing stack unsupported");
3333 int MinCSFrameIndex, MaxCSFrameIndex;
3334 int64_t SVEStackSize =
3335 assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
3355 int64_t FixedObject =
3368 assert(DstReg &&
"There must be a free register after frame setup");
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  // ...
};

class TagStoreEdit {
  // ...
  // If not std::nullopt, move FrameReg to (FrameReg + FrameRegUpdate) at the
  // end.
  std::optional<int64_t> FrameRegUpdate;
  // MIFlags for any FrameReg updating instructions.
  unsigned FrameRegUpdateFlags;
  // ...
public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {
    // ...
  }
  // Add an instruction to be replaced. Instructions must be added in the
  // ascending order of Offset, and have to be adjacent.
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  // ...
};

void TagStoreEdit::emitUnrolled(MachineBasicBlock::iterator InsertI) {
  // ...
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;

  Register BaseReg = FrameReg;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily 16-byte aligned.
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    // ...
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }

  MachineInstr *LastI = nullptr;
  while (Size) {
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode = InstrSize == 16
                          ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
                          : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
    assert(BaseRegOffsetBytes % 16 == 0);
    MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
                          // ...
                          .addImm(BaseRegOffsetBytes / 16)
                          // ...
                          ;
    // A store to [BaseReg, #0] should go last for an opportunity to fold the
    // update in a post-index store.
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
    // ...
  }
  // ...
}

void TagStoreEdit::emitLoop(MachineBasicBlock::iterator InsertI) {
  // ...
  Register BaseReg = FrameRegUpdate
                         ? FrameReg
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  // ...
  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold the BaseReg update into.
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            // ...
                            ;
  LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    // ...
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex
                              : AArch64::STGPostIndex))
        // ...
        .addImm(1 + ExtraBaseRegUpdate / 16)
        // ...
        ;
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri
                                            : AArch64::SUBXri))
        // ...
        .addImm(std::abs(ExtraBaseRegUpdate))
        // ...
        ;
  }
}
// Check if *II is a register update that can be merged into STGloop that ends
// at (Reg + Size). RemainingOffset is the required adjustment to Reg after
// the loop.
static bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                              int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    // ...
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset = /* ... */;
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}

static void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                         SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  // ...
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything; be
    // conservative and return an empty list.
    if (MI->memoperands_empty()) {
      // ...
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}
void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  Register Reg;
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  FrameReg = Reg;
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores) {
      dbgs() << "  " << *Instr.MI;
    }
  });

  // ...
  if (/* ... */) {
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  } else {
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // ...
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
        // ...
      }
    }
    // ...
    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    // ...
  }

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
}
3638 int64_t &
Size,
bool &ZeroData) {
3642 unsigned Opcode =
MI.getOpcode();
3643 ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
3644 Opcode == AArch64::STZ2Gi);
3646 if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
3647 if (!
MI.getOperand(0).isDead() || !
MI.getOperand(1).isDead())
3649 if (!
MI.getOperand(2).isImm() || !
MI.getOperand(3).isFI())
3652 Size =
MI.getOperand(2).getImm();
3656 if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
3658 else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
3663 if (
MI.getOperand(0).getReg() != AArch64::SP || !
MI.getOperand(1).isFI())
3667 16 *
MI.getOperand(2).getImm();
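// tryMergeAdjacentSTG() scans forward (bounded by kScanLimit) for mergeable
// tag stores over adjacent stack slots, sorts them by offset, and hands
// contiguous runs to TagStoreEdit, which re-emits them either unrolled or as
// a tagging loop.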
static MachineBasicBlock::iterator
tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
                    const AArch64FrameLowering *TFI, RegScavenger *RS) {
  // ...
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;
  // ...
  constexpr int kScanLimit = 10;
  int Count = 0;
  for (auto E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    // ...
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      // ...
      continue;
    }

    // Only count non-transient instructions toward the scan limit.
    if (!MI.isTransient())
      ++Count;
    // ...
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
    // ...
  }
  // ...
  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }
  // ...
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate=*/false);
      // ...
    }
    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }
  // ...
}

void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  if (StackTaggingMergeSetTag)
    for (auto &BB : MF)
      for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();)
        II = tryMergeAdjacentSTG(II, this, RS);
}
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  // ...
  if (IgnoreSPUpdates) {
    // ...
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }
  // ...
  FrameReg = AArch64::SP;
  // ...
}
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // The first object of a group should be placed first.
  bool GroupFirst = false;
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // ...
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        // ...
      }
      ++NextGroupIndex;
    }
    CurrentMembers.clear();
  }
};
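// FrameObjectCompare() orders objects so that valid objects come first, then
// the object (or group) pinned first, then by group index and object index;
// the ordering is expressed as a single tuple comparison.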
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // ...
  return std::make_tuple(!A.IsValid, A.ObjectFirst, A.GroupFirst, A.GroupIndex,
                         A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.ObjectFirst, B.GroupFirst, B.GroupIndex,
                         B.ObjectIndex);
}
void AArch64FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  // ...
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }
  // ...
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      // ...
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        // ...
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        // ...
      }
      // ...
      if (/* ... */ &&
          FrameObjects[FI].IsValid)
        // ...
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }
  // ...
  // If the function's tagged base pointer is pinned to a stack slot, we want
  // to put that slot first when possible.
  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG(dbgs() << "Final frame order:\n"; for (auto &Obj : FrameObjects) {
    if (!Obj.IsValid)
      break;
    dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
    if (Obj.ObjectFirst)
      dbgs() << ", first";
    if (Obj.GroupFirst)
      dbgs() << ", group-first";
    dbgs() << "\n";
  });
}