#define DEBUG_TYPE "frame-info"
static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    SplitSVEObjects("aarch64-split-sve-objects",
                    cl::desc("Split allocation of ZPR & PPR objects"),
                    cl::init(true), cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

static cl::opt<bool> DisableMultiVectorSpillFill(
    "aarch64-disable-multivector-spill-fill",
    cl::desc("Disable use of LD/ST pairs for SME2 or SVE2p1"), cl::init(false),
    cl::Hidden);
  bool IsTailCallReturn = (MBB.end() != MBBI)
  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    ArgumentPopSize = StackAdjust.getImm();
  return ArgumentPopSize;
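// Note: a tail call that pops its own stack arguments encodes the pop amount
// as an immediate operand on the TCRETURN pseudo (recovered above via
// StackAdjust), so the epilogue can fold the argument-area deallocation into
// its final SP restore instead of leaving it to the caller.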
bool AArch64FrameLowering::homogeneousPrologEpilog(
  unsigned NumGPRs = 0;
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (Reg == AArch64::LR) {
      assert(CSRegs[I + 1] == AArch64::FP);
      if (NumGPRs % 2 != 0)
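// The homogeneous prologue/epilogue helpers save GPRs strictly in pairs, so
// an odd GPR count before the (LR, FP) frame record would break the pairing;
// that is what the NumGPRs evenness check above rejects.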
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {

    if (MI.isDebugInstr() || MI.isPseudo() ||
        MI.getOpcode() == AArch64::ADDXri ||
        MI.getOpcode() == AArch64::ADDSXri)
                                   bool IsWin64, bool IsFunclet) const {
         "Tail call reserved stack must be aligned to 16 bytes");
  if (!IsWin64 || IsFunclet) {
      Attribute::SwiftAsync))
    int FrameIndex = H.CatchObj.FrameIndex;
    if ((FrameIndex != INT_MAX) &&
        CatchObjFrameIndices.insert(FrameIndex)) {
      FixedObjectSize = alignTo(FixedObjectSize,
    FixedObjectSize += 8;
  return alignTo(FixedObjectSize, 16);
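// Sketch of the Win64 fixed-object area this size covers (per the code
// above; exact composition is an assumption from the surrounding fragments):
// the varargs GPR home area, each catch object rounded up to its alignment,
// and an 8-byte UnwindHelp slot, with the total rounded to 16 bytes so SP
// stays ABI-aligned.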
  const unsigned RedZoneSize =
      Subtarget.getRedZoneSize(MF.getFunction());
  bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() &&
  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||

      RegInfo->hasStackRealignment(MF))

  if (TT.isOSDarwin() || TT.isOSWindows())
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
  int64_t Amount = I->getOperand(0).getImm();
  if (CalleePopAmount == 0) {
    assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
           "non-reserved call frame without var sized objects?");
  } else if (CalleePopAmount != 0) {
    assert(CalleePopAmount < 0xffffff && "call frame too large");
  const auto &TRI = *Subtarget.getRegisterInfo();
  CFIBuilder.buildDefCFA(AArch64::SP, 0);
  if (MFI.shouldSignReturnAddress(MF))
    MFI.branchProtectionPAuthLR() ? CFIBuilder.buildNegateRAStateWithPC()
                                  : CFIBuilder.buildNegateRAState();
  if (MFI.needsShadowCallStackPrologueEpilogue(MF))
    CFIBuilder.buildSameValue(AArch64::X18);
  const std::vector<CalleeSavedInfo> &CSI =
  for (const auto &Info : CSI) {
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    CFIBuilder.buildSameValue(Reg);
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \

  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
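// Zeroing is done on the widest container register: a W register is cleared
// by zeroing its X register, and B/H/S/D/Q registers by zeroing the Q
// register, or the whole Z register when SVE is present, so no stale high
// bits can leak to the caller.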
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  BitVector GPRsToZero(TRI.getNumRegs());
  BitVector FPRsToZero(TRI.getNumRegs());
    if (TRI.isGeneralPurposeRegister(MF, Reg)) {
        GPRsToZero.set(XReg);
        FPRsToZero.set(XReg);
  for (MCRegister Reg : GPRsToZero.set_bits())
  for (MCRegister Reg : FPRsToZero.set_bits())
  for (MCRegister PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg])
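// For each predicate register that needs clearing, the loop presumably emits
// a PFALSE (all-false predicate) into PReg; predicates have no cross-lane
// data to scrub beyond that single instruction.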
bool AArch64FrameLowering::windowsRequiresStackProbe(
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64FunctionInfo &MFI = *MF.getInfo<AArch64FunctionInfo>();
         StackSizeInBytes >= uint64_t(MFI.getStackProbeSize());

  for (unsigned i = 0; CSRegs[i]; ++i)
                                                     bool HasCall) const {
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  LivePhysRegs LiveRegs(TRI);
    LiveRegs.addReg(AArch64::X16);
    LiveRegs.addReg(AArch64::X17);
    LiveRegs.addReg(AArch64::X18);
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
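// Search order: X9 is preferred, then the first free GPR64. X16/X17/X18 are
// marked live-in above when a call may be emitted (HasCall), since linker
// veneers or the callee's platform register use could clobber them.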
      MBB.isLiveIn(AArch64::NZCV))
  if (findScratchNonCalleeSaveRegister(TmpMBB) == AArch64::NoRegister)
      windowsRequiresStackProbe(*MF, std::numeric_limits<uint64_t>::max()))
    if (findScratchNonCalleeSaveRegister(TmpMBB, true) == AArch64::NoRegister)
         F.needsUnwindTableEntry();

bool AArch64FrameLowering::shouldSignReturnAddressEverywhere(
  unsigned Opc = MBBI->getOpcode();
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
  case AArch64::LDPDpost:
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
  case AArch64::LDPXpost:
  case AArch64::STPXpre: {
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
  case AArch64::LDRDpost:
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
  case AArch64::LDRXpost:
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    int SEHReg0 = RegInfo->getSEHRegNum(Reg0);
    int SEHReg1 = RegInfo->getSEHRegNum(Reg1);
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
    else if (SEHReg0 >= 19 && SEHReg1 >= 19)
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegIP))
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegI))
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
  case AArch64::STPQi:
  case AArch64::LDPQi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQP))
  case AArch64::LDPQpost:
  case AArch64::STPQpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQPX))
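// This switch maps each prologue/epilogue save or restore to its Windows SEH
// unwind pseudo (e.g. an STPXpre of FP/LR becomes SEH_SaveFPLR_X). The "_X"
// forms describe pre/post-indexed accesses that also adjust SP, while the
// SEH_SaveAnyReg* family covers register/offset combinations that the
// compact unwind codes cannot encode.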
  if (ST.isTargetDarwin())
    DL = MBBI->getDebugLoc();
  EmitSignRA(MF.front());
  if (MBB.isEHFuncletEntry())
  if (MBB.isReturnBlock())
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
  if (MFI.isVariableSizedObjectIndex(FI)) {
  if (MFI.hasScalableStackID(FI)) {
    if (FPAfterSVECalleeSaves &&
        "split-sve-objects not supported with FPAfterSVECalleeSaves");
      AccessOffset = -PPRStackSize;
    return AccessOffset +
  bool IsFixed = MFI.isFixedObjectIndex(FI);
  if (!IsFixed && !IsCSR) {
    ScalableOffset = -SVEStackSize;
  } else if (FPAfterSVECalleeSaves && IsCSR) {
                                                 int64_t ObjectOffset) const {
  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, false);

                                                    int64_t ObjectOffset) const {
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();
                                                     bool ForSimm) const {
  int64_t ObjectOffset = MFI.getObjectOffset(FI);
  bool isFixed = MFI.isFixedObjectIndex(FI);
                                      FrameReg, PreferFP, ForSimm);

                                                      bool ForSimm) const {
  int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed();
  int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed();
  bool isSVE = MFI.isScalableStackID(StackID);
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
  PreferFP &= !SVEStackSize;
  } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
    assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
  } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
    bool FPOffsetFits = !ForSimm || FPOffset >= -256;
    PreferFP |= Offset > -FPOffset && !SVEStackSize;
    if (FPOffset >= 0) {
    } else if (MFI.hasVarSizedObjects()) {
      bool CanUseBP = RegInfo->hasBasePointer(MF);
      if (FPOffsetFits && CanUseBP)
    } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
             "Funclets should only be present on Win64");
      if (FPOffsetFits && PreferFP)
  assert(((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame pointer");
    FPOffset -= PPRStackSize;
    SPOffset -= PPRStackSize;
  if (FPAfterSVECalleeSaves) {
        RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
  if (FPAfterSVECalleeSaves) {
        SVEAreaOffset = SVECalleeSavedStack;
        SVEAreaOffset = SVECalleeSavedStack - SVEStackSize;
        SVEAreaOffset = SVEStackSize;
        SVEAreaOffset = SVEStackSize - SVECalleeSavedStack;
    if (UseFP && !(isFixed || isCSR))
      SVEAreaOffset = -SVEStackSize;
    if (!UseFP && (isFixed || isCSR))
      SVEAreaOffset = SVEStackSize;
    FrameReg = RegInfo->getFrameRegister(MF);
    if (RegInfo->hasBasePointer(MF))
      FrameReg = RegInfo->getBaseRegister();
      assert(!MFI.hasVarSizedObjects() &&
             "Can't use SP when we have var sized objects.");
      FrameReg = AArch64::SP;
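// In brief (a summary of the selection above, not additional logic): the SVE
// area sits between the callee saves and the scalar locals, so FP-relative
// accesses to non-fixed objects subtract the SVE size while SP-relative
// accesses to arguments/CSRs add it back; the base register then falls back
// from FP to the base pointer to SP depending on realignment and
// variable-sized objects.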
         Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&

                                             unsigned SpillCount, unsigned Reg1,
                                             unsigned Reg2, bool NeedsWinCFI,
  if (Reg2 == AArch64::FP)
    return true;
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return SpillExtendedVolatile
               ? !((Reg1 == AArch64::FP && Reg2 == AArch64::LR) ||
                   (SpillCount % 2) == 0)
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
                                      unsigned SpillCount, unsigned Reg1,
                                      unsigned Reg2, bool UsesWinAAPCS,
                                      bool NeedsWinCFI, bool NeedsFrameRecord,
                                             Reg1, Reg2, NeedsWinCFI, IsFirst,
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;
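// When a frame record is required, FP may only be paired with LR: the record
// is the adjacent (FP, LR) couple that unwinders walk, so any other partner
// for FP invalidates the pairing here.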
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG } Type;
  const TargetRegisterClass *RC;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2.isValid(); }

  bool isScalable() const { return Type == PPR || Type == ZPR; }
  for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) {
    if (SavedRegs.test(PReg)) {
      unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0;

  bool IsLocallyStreaming =
  return Subtarget.hasSVE2p1() ||
         (Subtarget.hasSME2() &&
          (!IsLocallyStreaming && Subtarget.isStreaming()));
                                      bool NeedsFrameRecord) {
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  int StackFillDir = -1;
  unsigned FirstReg = 0;
    FirstReg = Count - 1;
  bool SpillExtendedVolatile =
        const auto &Reg = CSI.getReg();
        return Reg >= AArch64::X0 && Reg <= AArch64::X18;
  int ZPRByteOffset = 0;
  int PPRByteOffset = 0;
  } else if (!FPAfterSVECalleeSaves) {
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RPI.Reg1 = CSI[i].getReg();
    if (AArch64::GPR64RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::GPR;
      RPI.RC = &AArch64::GPR64RegClass;
    } else if (AArch64::FPR64RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::FPR64;
      RPI.RC = &AArch64::FPR64RegClass;
    } else if (AArch64::FPR128RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::FPR128;
      RPI.RC = &AArch64::FPR128RegClass;
    } else if (AArch64::ZPRRegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::ZPR;
      RPI.RC = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::PPR;
      RPI.RC = &AArch64::PPRRegClass;
    } else if (RPI.Reg1 == AArch64::VG) {
      RPI.Type = RegPairInfo::VG;
      RPI.RC = &AArch64::FIXED_REGSRegClass;
    int &ScalableByteOffset = RPI.Type == RegPairInfo::PPR && SplitPPRs
                                  ? PPRByteOffset
                                  : ZPRByteOffset;
    if (HasCSHazardPadding &&
      ByteOffset += StackFillDir * StackHazardSize;
    int Scale = TRI->getSpillSize(*RPI.RC);
    if (unsigned(i + RegInc) < Count && !HasCSHazardPadding) {
      MCRegister NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      unsigned SpillCount = NeedsWinCFI ? FirstReg - i : i;
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
                SpillExtendedVolatile, SpillCount, RPI.Reg1, NextReg, IsWindows,
                NeedsWinCFI, NeedsFrameRecord, IsFirst, TRI))
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
                SpillExtendedVolatile, SpillCount, RPI.Reg1, NextReg, IsWindows,
                NeedsWinCFI, NeedsFrameRecord, IsFirst, TRI))
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
      case RegPairInfo::PPR:
      case RegPairInfo::ZPR:
            ((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1)) {
          int Offset = (ScalableByteOffset + StackFillDir * 2 * Scale) / Scale;
      case RegPairInfo::VG:
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");
            ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
             RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");
    RPI.FrameIdx = CSI[i].getFrameIdx();
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
    if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
      ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);
    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      ByteOffset += StackFillDir * 8;
    if (NeedGapToAlignStack && !IsWindows && !RPI.isScalable() &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
        ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      NeedGapToAlignStack = false;
    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    int Offset = IsWindows ? OffsetPre : OffsetPost;
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
    RPI.Offset = Offset / Scale;
    assert((!RPI.isPaired() ||
            (!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");
  auto isFrameRecord = [&] {
      return IsWindows ? RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR
                       : RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP;
    return i > 0 && RPI.Reg1 == AArch64::FP &&
           CSI[i - 1].getReg() == AArch64::LR;
  if (NeedsFrameRecord && isFrameRecord())

  std::reverse(RegPairs.begin(), RegPairs.end());
  MRI.freezeReservedRegs();

  if (homogeneousPrologEpilog(MF)) {
    for (auto &RPI : RegPairs) {
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (RPI.isPaired() && !MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);

  bool PTrueCreated = false;
    unsigned Size = TRI->getSpillSize(*RPI.RC);
    Align Alignment = TRI->getSpillAlign(*RPI.RC);
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      break;
    case RegPairInfo::ZPR:
      StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      break;
    case RegPairInfo::VG:
      StrOpc = AArch64::STRXui;
      break;
    }
    if (X0Scratch != AArch64::NoRegister)

    if (Reg1 == AArch64::VG) {
      Reg1 = findScratchNonCalleeSaveRegister(&MBB, true);
      assert(Reg1 != AArch64::NoRegister);
            return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
                AArch64::X0, LiveIn.PhysReg);

      RTLIB::Libcall LC = RTLIB::SMEABI_GET_CURRENT_VG;
          TRI->getCallPreservedMask(MF, TLI.getLibcallCallingConv(LC));
      dbgs() << ") -> fi#(" << RPI.FrameIdx;
      dbgs() << ", " << RPI.FrameIdx + 1;
            !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (RPI.isPaired() && RPI.isScalable()) {
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      auto IsPPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::PPR;
      };
      auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
      auto IsZPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::ZPR;
      };
      auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
      if (!PTrueCreated) {
        PTrueCreated = true;
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));

      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (RPI.isPaired()) {
        if (!MRI.isReserved(Reg2))
          MBB.addLiveIn(Reg2);
    if (RPI.Type == RegPairInfo::ZPR) {
    } else if (RPI.Type == RegPairInfo::PPR) {

    DL = MBBI->getDebugLoc();

  if (homogeneousPrologEpilog(MF, &MBB)) {
    for (auto &RPI : RegPairs) {

  auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; };
  auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR);
  std::reverse(PPRBegin, PPREnd);
  auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; };
  auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR);
  std::reverse(ZPRBegin, ZPREnd);

  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : RegPairs) {
    unsigned Size = TRI->getSpillSize(*RPI.RC);
    Align Alignment = TRI->getSpillAlign(*RPI.RC);
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      break;
    case RegPairInfo::ZPR:
      LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      break;
    case RegPairInfo::VG:
      dbgs() << ") -> fi#(" << RPI.FrameIdx;
      dbgs() << ", " << RPI.FrameIdx + 1;
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (RPI.isPaired() && RPI.isScalable()) {
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
      if (!PTrueCreated) {
        PTrueCreated = true;
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0),
    if (RPI.isPaired()) {
    return std::optional<int>(PSV->getFrameIndex());
  return std::nullopt;

  if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
    return std::nullopt;

  return AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
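// getMMOFrameID resolves a memory operand to a frame index through its
// FixedStack PseudoSourceValue, and getLdStFrameID applies that to the first
// memory operand of a load/store; the hazard-slot analysis below uses these
// helpers to classify which slots are touched by GPR vs FPR/SVE accesses.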
void AArch64FrameLowering::determineStackHazardSlot(
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  if (StackHazardSize == 0 || StackHazardSize % 16 != 0 ||
    return AArch64::FPR64RegClass.contains(Reg) ||
           AArch64::FPR128RegClass.contains(Reg) ||
           AArch64::ZPRRegClass.contains(Reg);
    return AArch64::PPRRegClass.contains(Reg);
  bool HasFPRStackObjects = false;
  bool HasPPRStackObjects = false;
  enum SlotType : uint8_t {
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (!FI || FI < 0 || FI > int(SlotTypes.size()))
              ? SlotType::ZPRorFPR
  for (int FI = 0; FI < int(SlotTypes.size()); ++FI) {
    HasFPRStackObjects |= SlotTypes[FI] == SlotType::ZPRorFPR;
    if (SlotTypes[FI] == SlotType::PPR) {
      HasPPRStackObjects = true;
  if (HasFPRCSRs || HasFPRStackObjects) {
                      << StackHazardSize << "\n");

    LLVM_DEBUG(dbgs() << "Using SplitSVEObjects for SVE CC function\n");

  LLVM_DEBUG(dbgs() << "Determining if SplitSVEObjects should be used in "
                       "non-SVE CC function...\n");
        << "Calling convention is not supported with SplitSVEObjects\n");
  if (!HasPPRCSRs && !HasPPRStackObjects) {
    LLVM_DEBUG(dbgs()
               << "Not using SplitSVEObjects as no PPRs are on the stack\n");
  if (!HasFPRCSRs && !HasFPRStackObjects) {
    LLVM_DEBUG(dbgs()
               << "Not using SplitSVEObjects as no FPRs or ZPRs are on the stack\n");
  [[maybe_unused]] const AArch64Subtarget &Subtarget =
      MF.getSubtarget<AArch64Subtarget>();
         "Expected SVE to be available for PPRs");
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  BitVector FPRZRegs(SavedRegs.size());
  for (size_t Reg = 0, E = SavedRegs.size(); HasFPRCSRs && Reg < E; ++Reg) {
    BitVector::reference RegBit = SavedRegs[Reg];
    unsigned SubRegIdx = 0;
      SubRegIdx = AArch64::dsub;
      SubRegIdx = AArch64::zsub;
        TRI->getMatchingSuperReg(Reg, SubRegIdx, &AArch64::ZPRRegClass);
  SavedRegs |= FPRZRegs;
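// With split SVE objects, each D/Q callee save is re-expressed as a save of
// the containing Z register (dsub/zsub mapped to the matching ZPR
// super-register above), so the FP/SVE saves all land in the ZPR area, away
// from the GPR saves and the hazard padding.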
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
      RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() : MCRegister();
  unsigned ExtraCSSpill = 0;
  bool HasUnpairedGPR64 = false;
  bool HasPairZReg = false;
  BitVector UserReservedRegs = RegInfo->getUserReservedRegs(MF);
  BitVector ReservedRegs = RegInfo->getReservedRegs(MF);
  for (unsigned i = 0; CSRegs[i]; ++i) {
    if (Reg == BasePointerReg)

    if (UserReservedRegs[Reg]) {
      SavedRegs.reset(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
    if (RegIsGPR64 || AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg)) {
      if (HasUnpairedGPR64)
        PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
      else
        PairedReg = CSRegs[i ^ 1];

      if (RegIsGPR64 && !AArch64::GPR64RegClass.contains(PairedReg)) {
        PairedReg = AArch64::NoRegister;
        HasUnpairedGPR64 = true;

      assert(PairedReg == AArch64::NoRegister ||
             AArch64::GPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR128RegClass.contains(Reg, PairedReg));
    if (AArch64::GPR64RegClass.contains(Reg) && !ReservedRegs[Reg]) {
      UnspilledCSGPR = Reg;
      UnspilledCSGPRPaired = PairedReg;

    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !ReservedRegs[PairedReg])
        ExtraCSSpill = PairedReg;

    HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) &&
                    SavedRegs.test(CSRegs[i ^ 1]));

    if (PnReg.isValid())

      SavedRegs.set(AArch64::P8);
           "Predicate cannot be a reserved register");
    SavedRegs.set(AArch64::X18);

  determineStackHazardSlot(MF, SavedRegs);

  unsigned CSStackSize = 0;
  unsigned ZPRCSStackSize = 0;
  unsigned PPRCSStackSize = 0;
  for (unsigned Reg : SavedRegs.set_bits()) {
    assert(RC && "expected register class!");
    auto SpillSize = TRI->getSpillSize(*RC);
    bool IsZPR = AArch64::ZPRRegClass.contains(Reg);
    bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg);
    if (IsZPR)
      ZPRCSStackSize += SpillSize;
    else if (IsPPR)
      PPRCSStackSize += SpillSize;
    else
      CSStackSize += SpillSize;
  unsigned NumSavedRegs = SavedRegs.count();

    SavedRegs.set(AArch64::LR);

      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);

    dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
    for (unsigned Reg : SavedRegs.set_bits())
  auto [ZPRLocalStackSize, PPRLocalStackSize] =
  uint64_t SVELocals = ZPRLocalStackSize + PPRLocalStackSize;
      alignTo(ZPRCSStackSize + PPRCSStackSize + SVELocals, 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;

  int64_t CalleeStackUsed = 0;
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;

  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      ExtraCSSpill = UnspilledCSGPR;
      if (producePairRegisters(MF)) {
        if (UnspilledCSGPRPaired == AArch64::NoRegister) {
          SavedRegs.reset(UnspilledCSGPR);
          ExtraCSSpill = AArch64::NoRegister;
          SavedRegs.set(UnspilledCSGPRPaired);
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      RS->addScavengingFrameIndex(FI);
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");

  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);
                    << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");
         "Should not invalidate callee saved info");
                                               std::vector<CalleeSavedInfo> &CSI) const {
  std::reverse(CSI.begin(), CSI.end());
      find_if(CSI, [](auto &Info) { return Info.getReg() == AArch64::LR; });
  if (It != CSI.end())
    CSI.insert(It, VGInfo);
  else
    CSI.push_back(VGInfo);
  int HazardSlotIndex = std::numeric_limits<int>::max();
  for (auto &CS : CSI) {
      assert(HazardSlotIndex == std::numeric_limits<int>::max() &&
             "Unexpected register order for hazard slot");
      LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
    unsigned Size = RegInfo->getSpillSize(*RC);
    Align Alignment(RegInfo->getSpillAlign(*RC));
    CS.setFrameIdx(FrameIdx);
        Reg == AArch64::FP) {
        HazardSlotIndex == std::numeric_limits<int>::max()) {
      LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                                      int &Min, int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");
      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
  return Min != std::numeric_limits<int>::max();
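// SVE callee saves are assigned consecutive frame indices (asserted above),
// so the whole area can be described by the [Min, Max] range this helper
// returns; an empty range reports false.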
  uint64_t &ZPRStackTop = SVEStack.ZPRStackSize;
         "SVE vectors should never be passed on the stack by value, only by "
  auto AllocateObject = [&](int FI) {
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");
    StackTop = alignTo(StackTop, Alignment);
    assert(StackTop < (uint64_t)std::numeric_limits<int64_t>::max() &&
           "SVE StackTop far too large?!");
    int64_t Offset = -int64_t(StackTop);
  int MinCSFrameIndex, MaxCSFrameIndex;
  for (int FI = MinCSFrameIndex; FI <= MaxCSFrameIndex; ++FI)

  int StackProtectorFI = -1;
    ObjectsToAllocate.push_back(StackProtectorFI);

  for (unsigned FI : ObjectsToAllocate)

         "Upwards growing stack unsupported");
  int64_t CurrentOffset =
    int FrameIndex = H.CatchObj.FrameIndex;
    if ((FrameIndex != INT_MAX) && MFI.getObjectOffset(FrameIndex) == 0) {
  int64_t UnwindHelpOffset = alignTo(CurrentOffset + 8, Align(16));
  assert(UnwindHelpOffset == getFixedObjectSize(MF, AFI, true,
         "UnwindHelpOffset must be at the start of the fixed object area");
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

  RS->enterBasicBlockEnd(MBB);
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
struct TagStoreInstr {
  MachineFunction *MF;
  MachineBasicBlock *MBB;
  MachineRegisterInfo *MRI;
  StackOffset FrameRegOffset;
  std::optional<int64_t> FrameRegUpdate;
  unsigned FrameRegUpdateFlags;
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {
  void addInstruction(TagStoreInstr I) {
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
  void clear() { TagStores.clear(); }
                const AArch64FrameLowering *TFI, bool TryMergeSPUpdate);
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    BaseRegOffsetBytes = 0;
    int64_t InstrSize = (Size > 16) ? 32 : 16;
        ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
    assert(BaseRegOffsetBytes % 16 == 0);
            .addImm(BaseRegOffsetBytes / 16)
    if (BaseRegOffsetBytes == 0)
    BaseRegOffsetBytes += InstrSize;
          : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  int64_t LoopSize = Size;
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
          TII->get(ZeroData ? AArch64::STZGloop_wback
                            : AArch64::STGloop_wback))
  LoopI->setFlags(FrameRegUpdateFlags);
  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  LLVM_DEBUG(dbgs() << "TagStoreEdit::emitLoop: LoopSize=" << LoopSize
                    << ", Size=" << Size
                    << ", ExtraBaseRegUpdate=" << ExtraBaseRegUpdate
                    << ", FrameRegUpdate=" << FrameRegUpdate
                    << ", FrameRegOffset.getFixed()="
                    << FrameRegOffset.getFixed() << "\n");
  if (LoopSize < Size) {
    int64_t STGOffset = ExtraBaseRegUpdate + 16;
    assert(STGOffset % 16 == 0 && STGOffset >= -4096 && STGOffset <= 4080 &&
           "STG immediate out of range");
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
  } else if (ExtraBaseRegUpdate) {
    int64_t AddSubOffset = std::abs(ExtraBaseRegUpdate);
    assert(AddSubOffset <= 4095 && "ADD/SUB immediate out of range");
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
                               int64_t Size, int64_t *TotalOffset) {
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
  const int64_t kMaxOffset = 4080 - 16;
  const int64_t kMinOffset = -4095;
  if (PostOffset <= kMaxOffset && PostOffset >= kMinOffset &&
      PostOffset % 16 == 0) {
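// A trailing "add/sub sp, sp, #imm" can be folded into the tag-store
// sequence: the final 16 bytes ride on an STG post-index write-back (hence
// kMaxOffset = 4080 - 16 here, keeping the post-increment inside STG's
// signed, 16-byte-scaled -4096..4080 range), and any leftover is emitted as
// a 12-bit ADD/SUB immediate.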
  for (auto &TS : TSE) {
    if (MI->memoperands_empty()) {
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();
      *MF, FirstTagStore.Offset, false,
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores) {

  if (TagStores.size() < 2)
    emitUnrolled(InsertI);

  int64_t TotalOffset = 0;
  if (TryMergeSPUpdate) {
    if (InsertI != MBB->end() &&
        canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
      UpdateInstr = &*InsertI++;

  if (!UpdateInstr && TagStores.size() < 2)

  FrameRegUpdate = TotalOffset;
  FrameRegUpdateFlags = UpdateInstr->getFlags();

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
                                                int64_t &Size, bool &ZeroData) {
  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);
  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
    Size = MI.getOperand(2).getImm();
  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
      16 * MI.getOperand(2).getImm();
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
  constexpr int kScanLimit = 10;
       NextI != E && Count < kScanLimit; ++NextI) {
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
    if (!MI.isTransient())
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects() || MI.isCall())

  LiveRegs.addLiveOuts(*MBB);
    LiveRegs.stepBackward(*I);
  if (LiveRegs.contains(AArch64::NZCV))
            [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)

  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      TSE.emitCode(InsertI, TFI, false);
    TSE.addInstruction(Instr);
    II = tryMergeAdjacentSTG(II, this, RS);
      shouldSignReturnAddressEverywhere(MF))

                                              bool IgnoreSPUpdates) const {
  if (IgnoreSPUpdates) {
    FrameReg = AArch64::SP;
  FrameReg = AArch64::SP;
  bool IsValid = false;
  int ObjectIndex = 0;
  int GroupIndex = -1;
  bool ObjectFirst = false;
  bool GroupFirst = false;
  enum { AccessFPR = 1, AccessHazard = 2, AccessGPR = 4 };

  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
      CurrentMembers.clear();
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  return std::make_tuple(!A.IsValid, A.Accesses, A.ObjectFirst, A.GroupFirst,
                         A.GroupIndex, A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.Accesses, B.ObjectFirst, B.GroupFirst,
                         B.GroupIndex, B.ObjectIndex);
      ObjectsToAllocate.empty())

  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;

  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
      if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
          FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
          FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
          FrameObjects[FI].IsValid)
        GB.AddMember(TaggedFI);
        GB.EndCurrentGroup();
    GB.EndCurrentGroup();
      FrameObject::AccessHazard;
  for (auto &Obj : FrameObjects)
    if (!Obj.Accesses ||
        Obj.Accesses == (FrameObject::AccessGPR | FrameObject::AccessFPR))
      Obj.Accesses = FrameObject::AccessGPR;

    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  for (auto &Obj : FrameObjects) {
    ObjectsToAllocate[i++] = Obj.ObjectIndex;

    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
        dbgs() << ", group-first";
AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
  MF.insert(MBBInsertPoint, LoopMBB);
  MF.insert(MBBInsertPoint, ExitMBB);
  MBB.addSuccessor(LoopMBB);
  return ExitMBB->begin();
void AArch64FrameLowering::inlineStackProbeFixed(
  const AArch64InstrInfo *TII =
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  int64_t ProbeSize = MF.getInfo<AArch64FunctionInfo>()->getStackProbeSize();
  int64_t NumBlocks = FrameSize / ProbeSize;
  int64_t ResidualSize = FrameSize % ProbeSize;
  LLVM_DEBUG(dbgs() << "Stack probing: total " << FrameSize << " bytes, "
                    << NumBlocks << " blocks of " << ProbeSize
                    << " bytes, plus " << ResidualSize << " bytes\n");
  for (int i = 0; i < NumBlocks; ++i) {
                    EmitAsyncCFI && !HasFP, CFAOffset);
  } else if (NumBlocks != 0) {
                    EmitAsyncCFI && !HasFP, CFAOffset);
    MBBI = inlineStackProbeLoopExactMultiple(MBBI, ProbeSize, ScratchReg);
    if (EmitAsyncCFI && !HasFP) {
          .buildDefCFARegister(AArch64::SP);

  if (ResidualSize != 0) {
                    EmitAsyncCFI && !HasFP, CFAOffset);
  SmallVector<MachineInstr *, 4> ToReplace;
  for (MachineInstr &MI : MBB)
    if (MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
        MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)

  for (MachineInstr *MI : ToReplace) {
    if (MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
      Register ScratchReg = MI->getOperand(0).getReg();
      int64_t FrameSize = MI->getOperand(1).getImm();
                              MI->getOperand(3).getImm());
      inlineStackProbeFixed(MI->getIterator(), ScratchReg, FrameSize,
    } else {
      assert(MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
             "Stack probe pseudo-instruction expected");
      const AArch64InstrInfo *TII =
          MI->getMF()->getSubtarget<AArch64Subtarget>().getInstrInfo();
      Register TargetReg = MI->getOperand(0).getReg();
      (void)TII->probedStackAlloc(MI->getIterator(), TargetReg, true);
    }
    MI->eraseFromParent();
  return std::make_tuple(start(), Idx) <
         std::make_tuple(Rhs.start(), Rhs.Idx);

     << (Offset.getFixed() < 0 ? "" : "+") << Offset.getFixed();
  if (Offset.getScalable())
    OS << (Offset.getScalable() < 0 ? "" : "+") << Offset.getScalable()
void AArch64FrameLowering::emitRemarks(
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  const uint64_t HazardSize =
  if (HazardSize == 0)

  std::vector<StackAccess> StackAccesses(MFI.getNumObjects());
  size_t NumFPLdSt = 0;
  size_t NumNonFPLdSt = 0;

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
      for (MachineMemOperand *MMO : MI.memoperands()) {
          StackAccesses[ArrIdx].Idx = FrameIdx;
          StackAccesses[ArrIdx].Offset =
          StackAccesses[ArrIdx].AccessTypes |= RegTy;
  if (NumFPLdSt == 0 || NumNonFPLdSt == 0)

  if (StackAccesses.front().isMixed())
    MixedObjects.push_back(&StackAccesses.front());

  for (auto It = StackAccesses.begin(), End = std::prev(StackAccesses.end());
    const auto &First = *It;
    const auto &Second = *(It + 1);

    if (Second.isMixed())
      MixedObjects.push_back(&Second);

    if ((First.isSME() && Second.isCPU()) ||
        (First.isCPU() && Second.isSME())) {
      uint64_t Distance = static_cast<uint64_t>(Second.start() - First.end());
      if (Distance < HazardSize)
  auto EmitRemark = [&](llvm::StringRef Str) {
    auto R = MachineOptimizationRemarkAnalysis(
        "sme", "StackHazard", MF.getFunction().getSubprogram(), &MF.front());
    return R << formatv("stack hazard in '{0}': ", MF.getName()).str() << Str;

  for (const auto &P : HazardPairs)
    EmitRemark(formatv("{0} is too close to {1}", *P.first, *P.second).str());

  for (const auto *Obj : MixedObjects)
    EmitRemark(
        formatv("{0} accessed by both GP and FP instructions", *Obj).str());
unsigned const MachineRegisterInfo * MRI
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static bool produceCompactUnwindFrame(const AArch64FrameLowering &, MachineFunction &MF)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
bool enableMultiVectorSpillFill(const AArch64Subtarget &Subtarget, MachineFunction &MF)
static std::optional< int > getLdStFrameID(const MachineInstr &MI, const MachineFrameInfo &MFI)
static cl::opt< bool > SplitSVEObjects("aarch64-split-sve-objects", cl::desc("Split allocation of ZPR & PPR objects"), cl::init(true), cl::Hidden)
static cl::opt< bool > StackHazardInNonStreaming("aarch64-stack-hazard-in-non-streaming", cl::init(false), cl::Hidden)
void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool invalidateRegisterPairing(bool SpillExtendedVolatile, unsigned SpillCount, unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
static cl::opt< bool > DisableMultiVectorSpillFill("aarch64-disable-multivector-spill-fill", cl::desc("Disable use of LD/ST pairs for SME2 or SVE2p1"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL, const MachineFunction &MF)
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, AssignObjectOffsets AssignOffsets)
Process all the SVE stack objects and the SVE stack size and offsets for each object.
static bool isTargetWindows(const MachineFunction &MF)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static cl::opt< unsigned > StackHazardRemarkSize("aarch64-stack-hazard-remark-size", cl::init(0), cl::Hidden)
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static unsigned getStackHazardSize(const MachineFunction &MF)
static bool invalidateWindowsRegisterPairing(bool SpillExtendedVolatile, unsigned SpillCount, unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
MCRegister findFreePredicateReg(BitVector &SavedRegs)
static bool isPPRAccess(const MachineInstr &MI)
static std::optional< int > getMMOFrameID(MachineMemOperand *MMO, const MachineFrameInfo &MFI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file contains the declaration of the AArch64PrologueEmitter and AArch64EpilogueEmitter classes,...
static const int kSetTagLoopThreshold
static int getArgumentStackToRestore(MachineFunction &MF, MachineBasicBlock &MBB)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
#define CASE(ATTRNAME, AANAME,...)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
static std::string getTypeString(Type *T)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
void emitEpilogue()
Emit the epilogue.
StackOffset getSVEStackSize(const MachineFunction &MF) const
Returns the size of the entire SVE stackframe (PPRs + ZPRs).
StackOffset getZPRStackSize(const MachineFunction &MF) const
Returns the size of the entire ZPR stackframe (calleesaves + spills).
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allo...
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
bool enableFullCFIFixup(const MachineFunction &MF) const override
enableFullCFIFixup - Returns true if we may need to fix the unwind information such that it is accura...
StackOffset getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI) const override
getFrameIndexReferenceFromSP - This method returns the offset from the stack pointer to the slot of t...
bool enableCFIFixup(const MachineFunction &MF) const override
Returns true if we may need to fix the unwind information for the function.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
friend class AArch64PrologueEmitter
bool hasFPImpl(const MachineFunction &MF) const override
hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
friend class AArch64EpilogueEmitter
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon function entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
bool hasSVECalleeSavesAboveFrameRecord(const MachineFunction &MF) const
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, TargetStackID::Value StackID, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
bool needsWinCFI(const MachineFunction &MF) const
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted f...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
StackOffset getPPRStackSize(const MachineFunction &MF) const
Returns the size of the entire PPR stackframe (calleesaves + spills + hazard padding).
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer...
bool requiresSaveVG(const MachineFunction &MF) const
void emitPacRetPlusLeafHardening(MachineFunction &MF) const
Harden the entire function with pac-ret.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getPPRCalleeSavedStackSize() const
void setHasStackFrame(bool s)
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
bool hasStackProbing() const
unsigned getArgumentStackToRestore() const
void setCalleeSaveStackHasFreeSpace(bool s)
int getCalleeSaveBaseToFrameRecordOffset() const
SignReturnAddress getSignReturnAddressCondition() const
bool hasStreamingModeChanges() const
void setPredicateRegForFillSpill(unsigned Reg)
int getStackHazardSlotIndex() const
void setCalleeSavedStackSize(unsigned Size)
void setSplitSVEObjects(bool s)
bool hasStackFrame() const
void setStackSizeSVE(uint64_t ZPR, uint64_t PPR)
std::optional< int > getTaggedBasePointerIndex() const
SMEAttrs getSMEFnAttrs() const
uint64_t getLocalStackSize() const
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
uint64_t getStackSizePPR() const
bool hasSwiftAsyncContext() const
bool hasStackHazardSlotIndex() const
void setStackHazardSlotIndex(int Index)
unsigned getZPRCalleeSavedStackSize() const
void setStackHazardCSRSlotIndex(int Index)
unsigned getPredicateRegForFillSpill() const
void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR)
bool hasCalculatedStackSizeSVE() const
uint64_t getStackSizeZPR() const
bool hasSVEStackSize() const
bool isStackHazardIncludedInCalleeSaveArea() const
unsigned getSVECalleeSavedStackSize() const
bool hasSplitSVEObjects() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
bool hasCalleeSaveStackFreeSpace() const
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
void emitPrologue()
Emit the prologue.
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
bool isSVEorStreamingSVEAvailable() const
Returns true if the target has access to either the full range of SVE instructions,...
bool isStreaming() const
Returns true if the function has a streaming body.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
unsigned getRedZoneSize(const Function &F) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
size_type size() const
size - Returns the number of bits in this bitvector.
Helper class for creating CFI instructions and inserting them into MIR.
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool usesWindowsCFI() const
Wrapper class representing physical registers. Should be passed by value.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
const AllocaInst * getObjectAllocation(int ObjectIdx) const
Return the underlying Alloca of the specified stack object if it exists.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
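A hedged sketch of allocating a scalable (SVE) spill slot with these APIs, assuming a MachineFunction MF; the size and the stack ID chosen are illustrative:

  MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = MFI.CreateStackObject(/*Size=*/16, Align(16), /*isSpillSlot=*/true);
  MFI.setStackID(FI, TargetStackID::ScalableVector); // offset becomes vscale-scaled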
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
bool isCalleeSavedObjectIndex(int ObjectIdx) const
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
bool hasScalableStackID(int ObjectIdx) const
int getStackProtectorIndex() const
Return the index for the stack protector object.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to the callee saved info vector for the current function.
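A typical traversal of this vector, assuming the callee saved info has already been computed and a const TargetRegisterInfo *TRI is in scope:

  for (const CalleeSavedInfo &Info : MF.getFrameInfo().getCalleeSavedInfo()) {
    MCRegister Reg = Info.getReg();    // the register that was saved
    int FrameIdx = Info.getFrameIdx(); // the slot it was spilled to
    dbgs() << printReg(Reg, TRI) << " -> FI#" << FrameIdx << '\n';
  }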
unsigned getNumObjects() const
Return the number of objects.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
bool hasStackObjects() const
Return true if there are any stack objects in this function.
uint8_t getStackID(int ObjectIdx) const
unsigned getNumFixedObjects() const
Return the number of fixed objects.
void setIsCalleeSavedObjectIndex(int ObjectIdx, bool IsCalleeSaved)
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
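A minimal sketch of building a memory operand for a store to a fixed stack slot FI, using the overload declared above; the 64-bit memory type and 8-byte alignment are assumptions:

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, LLT::scalar(64), Align(8));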
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
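These builder methods chain onto the result of BuildMI (declared further below). A hedged example that stores X19 to a frame slot during frame setup, assuming MBB, MBBI, DL, TII, FI, and MMO are in scope; the opcode and register are illustrative:

  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
      .addReg(AArch64::X19, getKillRegState(true))
      .addFrameIndex(FI)
      .addImm(0) // scaled immediate offset
      .addMemOperand(MMO)
      .setMIFlag(MachineInstr::FrameSetup);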
Representation of each machine instruction.
void setFlags(unsigned flags)
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const Value * getValue() const
Return the base address of the memory access.
MachineOperand class - Representation of each machine instruction operand.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI bool isLiveIn(Register Reg) const
LLVM_ABI const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
LLVM_ABI bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasStreamingInterface() const
bool hasNonStreamingInterfaceAndBody() const
bool hasStreamingBody() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
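A small worked example of the fixed/scalable split, using only the API above:

  StackOffset Off = StackOffset::getFixed(16) + StackOffset::getScalable(32);
  int64_t FixedPart = Off.getFixed();       // 16 bytes
  int64_t ScalablePart = Off.getScalable(); // 32 bytes, multiplied by vscale at runtime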
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows.
virtual bool enableCFIFixup(const MachineFunction &MF) const
Returns true if we may need to fix the unwind information for the function.
Primary interface to the complete machine description for the target machine.
const Triple & getTargetTriple() const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
LLVM_ABI bool FramePointerIsReserved(const MachineFunction &MF) const
FramePointerIsReserved - This returns true if the frame pointer must always either point to a new fra...
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Triple - Helper class for working with autoconf configuration names.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ CXX_FAST_TLS
Used for access functions.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ PreserveNone
Used for runtime calls that preserve no general-purpose registers.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ C
The default llvm calling convention, compatible with C.
@ Define
Register definition.
@ ScalablePredicateVector
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
void stable_sort(R &&Range)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
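A minimal sketch of the scope-exit idiom; the state flag is hypothetical:

  bool EmittingEpilogue = true; // hypothetical state flag
  auto Reset = make_scope_exit([&] { EmittingEpilogue = false; });
  // Any early return from here on still clears the flag when Reset is destroyed.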
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
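For example, assuming a MachineBasicBlock &MBB is in scope, this scans its instructions without explicit begin/end iterators:

  bool HasCall = any_of(MBB, [](const MachineInstr &MI) { return MI.isCall(); });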
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
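A hedged sample call that allocates 16 bytes of stack (SP -= 16) and tags the emitted instruction as frame setup, assuming MBB, MBBI, DL, and TII are in scope:

  emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(-16), TII, MachineInstr::FrameSetup);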
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ LLVM_MARK_AS_BITMASK_ENUM
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
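A one-line worked example:

  uint64_t Padded = alignTo(13, Align(8)); // rounds 13 up to the next multiple of 8, i.e. 16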
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
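Typical debug-output usage, assuming Reg and TRI are in scope:

  LLVM_DEBUG(dbgs() << "spilling " << printReg(Reg, TRI) << '\n');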
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool operator<(const StackAccess &Rhs) const
void print(raw_ostream &OS) const
std::string getTypeString() const
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap
SmallVector< WinEHHandlerType, 1 > HandlerArray