236#define DEBUG_TYPE "frame-info"
239 cl::desc(
"enable use of redzone on AArch64"),
244 cl::desc(
"reverse the CSR restore sequence"),
248 "stack-tagging-merge-settag",
258 cl::desc(
"Emit homogeneous prologue and epilogue for the size "
259 "optimization (default = off)"));
261STATISTIC(NumRedZoneFunctions,
"Number of functions using red zone");
277 int64_t ArgumentPopSize = 0;
278 if (IsTailCallReturn) {
284 ArgumentPopSize = StackAdjust.
getImm();
293 return ArgumentPopSize;
304bool AArch64FrameLowering::homogeneousPrologEpilog(
331 if (AFI->hasSwiftAsyncContext())
338 unsigned NumGPRs = 0;
339 for (
unsigned I = 0; CSRegs[
I]; ++
I) {
341 if (Reg == AArch64::LR) {
342 assert(CSRegs[
I + 1] == AArch64::FP);
343 if (NumGPRs % 2 != 0)
347 if (AArch64::GPR64RegClass.
contains(Reg))
355bool AArch64FrameLowering::producePairRegisters(
MachineFunction &MF)
const {
374 if (
MI.isDebugInstr() ||
MI.isPseudo() ||
375 MI.getOpcode() == AArch64::ADDXri ||
376 MI.getOpcode() == AArch64::ADDSXri)
403 if (!IsWin64 || IsFunclet) {
411 const unsigned UnwindHelpObject = (MF.
hasEHFunclets() ? 8 : 0);
412 return alignTo(VarArgsArea + UnwindHelpObject, 16);
429 const unsigned RedZoneSize =
438 return !(MFI.
hasCalls() ||
hasFP(MF) || NumBytes > RedZoneSize ||
491 unsigned Opc =
I->getOpcode();
492 bool IsDestroy = Opc ==
TII->getCallFrameDestroyOpcode();
493 uint64_t CalleePopAmount = IsDestroy ?
I->getOperand(1).getImm() : 0;
496 int64_t Amount =
I->getOperand(0).getImm();
504 if (CalleePopAmount == 0) {
515 assert(Amount > -0xffffff && Amount < 0xffffff &&
"call frame too large");
519 }
else if (CalleePopAmount != 0) {
522 assert(CalleePopAmount < 0xffffff &&
"call frame too large");
529void AArch64FrameLowering::emitCalleeSavedGPRLocations(
543 for (
const auto &Info : CSI) {
547 assert(!
Info.isSpilledToReg() &&
"Spilling to registers not implemented");
548 unsigned DwarfReg =
TRI.getDwarfRegNum(
Info.getReg(),
true);
560void AArch64FrameLowering::emitCalleeSavedSVELocations(
576 for (
const auto &Info : CSI) {
582 assert(!
Info.isSpilledToReg() &&
"Spilling to registers not implemented");
617 const MCInstrDesc &CFIDesc =
TII.get(TargetOpcode::CFI_INSTRUCTION);
623 nullptr,
TRI.getDwarfRegNum(AArch64::SP,
true), 0));
627 if (MFI.shouldSignReturnAddress(MF)) {
633 if (MFI.needsShadowCallStackPrologueEpilogue(MF))
635 TRI.getDwarfRegNum(AArch64::X18,
true));
638 const std::vector<CalleeSavedInfo> &CSI =
640 for (
const auto &
Info : CSI) {
641 unsigned Reg =
Info.getReg();
642 if (!
TRI.regNeedsCFI(Reg, Reg))
645 TRI.getDwarfRegNum(Reg,
true));
664 for (
const auto &
Info : CSI) {
669 unsigned Reg =
Info.getReg();
675 nullptr,
TRI.getDwarfRegNum(
Info.getReg(),
true)));
682void AArch64FrameLowering::emitCalleeSavedGPRRestores(
687void AArch64FrameLowering::emitCalleeSavedSVERestores(
692void AArch64FrameLowering::allocateStackSpace(
694 bool NeedsRealignment,
StackOffset AllocSize,
bool NeedsWinCFI,
695 bool *HasWinCFI,
bool EmitCFI,
StackOffset InitialOffset)
const {
712 EmitCFI, InitialOffset);
714 if (NeedsRealignment) {
715 const int64_t MaxAlign = MFI.getMaxAlign().value();
716 const uint64_t AndMask = ~(MaxAlign - 1);
739 case AArch64::W##n: \
740 case AArch64::X##n: \
765 case AArch64::B##n: \
766 case AArch64::H##n: \
767 case AArch64::S##n: \
768 case AArch64::D##n: \
769 case AArch64::Q##n: \
770 return HasSVE ? AArch64::Z##n : AArch64::Q##n
807void AArch64FrameLowering::emitZeroCallUsedRegs(
BitVector RegsToZero,
823 bool HasSVE = STI.hasSVE();
825 if (
TRI.isGeneralPurposeRegister(MF, Reg)) {
828 GPRsToZero.set(XReg);
829 }
else if (AArch64::FPR128RegClass.
contains(Reg) ||
830 AArch64::FPR64RegClass.
contains(Reg) ||
831 AArch64::FPR32RegClass.
contains(Reg) ||
832 AArch64::FPR16RegClass.
contains(Reg) ||
833 AArch64::FPR8RegClass.
contains(Reg)) {
836 FPRsToZero.set(XReg);
852 {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
853 AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
854 AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
856 if (RegsToZero[PReg])
887 for (
unsigned i = 0; CSRegs[i]; ++i)
888 LiveRegs.
addReg(CSRegs[i]);
895 for (
unsigned Reg : AArch64::GPR64RegClass) {
899 return AArch64::NoRegister;
910 if (!RegInfo->hasStackRealignment(*MF))
925 unsigned StackProbeSize =
926 F.getFnAttributeAsParsedInteger(
"stack-probe-size", 4096);
927 return (StackSizeInBytes >= StackProbeSize) &&
928 !
F.hasFnAttribute(
"no-stack-arg-probe");
934 F.needsUnwindTableEntry();
937bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
943 if (homogeneousPrologEpilog(MF))
966 if (MFI.hasVarSizedObjects())
969 if (
RegInfo->hasStackRealignment(MF))
986bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
988 if (!shouldCombineCSRLocalStackBump(*
MBB.
getParent(), StackBumpBytes))
998 while (LastI != Begin) {
1000 if (LastI->isTransient())
1005 switch (LastI->getOpcode()) {
1006 case AArch64::STGloop:
1007 case AArch64::STZGloop:
1009 case AArch64::STZGi:
1010 case AArch64::ST2Gi:
1011 case AArch64::STZ2Gi:
1024 unsigned Opc =
MBBI->getOpcode();
1028 unsigned ImmIdx =
MBBI->getNumOperands() - 1;
1029 int Imm =
MBBI->getOperand(ImmIdx).getImm();
1037 case AArch64::LDPDpost:
1040 case AArch64::STPDpre: {
1041 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1042 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(2).getReg());
1043 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFRegP_X))
1050 case AArch64::LDPXpost:
1053 case AArch64::STPXpre: {
1056 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
1057 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFPLR_X))
1061 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveRegP_X))
1068 case AArch64::LDRDpost:
1071 case AArch64::STRDpre: {
1072 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1073 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFReg_X))
1079 case AArch64::LDRXpost:
1082 case AArch64::STRXpre: {
1083 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1090 case AArch64::STPDi:
1091 case AArch64::LDPDi: {
1092 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1093 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1101 case AArch64::STPXi:
1102 case AArch64::LDPXi: {
1105 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
1117 case AArch64::STRXui:
1118 case AArch64::LDRXui: {
1119 int Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1126 case AArch64::STRDui:
1127 case AArch64::LDRDui: {
1128 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1142 unsigned LocalStackSize) {
1144 unsigned ImmIdx =
MBBI->getNumOperands() - 1;
1145 switch (
MBBI->getOpcode()) {
1148 case AArch64::SEH_SaveFPLR:
1149 case AArch64::SEH_SaveRegP:
1150 case AArch64::SEH_SaveReg:
1151 case AArch64::SEH_SaveFRegP:
1152 case AArch64::SEH_SaveFReg:
1153 ImmOpnd = &
MBBI->getOperand(ImmIdx);
1166 bool NeedsWinCFI,
bool *HasWinCFI,
bool EmitCFI,
1168 int CFAOffset = 0) {
1170 switch (
MBBI->getOpcode()) {
1173 case AArch64::STPXi:
1174 NewOpc = AArch64::STPXpre;
1176 case AArch64::STPDi:
1177 NewOpc = AArch64::STPDpre;
1179 case AArch64::STPQi:
1180 NewOpc = AArch64::STPQpre;
1182 case AArch64::STRXui:
1183 NewOpc = AArch64::STRXpre;
1185 case AArch64::STRDui:
1186 NewOpc = AArch64::STRDpre;
1188 case AArch64::STRQui:
1189 NewOpc = AArch64::STRQpre;
1191 case AArch64::LDPXi:
1192 NewOpc = AArch64::LDPXpost;
1194 case AArch64::LDPDi:
1195 NewOpc = AArch64::LDPDpost;
1197 case AArch64::LDPQi:
1198 NewOpc = AArch64::LDPQpost;
1200 case AArch64::LDRXui:
1201 NewOpc = AArch64::LDRXpost;
1203 case AArch64::LDRDui:
1204 NewOpc = AArch64::LDRDpost;
1206 case AArch64::LDRQui:
1207 NewOpc = AArch64::LDRQpost;
1212 auto SEH = std::next(
MBBI);
1214 SEH->eraseFromParent();
1219 int64_t MinOffset, MaxOffset;
1221 NewOpc, Scale, Width, MinOffset, MaxOffset);
1228 if (
MBBI->getOperand(
MBBI->getNumOperands() - 1).getImm() != 0 ||
1229 CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
1232 false,
false,
nullptr, EmitCFI,
1235 return std::prev(
MBBI);
1242 unsigned OpndIdx = 0;
1243 for (
unsigned OpndEnd =
MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
1245 MIB.
add(
MBBI->getOperand(OpndIdx));
1247 assert(
MBBI->getOperand(OpndIdx).getImm() == 0 &&
1248 "Unexpected immediate offset in first/last callee-save save/restore "
1250 assert(
MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
1251 "Unexpected base register in callee-save save/restore instruction!");
1252 assert(CSStackSizeInc % Scale == 0);
1253 MIB.
addImm(CSStackSizeInc / (
int)Scale);
1284 unsigned Opc =
MI.getOpcode();
1287 case AArch64::STPXi:
1288 case AArch64::STRXui:
1289 case AArch64::STPDi:
1290 case AArch64::STRDui:
1291 case AArch64::LDPXi:
1292 case AArch64::LDRXui:
1293 case AArch64::LDPDi:
1294 case AArch64::LDRDui:
1297 case AArch64::STPQi:
1298 case AArch64::STRQui:
1299 case AArch64::LDPQi:
1300 case AArch64::LDRQui:
1307 unsigned OffsetIdx =
MI.getNumExplicitOperands() - 1;
1308 assert(
MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
1309 "Unexpected base register in callee-save save/restore instruction!");
1313 assert(LocalStackSize % Scale == 0);
1314 OffsetOpnd.
setImm(OffsetOpnd.
getImm() + LocalStackSize / Scale);
1319 assert(
MBBI !=
MI.getParent()->end() &&
"Expecting a valid instruction");
1321 "Expecting a SEH instruction");
1332 switch (
I->getOpcode()) {
1335 case AArch64::STR_ZXI:
1336 case AArch64::STR_PXI:
1337 case AArch64::LDR_ZXI:
1338 case AArch64::LDR_PXI:
1349 bool NeedsUnwindInfo) {
1365 if (NeedsUnwindInfo) {
1368 static const char CFIInst[] = {
1369 dwarf::DW_CFA_val_expression,
1372 static_cast<char>(
unsigned(dwarf::DW_OP_breg18)),
1373 static_cast<char>(-8) & 0x7f,
1376 nullptr,
StringRef(CFIInst,
sizeof(CFIInst))));
1414 const int OffsetToFirstCalleeSaveFromFP =
1418 unsigned Reg =
TRI->getDwarfRegNum(
FramePtr,
true);
1420 nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
1453 bool HasFP =
hasFP(MF);
1455 bool HasWinCFI =
false;
1464 while (NonFrameStart !=
End &&
1469 if (NonFrameStart !=
MBB.
end()) {
1479 if (NonFrameStart ==
MBB.
end())
1484 for (auto &Op : MI.operands())
1485 if (Op.isReg() && Op.isDef())
1486 assert(!LiveRegs.contains(Op.getReg()) &&
1487 "live register clobbered by inserted prologue instructions");
1504 if (MFnI.needsShadowCallStackPrologueEpilogue(MF))
1506 MFnI.needsDwarfUnwindInfo(MF));
1508 if (MFnI.shouldSignReturnAddress(MF)) {
1515 if (EmitCFI && MFnI.isMTETagged()) {
1593 assert(!HasFP &&
"unexpected function without stack frame but with FP");
1595 "unexpected function without stack frame but with SVE objects");
1604 ++NumRedZoneFunctions;
1637 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
1638 bool HomPrologEpilog = homogeneousPrologEpilog(MF);
1639 if (CombineSPBump) {
1640 assert(!SVEStackSize &&
"Cannot combine SP bump with SVE");
1646 }
else if (HomPrologEpilog) {
1648 NumBytes -= PrologueSaveSize;
1649 }
else if (PrologueSaveSize != 0) {
1651 MBB,
MBBI,
DL,
TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
1653 NumBytes -= PrologueSaveSize;
1655 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
1664 NeedsWinCFI, &HasWinCFI);
1669 if (!IsFunclet && HasFP) {
1681 bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
1682 if (HaveInitialContext)
1684 Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
1700 if (HomPrologEpilog) {
1713 if (NeedsWinCFI && HasWinCFI) {
1718 NeedsWinCFI =
false;
1729 emitCalleeSavedGPRLocations(
MBB,
MBBI);
1732 const bool NeedsRealignment =
1733 NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
1734 int64_t RealignmentPadding =
1740 uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
1748 if (NumBytes >= (1 << 28))
1750 "unwinding purposes");
1752 uint32_t LowNumWords = NumWords & 0xFFFF;
1759 if ((NumWords & 0xFFFF0000) != 0) {
1762 .
addImm((NumWords & 0xFFFF0000) >> 16)
1833 if (RealignmentPadding > 0) {
1834 if (RealignmentPadding >= 4096) {
1837 .
addImm(RealignmentPadding)
1847 .
addImm(RealignmentPadding)
1864 StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize;
1871 CalleeSavesBegin =
MBBI;
1875 CalleeSavesEnd =
MBBI;
1878 SVELocalsSize = SVEStackSize - SVECalleeSavesSize;
1884 allocateStackSpace(
MBB, CalleeSavesBegin,
false, SVECalleeSavesSize,
false,
1885 nullptr, EmitAsyncCFI && !HasFP, CFAOffset);
1886 CFAOffset += SVECalleeSavesSize;
1889 emitCalleeSavedSVELocations(
MBB, CalleeSavesEnd);
1894 "Cannot use redzone with stack realignment");
1899 allocateStackSpace(
MBB, CalleeSavesEnd, NeedsRealignment,
1901 NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP,
1913 if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
1925 if (NeedsWinCFI && HasWinCFI) {
1933 if (IsFunclet &&
F.hasPersonalityFn()) {
1943 if (EmitCFI && !EmitAsyncCFI) {
1950 *RegInfo, AArch64::SP, AArch64::SP, TotalSize,
1956 emitCalleeSavedGPRLocations(
MBB,
MBBI);
1957 emitCalleeSavedSVELocations(
MBB,
MBBI);
1962 switch (
MI.getOpcode()) {
1965 case AArch64::CATCHRET:
1966 case AArch64::CLEANUPRET:
1981 bool HasWinCFI =
false;
1982 bool IsFunclet =
false;
1985 DL =
MBBI->getDebugLoc();
1993 BuildMI(MBB, MBB.getFirstTerminator(), DL,
1994 TII->get(AArch64::PAUTH_EPILOGUE))
1995 .setMIFlag(MachineInstr::FrameDestroy);
2005 TII->get(AArch64::SEH_EpilogEnd))
2032 int64_t AfterCSRPopSize = ArgumentStackToRestore;
2040 if (homogeneousPrologEpilog(MF, &
MBB)) {
2044 auto HomogeneousEpilog = std::prev(LastPopI);
2045 if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
2046 LastPopI = HomogeneousEpilog;
2056 assert(AfterCSRPopSize == 0);
2059 bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(
MBB, NumBytes);
2062 bool CombineAfterCSRBump =
false;
2063 if (!CombineSPBump && PrologueSaveSize != 0) {
2065 while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
2067 Pop = std::prev(Pop);
2070 const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
2074 if (OffsetOp.
getImm() == 0 && AfterCSRPopSize >= 0) {
2076 MBB, Pop,
DL,
TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
2083 AfterCSRPopSize += PrologueSaveSize;
2084 CombineAfterCSRBump =
true;
2093 while (LastPopI != Begin) {
2099 }
else if (CombineSPBump)
2101 NeedsWinCFI, &HasWinCFI);
2113 EpilogStartI = LastPopI;
2149 if (CombineSPBump) {
2150 assert(!SVEStackSize &&
"Cannot combine SP bump with SVE");
2153 if (EmitCFI &&
hasFP(MF)) {
2155 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP,
true);
2170 NumBytes -= PrologueSaveSize;
2171 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
2175 StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
2178 RestoreBegin = std::prev(RestoreEnd);
2179 while (RestoreBegin !=
MBB.
begin() &&
2188 DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
2189 DeallocateAfter = CalleeSavedSizeAsOffset;
2211 MBB, RestoreBegin,
DL, AArch64::SP, AArch64::SP,
2213 false,
false,
nullptr, EmitCFI && !
hasFP(MF),
2220 false,
nullptr, EmitCFI && !
hasFP(MF),
2226 false,
nullptr, EmitCFI && !
hasFP(MF),
2231 emitCalleeSavedSVERestores(
MBB, RestoreEnd);
2238 if (RedZone && AfterCSRPopSize == 0)
2245 bool NoCalleeSaveRestore = PrologueSaveSize == 0;
2246 int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
2247 if (NoCalleeSaveRestore)
2248 StackRestoreBytes += AfterCSRPopSize;
2251 MBB, LastPopI,
DL, AArch64::SP, AArch64::SP,
2258 if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
2271 MBB, LastPopI,
DL, AArch64::SP, AArch64::FP,
2274 }
else if (NumBytes)
2280 if (EmitCFI &&
hasFP(MF)) {
2282 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP,
true);
2293 if (AfterCSRPopSize) {
2294 assert(AfterCSRPopSize > 0 &&
"attempting to reallocate arg stack that an "
2295 "interrupt may have clobbered");
2300 false, NeedsWinCFI, &HasWinCFI, EmitCFI,
2331 int64_t ObjectOffset) {
2336 unsigned FixedObject =
2345 int64_t ObjectOffset) {
2356 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
2363 bool ForSimm)
const {
2366 bool isFixed = MFI.isFixedObjectIndex(FI);
2373 const MachineFunction &MF, int64_t ObjectOffset,
bool isFixed,
bool isSVE,
2374 Register &FrameReg,
bool PreferFP,
bool ForSimm)
const {
2397 PreferFP &= !SVEStackSize;
2405 }
else if (isCSR && RegInfo->hasStackRealignment(MF)) {
2409 assert(
hasFP(MF) &&
"Re-aligned stack must have frame pointer");
2411 }
else if (
hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
2416 bool FPOffsetFits = !ForSimm || FPOffset >= -256;
2417 PreferFP |=
Offset > -FPOffset && !SVEStackSize;
2419 if (MFI.hasVarSizedObjects()) {
2423 bool CanUseBP = RegInfo->hasBasePointer(MF);
2424 if (FPOffsetFits && CanUseBP)
2431 }
else if (FPOffset >= 0) {
2436 }
else if (MF.
hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
2443 "Funclets should only be present on Win64");
2447 if (FPOffsetFits && PreferFP)
2454 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
2455 "In the presence of dynamic stack pointer realignment, "
2456 "non-argument/CSR objects cannot be accessed through the frame pointer");
2468 RegInfo->hasStackRealignment(MF))) {
2469 FrameReg = RegInfo->getFrameRegister(MF);
2473 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
2479 if (UseFP && !(isFixed || isCSR))
2480 ScalableOffset = -SVEStackSize;
2481 if (!UseFP && (isFixed || isCSR))
2482 ScalableOffset = SVEStackSize;
2485 FrameReg = RegInfo->getFrameRegister(MF);
2490 if (RegInfo->hasBasePointer(MF))
2491 FrameReg = RegInfo->getBaseRegister();
2493 assert(!MFI.hasVarSizedObjects() &&
2494 "Can't use SP when we have var sized objects.");
2495 FrameReg = AArch64::SP;
2521 Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
2526 bool NeedsWinCFI,
bool IsFirst,
2535 if (Reg2 == AArch64::FP)
2539 if (
TRI->getEncodingValue(Reg2) ==
TRI->getEncodingValue(Reg1) + 1)
2546 if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
2547 (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
2557 bool UsesWinAAPCS,
bool NeedsWinCFI,
2558 bool NeedsFrameRecord,
bool IsFirst,
2566 if (NeedsFrameRecord)
2567 return Reg2 == AArch64::LR;
2575 unsigned Reg1 = AArch64::NoRegister;
2576 unsigned Reg2 = AArch64::NoRegister;
2579 enum RegType { GPR, FPR64, FPR128, PPR, ZPR }
Type;
2581 RegPairInfo() =
default;
2583 bool isPaired()
const {
return Reg2 != AArch64::NoRegister; }
2585 unsigned getScale()
const {
2599 bool isScalable()
const {
return Type == PPR ||
Type == ZPR; }
2607 bool NeedsFrameRecord) {
2617 unsigned Count = CSI.
size();
2624 "Odd number of callee-saved regs to spill!");
2626 int StackFillDir = -1;
2628 unsigned FirstReg = 0;
2636 FirstReg = Count - 1;
2642 for (
unsigned i = FirstReg; i < Count; i += RegInc) {
2644 RPI.Reg1 = CSI[i].getReg();
2646 if (AArch64::GPR64RegClass.
contains(RPI.Reg1))
2647 RPI.Type = RegPairInfo::GPR;
2648 else if (AArch64::FPR64RegClass.
contains(RPI.Reg1))
2649 RPI.Type = RegPairInfo::FPR64;
2650 else if (AArch64::FPR128RegClass.
contains(RPI.Reg1))
2651 RPI.Type = RegPairInfo::FPR128;
2652 else if (AArch64::ZPRRegClass.
contains(RPI.Reg1))
2653 RPI.Type = RegPairInfo::ZPR;
2654 else if (AArch64::PPRRegClass.
contains(RPI.Reg1))
2655 RPI.Type = RegPairInfo::PPR;
2660 if (
unsigned(i + RegInc) < Count) {
2661 Register NextReg = CSI[i + RegInc].getReg();
2662 bool IsFirst = i == FirstReg;
2664 case RegPairInfo::GPR:
2665 if (AArch64::GPR64RegClass.
contains(NextReg) &&
2667 NeedsWinCFI, NeedsFrameRecord, IsFirst,
2671 case RegPairInfo::FPR64:
2672 if (AArch64::FPR64RegClass.
contains(NextReg) &&
2677 case RegPairInfo::FPR128:
2678 if (AArch64::FPR128RegClass.
contains(NextReg))
2681 case RegPairInfo::PPR:
2682 case RegPairInfo::ZPR:
2693 assert((!RPI.isPaired() ||
2694 (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
2695 "Out of order callee saved regs!");
2697 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
2698 RPI.Reg1 == AArch64::LR) &&
2699 "FrameRecord must be allocated together with LR");
2702 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
2703 RPI.Reg2 == AArch64::LR) &&
2704 "FrameRecord must be allocated together with LR");
2712 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
2713 RPI.Reg1 + 1 == RPI.Reg2))) &&
2714 "Callee-save registers not saved as adjacent register pair!");
2716 RPI.FrameIdx = CSI[i].getFrameIdx();
2719 RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
2721 int Scale = RPI.getScale();
2723 int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2724 assert(OffsetPre % Scale == 0);
2726 if (RPI.isScalable())
2727 ScalableByteOffset += StackFillDir * Scale;
2729 ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
2734 ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
2735 (IsWindows && RPI.Reg2 == AArch64::LR)))
2736 ByteOffset += StackFillDir * 8;
2738 assert(!(RPI.isScalable() && RPI.isPaired()) &&
2739 "Paired spill/fill instructions don't exist for SVE vectors");
2743 if (NeedGapToAlignStack && !NeedsWinCFI &&
2744 !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
2745 !RPI.isPaired() && ByteOffset % 16 != 0) {
2746 ByteOffset += 8 * StackFillDir;
2747 assert(MFI.getObjectAlign(RPI.FrameIdx) <=
Align(16));
2751 MFI.setObjectAlignment(RPI.FrameIdx,
Align(16));
2752 NeedGapToAlignStack =
false;
2755 int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2756 assert(OffsetPost % Scale == 0);
2759 int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
2764 ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
2765 (IsWindows && RPI.Reg2 == AArch64::LR)))
2767 RPI.Offset =
Offset / Scale;
2769 assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
2770 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
2771 "Offset out of bounds for LDP/STP immediate");
2775 if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
2776 RPI.Reg2 == AArch64::FP) ||
2777 (IsWindows && RPI.Reg1 == AArch64::FP &&
2778 RPI.Reg2 == AArch64::LR)))
2792 MFI.setObjectAlignment(CSI[0].getFrameIdx(),
Align(16));
2795 std::reverse(RegPairs.
begin(), RegPairs.
end());
2811 if (homogeneousPrologEpilog(MF)) {
2815 for (
auto &RPI : RegPairs) {
2820 if (!
MRI.isReserved(RPI.Reg1))
2822 if (RPI.isPaired() && !
MRI.isReserved(RPI.Reg2))
2828 unsigned Reg1 = RPI.Reg1;
2829 unsigned Reg2 = RPI.Reg2;
2845 case RegPairInfo::GPR:
2846 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
2848 Alignment =
Align(8);
2850 case RegPairInfo::FPR64:
2851 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
2853 Alignment =
Align(8);
2855 case RegPairInfo::FPR128:
2856 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
2858 Alignment =
Align(16);
2860 case RegPairInfo::ZPR:
2861 StrOpc = AArch64::STR_ZXI;
2863 Alignment =
Align(16);
2865 case RegPairInfo::PPR:
2866 StrOpc = AArch64::STR_PXI;
2868 Alignment =
Align(2);
2873 dbgs() <<
") -> fi#(" << RPI.FrameIdx;
2874 if (RPI.isPaired())
dbgs() <<
", " << RPI.FrameIdx + 1;
2877 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
2878 "Windows unwdinding requires a consecutive (FP,LR) pair");
2882 unsigned FrameIdxReg1 = RPI.FrameIdx;
2883 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2884 if (NeedsWinCFI && RPI.isPaired()) {
2889 if (!
MRI.isReserved(Reg1))
2891 if (RPI.isPaired()) {
2892 if (!
MRI.isReserved(Reg2))
2912 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
2929 DL =
MBBI->getDebugLoc();
2934 unsigned Reg1 = RPI.Reg1;
2935 unsigned Reg2 = RPI.Reg2;
2949 case RegPairInfo::GPR:
2950 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
2952 Alignment =
Align(8);
2954 case RegPairInfo::FPR64:
2955 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
2957 Alignment =
Align(8);
2959 case RegPairInfo::FPR128:
2960 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
2962 Alignment =
Align(16);
2964 case RegPairInfo::ZPR:
2965 LdrOpc = AArch64::LDR_ZXI;
2967 Alignment =
Align(16);
2969 case RegPairInfo::PPR:
2970 LdrOpc = AArch64::LDR_PXI;
2972 Alignment =
Align(2);
2977 dbgs() <<
") -> fi#(" << RPI.FrameIdx;
2978 if (RPI.isPaired())
dbgs() <<
", " << RPI.FrameIdx + 1;
2984 unsigned FrameIdxReg1 = RPI.FrameIdx;
2985 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2986 if (NeedsWinCFI && RPI.isPaired()) {
2991 if (RPI.isPaired()) {
3012 for (
const RegPairInfo &RPI :
reverse(RegPairs))
3013 if (RPI.isScalable())
3016 if (homogeneousPrologEpilog(MF, &
MBB)) {
3019 for (
auto &RPI : RegPairs) {
3028 for (
const RegPairInfo &RPI :
reverse(RegPairs)) {
3029 if (RPI.isScalable())
3038 for (
const RegPairInfo &RPI : RegPairs) {
3039 if (RPI.isScalable())
3061 unsigned UnspilledCSGPR = AArch64::NoRegister;
3062 unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
3071 unsigned ExtraCSSpill = 0;
3072 bool HasUnpairedGPR64 =
false;
3074 for (
unsigned i = 0; CSRegs[i]; ++i) {
3075 const unsigned Reg = CSRegs[i];
3078 if (Reg == BasePointerReg)
3081 bool RegUsed = SavedRegs.
test(Reg);
3082 unsigned PairedReg = AArch64::NoRegister;
3083 const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
3084 if (RegIsGPR64 || AArch64::FPR64RegClass.
contains(Reg) ||
3085 AArch64::FPR128RegClass.contains(Reg)) {
3088 if (HasUnpairedGPR64)
3089 PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
3091 PairedReg = CSRegs[i ^ 1];
3098 if (RegIsGPR64 && !AArch64::GPR64RegClass.
contains(PairedReg)) {
3099 PairedReg = AArch64::NoRegister;
3100 HasUnpairedGPR64 =
true;
3102 assert(PairedReg == AArch64::NoRegister ||
3103 AArch64::GPR64RegClass.
contains(Reg, PairedReg) ||
3104 AArch64::FPR64RegClass.
contains(Reg, PairedReg) ||
3105 AArch64::FPR128RegClass.
contains(Reg, PairedReg));
3108 if (AArch64::GPR64RegClass.
contains(Reg) &&
3110 UnspilledCSGPR = Reg;
3111 UnspilledCSGPRPaired = PairedReg;
3119 if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
3120 !SavedRegs.
test(PairedReg)) {
3121 SavedRegs.
set(PairedReg);
3122 if (AArch64::GPR64RegClass.
contains(PairedReg) &&
3124 ExtraCSSpill = PairedReg;
3135 SavedRegs.
set(AArch64::X18);
3139 unsigned CSStackSize = 0;
3140 unsigned SVECSStackSize = 0;
3143 for (
unsigned Reg : SavedRegs.
set_bits()) {
3145 if (AArch64::PPRRegClass.
contains(Reg) ||
3146 AArch64::ZPRRegClass.
contains(Reg))
3153 unsigned NumSavedRegs = SavedRegs.
count();
3159 SavedRegs.
set(AArch64::FP);
3160 SavedRegs.
set(AArch64::LR);
3170 int64_t SVEStackSize =
3171 alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
3172 bool CanEliminateFrame = (SavedRegs.
count() == 0) && !SVEStackSize;
3181 int64_t CalleeStackUsed = 0;
3184 if (FixedOff > CalleeStackUsed) CalleeStackUsed = FixedOff;
3188 bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
3189 CalleeStackUsed) > EstimatedStackSizeLimit;
3191 AFI->setHasStackFrame(
true);
3200 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
3202 <<
" to get a scratch register.\n");
3203 SavedRegs.
set(UnspilledCSGPR);
3204 ExtraCSSpill = UnspilledCSGPR;
3209 if (producePairRegisters(MF)) {
3210 if (UnspilledCSGPRPaired == AArch64::NoRegister) {
3213 SavedRegs.
reset(UnspilledCSGPR);
3214 ExtraCSSpill = AArch64::NoRegister;
3217 SavedRegs.
set(UnspilledCSGPRPaired);
3226 unsigned Size =
TRI->getSpillSize(RC);
3227 Align Alignment =
TRI->getSpillAlign(RC);
3230 LLVM_DEBUG(
dbgs() <<
"No available CS registers, allocated fi#" << FI
3231 <<
" as the emergency spill slot.\n");
3236 CSStackSize += 8 * (SavedRegs.
count() - NumSavedRegs);
3240 if (
hasFP(MF) && AFI->hasSwiftAsyncContext())
3245 << EstimatedStackSize + AlignedCSStackSize
3249 AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
3250 "Should not invalidate callee saved info");
3254 AFI->setCalleeSavedStackSize(AlignedCSStackSize);
3255 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
3256 AFI->setSVECalleeSavedStackSize(
alignTo(SVECSStackSize, 16));
3261 std::vector<CalleeSavedInfo> &CSI,
unsigned &MinCSFrameIndex,
3262 unsigned &MaxCSFrameIndex)
const {
3270 std::reverse(CSI.begin(), CSI.end());
3284 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3285 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3288 for (
auto &CS : CSI) {
3295 CS.setFrameIdx(FrameIdx);
3297 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3298 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3302 Reg == AArch64::FP) {
3305 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3306 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3326 int &Min,
int &Max) {
3327 Min = std::numeric_limits<int>::max();
3328 Max = std::numeric_limits<int>::min();
3334 for (
auto &CS : CSI) {
3335 if (AArch64::ZPRRegClass.
contains(CS.getReg()) ||
3336 AArch64::PPRRegClass.contains(CS.getReg())) {
3337 assert((Max == std::numeric_limits<int>::min() ||
3338 Max + 1 == CS.getFrameIdx()) &&
3339 "SVE CalleeSaves are not consecutive");
3341 Min = std::min(Min, CS.getFrameIdx());
3342 Max = std::max(Max, CS.getFrameIdx());
3345 return Min != std::numeric_limits<int>::max();
3354 int &MinCSFrameIndex,
3355 int &MaxCSFrameIndex,
3356 bool AssignOffsets) {
3361 "SVE vectors should never be passed on the stack by value, only by "
3365 auto Assign = [&MFI](
int FI, int64_t
Offset) {
3375 for (
int I = MinCSFrameIndex;
I <= MaxCSFrameIndex; ++
I) {
3391 int StackProtectorFI = -1;
3395 ObjectsToAllocate.
push_back(StackProtectorFI);
3401 if (
I == StackProtectorFI)
3403 if (MaxCSFrameIndex >=
I &&
I >= MinCSFrameIndex)
3412 for (
unsigned FI : ObjectsToAllocate) {
3417 if (Alignment >
Align(16))
3419 "Alignment of scalable vectors > 16 bytes is not yet supported");
3429int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
3431 int MinCSFrameIndex, MaxCSFrameIndex;
3435int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
3446 "Upwards growing stack unsupported");
3448 int MinCSFrameIndex, MaxCSFrameIndex;
3449 int64_t SVEStackSize =
3450 assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
3470 int64_t FixedObject =
3483 assert(DstReg &&
"There must be a free register after frame setup");
3492struct TagStoreInstr {
3515 std::optional<int64_t> FrameRegUpdate;
3517 unsigned FrameRegUpdateFlags;
3528 :
MBB(
MBB), ZeroData(ZeroData) {
3534 void addInstruction(TagStoreInstr
I) {
3536 TagStores.
back().Offset + TagStores.
back().Size ==
I.Offset) &&
3537 "Non-adjacent tag store instructions.");
3552 const int64_t kMinOffset = -256 * 16;
3553 const int64_t kMaxOffset = 255 * 16;
3556 int64_t BaseRegOffsetBytes = FrameRegOffset.
getFixed();
3557 if (BaseRegOffsetBytes < kMinOffset ||
3558 BaseRegOffsetBytes + (
Size -
Size % 32) > kMaxOffset ||
3562 BaseRegOffsetBytes % 16 != 0) {
3563 Register ScratchReg =
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
3566 BaseReg = ScratchReg;
3567 BaseRegOffsetBytes = 0;
3572 int64_t InstrSize = (
Size > 16) ? 32 : 16;
3575 ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
3576 : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
3577 assert(BaseRegOffsetBytes % 16 == 0);
3581 .
addImm(BaseRegOffsetBytes / 16)
3585 if (BaseRegOffsetBytes == 0)
3587 BaseRegOffsetBytes += InstrSize;
3601 :
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
3602 Register SizeReg =
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
3606 int64_t LoopSize =
Size;
3609 if (FrameRegUpdate && *FrameRegUpdate)
3610 LoopSize -= LoopSize % 32;
3612 TII->get(ZeroData ? AArch64::STZGloop_wback
3613 : AArch64::STGloop_wback))
3620 LoopI->
setFlags(FrameRegUpdateFlags);
3622 int64_t ExtraBaseRegUpdate =
3623 FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.
getFixed() -
Size) : 0;
3624 if (LoopSize <
Size) {
3629 TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
3633 .
addImm(1 + ExtraBaseRegUpdate / 16)
3636 }
else if (ExtraBaseRegUpdate) {
3640 TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
3643 .
addImm(std::abs(ExtraBaseRegUpdate))
3653 int64_t
Size, int64_t *TotalOffset) {
3655 if ((
MI.getOpcode() == AArch64::ADDXri ||
3656 MI.getOpcode() == AArch64::SUBXri) &&
3657 MI.getOperand(0).getReg() == Reg &&
MI.getOperand(1).getReg() == Reg) {
3659 int64_t
Offset =
MI.getOperand(2).getImm() << Shift;
3660 if (
MI.getOpcode() == AArch64::SUBXri)
3662 int64_t AbsPostOffset = std::abs(
Offset -
Size);
3663 const int64_t kMaxOffset =
3665 if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
3676 for (
auto &TS : TSE) {
3680 if (
MI->memoperands_empty()) {
3684 MemRefs.
append(
MI->memoperands_begin(),
MI->memoperands_end());
3690 bool TryMergeSPUpdate) {
3691 if (TagStores.
empty())
3693 TagStoreInstr &FirstTagStore = TagStores[0];
3694 TagStoreInstr &LastTagStore = TagStores[TagStores.
size() - 1];
3695 Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
3696 DL = TagStores[0].MI->getDebugLoc();
3700 *MF, FirstTagStore.Offset,
false ,
false , Reg,
3703 FrameRegUpdate = std::nullopt;
3705 mergeMemRefs(TagStores, CombinedMemRefs);
3708 for (
const auto &Instr
3709 : TagStores) {
dbgs() <<
" " << *
Instr.MI; });
3715 if (TagStores.size() < 2)
3717 emitUnrolled(InsertI);
3720 int64_t TotalOffset = 0;
3721 if (TryMergeSPUpdate) {
3727 if (InsertI !=
MBB->
end() &&
3728 canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.
getFixed() +
Size,
3730 UpdateInstr = &*InsertI++;
3736 if (!UpdateInstr && TagStores.size() < 2)
3740 FrameRegUpdate = TotalOffset;
3741 FrameRegUpdateFlags = UpdateInstr->
getFlags();
3748 for (
auto &TS : TagStores)
3749 TS.MI->eraseFromParent();
3753 int64_t &
Size,
bool &ZeroData) {
3758 ZeroData = (
Opcode == AArch64::STZGloop ||
Opcode == AArch64::STZGi ||
3759 Opcode == AArch64::STZ2Gi);
3761 if (
Opcode == AArch64::STGloop ||
Opcode == AArch64::STZGloop) {
3762 if (!
MI.getOperand(0).isDead() || !
MI.getOperand(1).isDead())
3764 if (!
MI.getOperand(2).isImm() || !
MI.getOperand(3).isFI())
3767 Size =
MI.getOperand(2).getImm();
3771 if (
Opcode == AArch64::STGi ||
Opcode == AArch64::STZGi)
3773 else if (
Opcode == AArch64::ST2Gi ||
Opcode == AArch64::STZ2Gi)
3778 if (
MI.getOperand(0).getReg() != AArch64::SP || !
MI.getOperand(1).isFI())
3782 16 *
MI.getOperand(2).getImm();
3802 if (!isMergeableStackTaggingInstruction(
MI,
Offset,
Size, FirstZeroData))
3808 constexpr int kScanLimit = 10;
3811 NextI !=
E && Count < kScanLimit; ++NextI) {
3820 if (isMergeableStackTaggingInstruction(
MI,
Offset,
Size, ZeroData)) {
3821 if (ZeroData != FirstZeroData)
3829 if (!
MI.isTransient())
3838 if (
MI.mayLoadOrStore() ||
MI.hasUnmodeledSideEffects())
3854 LiveRegs.addLiveOuts(*
MBB);
3859 LiveRegs.stepBackward(*
I);
3862 if (LiveRegs.contains(AArch64::NZCV))
3866 [](
const TagStoreInstr &
Left,
const TagStoreInstr &
Right) {
3871 int64_t CurOffset = Instrs[0].Offset;
3872 for (
auto &Instr : Instrs) {
3873 if (CurOffset >
Instr.Offset)
3880 TagStoreEdit TSE(
MBB, FirstZeroData);
3881 std::optional<int64_t> EndOffset;
3882 for (
auto &Instr : Instrs) {
3883 if (EndOffset && *EndOffset !=
Instr.Offset) {
3885 TSE.emitCode(InsertI, TFI,
false);
3889 TSE.addInstruction(Instr);
3908 II = tryMergeAdjacentSTG(II,
this, RS);
3916 bool IgnoreSPUpdates)
const {
3918 if (IgnoreSPUpdates) {
3921 FrameReg = AArch64::SP;
3931 FrameReg = AArch64::SP;
3956 bool IsValid =
false;
3958 int ObjectIndex = 0;
3960 int GroupIndex = -1;
3962 bool ObjectFirst =
false;
3965 bool GroupFirst =
false;
3970 int NextGroupIndex = 0;
3971 std::vector<FrameObject> &Objects;
3974 GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
3976 void EndCurrentGroup() {
3977 if (CurrentMembers.
size() > 1) {
3982 for (
int Index : CurrentMembers) {
3983 Objects[
Index].GroupIndex = NextGroupIndex;
3989 CurrentMembers.clear();
3993bool FrameObjectCompare(
const FrameObject &
A,
const FrameObject &
B) {
4011 return std::make_tuple(!
A.IsValid,
A.ObjectFirst,
A.GroupFirst,
A.GroupIndex,
4013 std::make_tuple(!
B.IsValid,
B.ObjectFirst,
B.GroupFirst,
B.GroupIndex,
4025 for (
auto &Obj : ObjectsToAllocate) {
4026 FrameObjects[Obj].IsValid =
true;
4027 FrameObjects[Obj].ObjectIndex = Obj;
4031 GroupBuilder GB(FrameObjects);
4032 for (
auto &
MBB : MF) {
4033 for (
auto &
MI :
MBB) {
4034 if (
MI.isDebugInstr())
4037 switch (
MI.getOpcode()) {
4038 case AArch64::STGloop:
4039 case AArch64::STZGloop:
4043 case AArch64::STZGi:
4044 case AArch64::ST2Gi:
4045 case AArch64::STZ2Gi:
4058 FrameObjects[FI].IsValid)
4066 GB.AddMember(TaggedFI);
4068 GB.EndCurrentGroup();
4071 GB.EndCurrentGroup();
4081 FrameObjects[*TBPI].ObjectFirst =
true;
4082 FrameObjects[*TBPI].GroupFirst =
true;
4083 int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
4084 if (FirstGroupIndex >= 0)
4085 for (FrameObject &Object : FrameObjects)
4086 if (Object.GroupIndex == FirstGroupIndex)
4087 Object.GroupFirst =
true;
4093 for (
auto &Obj : FrameObjects) {
4097 ObjectsToAllocate[i++] = Obj.ObjectIndex;
4104 dbgs() <<
" " << Obj.ObjectIndex <<
": group " << Obj.GroupIndex;
4105 if (Obj.ObjectFirst)
4106 dbgs() <<
", first";
4108 dbgs() <<
", group-first";
unsigned const MachineRegisterInfo * MRI
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static int64_t getArgumentStackToRestore(MachineFunction &MF, MachineBasicBlock &MBB)
Returns how much of the incoming argument stack area (in bytes) we should clean up in an epilogue.
static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static void emitCalleeSavedRestores(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool SVE)
static void computeCalleeSaveRegisterPairs(MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned FixedObject)
static bool needsWinCFI(const MachineFunction &MF)
static cl::opt< bool > ReverseCSRRestoreSeq("reverse-csr-restore-seq", cl::desc("reverse the CSR restore sequence"), cl::init(false), cl::Hidden)
static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, unsigned DwarfReg)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
static bool produceCompactUnwindFrame(MachineFunction &MF)
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex, bool AssignOffsets)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool windowsRequiresStackProbe(MachineFunction &MF, uint64_t StackSizeInBytes)
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, uint64_t LocalStackSize, bool NeedsWinCFI, bool *HasWinCFI)
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, MachineInstr::MIFlag FrameFlag=MachineInstr::FrameSetup, int CFAOffset=0)
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, unsigned LocalStackSize)
static StackOffset getSVEStackSize(const MachineFunction &MF)
Returns the size of the entire SVE stackframe (calleesaves + spills).
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, const TargetInstrInfo &TII, MachineInstr::MIFlag Flag)
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB)
static void getLivePhysRegsUpTo(MachineInstr &MI, const TargetRegisterInfo &TRI, LivePhysRegs &LiveRegs)
Collect live registers from the end of MI's parent up to (including) MI in LiveRegs.
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
static bool IsSVECalleeSave(MachineBasicBlock::iterator I)
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static StackOffset getFPOffset(const MachineFunction &MF, int64_t ObjectOffset)
static bool isTargetWindows(const MachineFunction &MF)
static StackOffset getStackOffset(const MachineFunction &MF, int64_t ObjectOffset)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static bool isFuncletReturnInstr(const MachineInstr &MI)
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool NeedsWinCFI, bool NeedsUnwindInfo)
static unsigned getFixedObjectSize(const MachineFunction &MF, const AArch64FunctionInfo *AFI, bool IsWin64, bool IsFunclet)
Returns the size of the fixed object area (allocated next to sp on entry) On Win64 this may include a...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static const int kSetTagLoopThreshold
This file contains the simple types necessary to represent the attributes associated with functions a...
#define CASE(ATTRNAME, AANAME,...)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static void clear(coro::Shape &Shape)
static const HTTPClientCleanup Cleanup
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
unsigned const TargetRegisterInfo * TRI
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static const unsigned FramePtr
static constexpr uint32_t Opcode
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allo...
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
bool enableCFIFixup(MachineFunction &MF) const override
Returns true if we may need to fix the unwind information for the function.
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon function entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted f...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI, unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer...
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
unsigned getArgumentStackToRestore() const
void setLocalStackSize(uint64_t Size)
int getCalleeSaveBaseToFrameRecordOffset() const
bool hasStreamingModeChanges() const
bool shouldSignReturnAddress(const MachineFunction &MF) const
uint64_t getStackSizeSVE() const
void setHasRedZone(bool s)
bool hasStackFrame() const
std::optional< int > getTaggedBasePointerIndex() const
uint64_t getLocalStackSize() const
void setStackRealigned(bool s)
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
void setStackSizeSVE(uint64_t S)
bool isStackRealigned() const
bool hasSwiftAsyncContext() const
void setTaggedBasePointerOffset(unsigned Offset)
unsigned getSVECalleeSavedStackSize() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
void setMinMaxSVECSFrameIndex(int Min, int Max)
bool hasCalleeSaveStackFreeSpace() const
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isSEHInstruction(const MachineInstr &MI)
Return true if the instruction is an SEH instruction used for unwinding on Windows.
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool hasBasePointer(const MachineFunction &MF) const
bool cannotEliminateFrame(const MachineFunction &MF) const
unsigned getBaseRegister() const
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
const AArch64InstrInfo * getInstrInfo() const override
bool isTargetILP32() const
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
const Triple & getTargetTriple() const
bool isCallingConvWin64(CallingConv::ID CC) const
const char * getChkStkName() const
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
unsigned getRedZoneSize(const Function &F) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction(bundle).
void removeReg(MCPhysReg Reg)
Removes a physical register, all its sub-registers, and all its super-registers from the set.
void addLiveIns(const MachineBasicBlock &MBB)
Adds all live-in registers of basic block MBB.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
void addReg(MCPhysReg Reg)
Adds a physical register and all its sub-registers to the set.
bool usesWindowsCFI() const
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createNegateRAState(MCSymbol *L, SMLoc Loc={})
.cfi_negate_ra_state AArch64 negate RA state.
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_same_value Current value of Register is the same as in the previous frame.
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
instr_iterator instr_begin()
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
int getStackProtectorIndex() const
Return the index for the stack protector object.
uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
<