#define DEBUG_TYPE "frame-info"

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
// In AArch64PrologueEpilogueCommon::requiresGetVGCall():
  return AFI->hasStreamingModeChanges() &&
         /* ... */;
// In isVGInstruction(MBBI, TLI):
  unsigned Opc = MBBI->getOpcode();
  if (Opc == AArch64::CNTD_XPiI)
    return true;
  // ...
  if (Opc == AArch64::BL)
    /* ... */;

  return Opc == TargetOpcode::COPY;
// In isPartOfZPRCalleeSaves(I):
  switch (I->getOpcode()) {
  // ...
  case AArch64::LD1B_2Z_IMM:
  case AArch64::ST1B_2Z_IMM:
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI:
  case AArch64::PTRUE_C_B:
  // ...
  case AArch64::SEH_SaveZReg:
    // ...
  }
// In isPartOfPPRCalleeSaves(I):
  switch (I->getOpcode()) {
  // ...
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI:
  // ...
  case AArch64::SEH_SavePReg:
    // ...
  }
  if (AFL.hasSVECalleeSavesAboveFrameRecord(MF)) {
    if (AFI->hasStackHazardSlotIndex())
      reportFatalUsageError("SME hazard padding is not supported on Windows");
    SVELayout = SVEStackLayout::CalleeSavesAboveFrameRecord;
  } else if (AFI->hasSplitSVEObjects()) {
    SVELayout = SVEStackLayout::Split;
  }
  if (AFL.requiresSaveVG(MF)) {
    auto &TLI = *Subtarget.getTargetLowering();
    // ...
  }
// In convertCalleeSaveRestoreToSPPrePostIncDec() (paired-opcode case labels reconstructed from their NewOpc values):
  switch (MBBI->getOpcode()) {
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
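// Illustrative example (the assembly is ours, not from the source): the
// conversion folds the stack adjustment into the first callee-save store or
// last callee-save load. With CSStackSizeInc == -16, the prologue pair
//
//   sub sp, sp, #16
//   stp x29, x30, [sp]        ; STPXi, offset 0
//
// becomes a single pre-indexed store (STPXpre):
//
//   stp x29, x30, [sp, #-16]!
//
// and, symmetrically, an epilogue LDPXi becomes a post-indexed LDPXpost:
//
//   ldp x29, x30, [sp], #16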
  int64_t MinOffset, MaxOffset;
  bool Success =
      TII->getMemOpInfo(NewOpc, Scale, Width, MinOffset, MaxOffset);
  // ...
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset * (int64_t)Scale.getFixedValue() ||
      CSStackSizeInc > MaxOffset * (int64_t)Scale.getFixedValue()) {
    // ... (the offset cannot be folded; fall back to a separate SP update):
    return std::prev(MBBI);
  }

  // Remove the SEH instruction that accompanied the old save/restore:
  auto SEH = std::next(MBBI);
  if (AArch64InstrInfo::isSEHInstruction(*SEH))
    SEH->eraseFromParent();
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);
  // ...
  AFL.insertSEH(*MIB, *TII, FrameFlag);
  // ...
  return std::prev(MBB.erase(MBBI));
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  // ...
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegI:
  case AArch64::SEH_SaveAnyRegIP:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
// In fixupCalleeSaveRestoreStackOffset(MI, LocalStackSize):
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return;

  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STRXui:
  case AArch64::STRDui:
  case AArch64::LDRXui:
  case AArch64::LDRDui:
    // ... (the paired STP/LDP forms take this path as well)
    Scale = 8;
    break;
  case AArch64::STRQui:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  // ...
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // The last operand is the immediate offset, in Scale-sized units:
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);
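// Worked example (values assumed): STRXui encodes its offset in 8-byte units
// (Scale == 8). If the combined SP bump moved the callee-save area up by
// LocalStackSize == 64 bytes, a spill previously emitted as
//   str x19, [sp, #16]   ; imm == 2
// is rewritten in place to
//   str x19, [sp, #80]   ; imm == 2 + 64 / 8 == 10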
  assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
  assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
         "Expecting a SEH instruction");
// In AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(); each
// disqualifying condition returns false (the returns are reconstructed):
  if (AFL.homogeneousPrologEpilog(MF))
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // For WinCFI with optsize, prefer simple paired stores:
  if (AFL.needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
      MF.getFunction().hasOptSize())
    return false;

  // 512 is the maximum immediate reachable by the paired save/restores:
  if (StackBumpBytes >= 512 ||
      AFL.windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;
  // ...
  if (AFL.canUseRedZone(MF))
    return false;

  if (AFI->hasSVEStackSize())
    return false;

  return true;
// In getSVEStackFrameSizes(); with the split layout PPR and ZPR locals stay
// separate, otherwise all SVE locals are attributed to the ZPR region:
  if (SVELayout == SVEStackLayout::Split)
    return {{PPRCalleeSavesSize, PPRLocalsSize},
            {ZPRCalleeSavesSize, ZPRLocalsSize}};
  return {{PPRCalleeSavesSize, /* ... */},
          {ZPRCalleeSavesSize, PPRLocalsSize + ZPRLocalsSize}};
// In getSVEStackAllocations(SVE):
  StackOffset BeforePPRs = SVE.PPR.CalleeSavesSize;
  // ... (AfterPPRs and AfterZPRs are accumulated from the remaining sizes)
  if (SVE.ZPR.CalleeSavesSize)
    AfterPPRs += SVE.PPR.LocalsSize + SVE.ZPR.CalleeSavesSize;
  else
    AfterZPRs += SVE.PPR.LocalsSize;
  return {BeforePPRs, AfterPPRs, AfterZPRs};
// In partitionSVECS(MBB, MBBI, PPRCalleeSavesSize, ZPRCalleeSavesSize,
// IsEpilogue); epilogues are scanned backwards from MBBI:
  auto End = IsEpilogue ? MBB.begin() : MBB.getFirstTerminator();
  auto AdjustI = [&](auto MBBI) { return IsEpilogue ? std::prev(MBBI) : MBBI; };
  // ...
  if (PPRCalleeSavesSize) {
    PPRsI = AdjustI(PPRsI);
    // ... (advance PPRsI over the PPR save/restore instructions):
      IsEpilogue ? (--PPRsI) : (++PPRsI);
  }
  // ...
  if (ZPRCalleeSavesSize) {
    ZPRsI = AdjustI(ZPRsI);
    // ... (advance ZPRsI over the ZPR save/restore instructions):
      IsEpilogue ? (--ZPRsI) : (++ZPRsI);
  }
  if (IsEpilogue)
    return {{PPRsI, MBBI}, {ZPRsI, PPRsI}};
  return {{MBBI, PPRsI}, {PPRsI, ZPRsI}};
// In the AArch64PrologueEmitter constructor:
  EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
  // ...
  collectBlockLiveins();
void AArch64PrologueEmitter::collectBlockLiveins() {
  // ... (compute the live-in set, then find where the existing prologue ends):
  PrologueEndI = MBB.begin();
  while (PrologueEndI != MBB.end() &&
         /* ... */)
    ++PrologueEndI;

  if (PrologueEndI != MBB.end()) {
    // ...
  }
}
void AArch64PrologueEmitter::verifyPrologueClobbers() const {
  if (PrologueEndI == MBB.end())
    return;
  // Check that inserted prologue instructions do not clobber live registers:
  for (MachineInstr &MI :
       make_range(MBB.instr_begin(), PrologueEndI->getIterator())) {
    for (auto &Op : MI.operands())
      if (Op.isReg() && Op.isDef())
        assert(!LiveRegs.contains(Op.getReg()) &&
               "live register clobbered by inserted prologue instructions");
  }
}
void AArch64PrologueEmitter::determineLocalsStackSize(
    uint64_t StackSize, uint64_t PrologueSaveSize) {
  AFI->setLocalStackSize(StackSize - PrologueSaveSize);
  // ...
}

// An upper bound on a StackOffset, treating each scalable byte as at most
// 16 fixed bytes:
static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
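// Worked example (ours): SVE's scalable unit is a 16-byte granule and vscale
// is architecturally at most 16 (2048-bit vectors), hence the factor of 16.
// For an offset of {fixed = 32, scalable = 16}:
//   upperBound == 16 * 16 + 32 == 288 bytes
static_assert(16 * 16 + 32 == 288, "worst-case bound for the example above");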
void AArch64PrologueEmitter::allocateStackSpace(
    MachineBasicBlock::iterator MBBI, int64_t RealignmentPadding,
    StackOffset AllocSize, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) {
  // ...
  const uint64_t AndMask = ~(MaxAlign - 1);
  // ...
  Register TargetReg = RealignmentPadding
                           ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                           : AArch64::SP;
  // ...
  if (RealignmentPadding) {
    // ...
  }
  // ... (with inline stack probing, a fixed-size allocation is emitted as a
  //      PROBED_STACKALLOC pseudo and expanded later):
  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    Register ScratchReg = AFL.findScratchNonCalleeSaveRegister(&MBB);
    assert(ScratchReg != AArch64::NoRegister);
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::PROBED_STACKALLOC))
        .addDef(ScratchReg)
        .addImm(AllocSize.getFixed())
        .addImm(InitialOffset.getFixed())
        .addImm(InitialOffset.getScalable());
    if (FollowupAllocs) {
      // ...
    }
    // ...
  }

  // Allocations small enough to stay within one probe interval can use a
  // plain SP decrement:
  int64_t ProbeSize = AFI->getStackProbeSize();
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    // ...
    if (RealignmentPadding) {
      // ...
      AFI->setStackRealigned(true);
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // ... (probe the tail of the allocation)
    }
    return;
  }

  // Otherwise emit a probing allocation loop:
  // ...
  assert(TargetReg != AArch64::NoRegister);
  // ...
  if (RealignmentPadding) {
    // ...
  }
  // ...
      .buildDefCFARegister(AArch64::SP);

  if (RealignmentPadding)
    AFI->setStackRealigned(true);
}
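// Context for the ProbeSize test above (numbers illustrative, not from the
// source): with the common 4096-byte probe interval, any allocation whose
// worst case (upperBound(AllocSize) plus realignment padding) fits in one
// interval can be performed as a single SP decrement with at most one
// follow-up store to touch the new page; only larger or scalable
// allocations need the probing loop emitted by the code that follows.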
// In AArch64PrologueEmitter::emitPrologue():
  AFI->setHasRedZone(false);
  if (AFI->getArgumentStackToRestore())
    /* ... */;

  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      // ...
    }
  }

  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackPrologue(PrologueBeginI, DL);
    // ...
  }

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(PrologueBeginI, DL);
  // ...
  if (std::optional<int> TBPI = AFI->getTaggedBasePointerIndex())
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());
  // ...
  if (!AFI->hasStackFrame() && !AFL.windowsRequiresStackProbe(MF, NumBytes))
    return emitEmptyStackFramePrologue(NumBytes, PrologueBeginI, DL);
  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  // ...
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  determineLocalsStackSize(NumBytes, PrologueSaveSize);

  if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
    assert(/* ... */ &&
           "unexpected SVE allocs after PPRs with CalleeSavesAboveFrameRecord");
    // ...
    allocateStackSpace(PrologueBeginI, 0, SaveSize, false, StackOffset{},
                       /* ... */);
    NumBytes -= FixedObject;
    // ...
    convertCalleeSaveRestoreToSPPrePostIncDec(
        MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI);
    NumBytes -= AFI->getCalleeSavedStackSize();
  } else if (CombineSPBump) {
    assert(!AFL.getSVEStackSize(MF) && "Cannot combine SP bump with SVE");
    // ...
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    convertCalleeSaveRestoreToSPPrePostIncDec(
        PrologueBeginI, DL, -PrologueSaveSize, EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the GPR callee saves, fixing up their offsets if needed:
  auto &TLI = *Subtarget.getTargetLowering();
  while (AfterGPRSavesI != EndI &&
         /* ... */) {
    // ...
    fixupCalleeSaveRestoreStackOffset(*AfterGPRSavesI,
                                      AFI->getLocalStackSize());
    // ...
  }

  if (HasFP)
    emitFramePointerSetup(AfterGPRSavesI, DL, FixedObject);
  // ...
  emitCalleeSavedGPRLocations(AfterGPRSavesI);
770 emitCalleeSavedGPRLocations(AfterGPRSavesI);
773 const bool NeedsRealignment =
775 const int64_t RealignmentPadding =
776 (NeedsRealignment &&
MFI.getMaxAlign() >
Align(16))
777 ?
MFI.getMaxAlign().value() - 16
780 if (
AFL.windowsRequiresStackProbe(
MF, NumBytes + RealignmentPadding))
781 emitWindowsStackProbe(AfterGPRSavesI,
DL, NumBytes, RealignmentPadding);
791 auto [PPRRange, ZPRRange] =
793 ZPR.CalleeSavesSize,
false);
794 AfterSVESavesI = ZPRRange.End;
796 emitCalleeSavedSVELocations(AfterSVESavesI);
798 allocateStackSpace(PPRRange.Begin, 0, SVEAllocs.
BeforePPRs,
799 EmitAsyncCFI && !
HasFP, CFAOffset,
803 assert(PPRRange.End == ZPRRange.Begin &&
804 "Expected ZPR callee saves after PPR locals");
805 allocateStackSpace(PPRRange.End, 0, SVEAllocs.
AfterPPRs,
806 EmitAsyncCFI && !
HasFP, CFAOffset,
819 assert(!(
AFL.canUseRedZone(
MF) && NeedsRealignment) &&
820 "Cannot use redzone with stack realignment");
821 if (!
AFL.canUseRedZone(
MF)) {
825 allocateStackSpace(AfterSVESavesI, RealignmentPadding, SVEAllocs.
AfterZPRs,
826 EmitAsyncCFI && !
HasFP, CFAOffset,
827 MFI.hasVarSizedObjects());
  // ...
  MBB.addLiveIn(AArch64::X1);
  // ...
  if (EmitCFI && !EmitAsyncCFI) {
    // ... (the prologue CFI was deferred; emit it now):
    emitDefineCFAWithFP(AfterSVESavesI, FixedObject);
    // ...
    StackOffset TotalSize =
        AFL.getSVEStackSize(MF) +
        StackOffset::getFixed((int64_t)MFI.getStackSize());
    // ...
    emitCalleeSavedGPRLocations(AfterSVESavesI);
    emitCalleeSavedSVELocations(AfterSVESavesI);
  }
void AArch64PrologueEmitter::emitShadowCallStackPrologue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // ...
  MBB.addLiveIn(AArch64::X18);
  // ...
  static const char CFIInst[] = {
      dwarf::DW_CFA_val_expression,
      18, // register
      2,  // length
      static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
      static_cast<char>(-8) & 0x7f, // addend (sleb128)
  };
  CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
      .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
}
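// Decoding the escape above (DWARF facts, not additional code from the
// file): the bytes spell DW_CFA_val_expression reg(18) len(2) followed by
// the expression { DW_OP_breg18, SLEB128(-8) }, i.e. "the caller's x18 is
// the current x18 minus 8", which undoes the 8-byte shadow-call-stack push
// of the return address. SLEB128(-8) fits in a single byte:
// (-8) & 0x7f == 0x78.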
void AArch64PrologueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  case SwiftAsyncFramePointerMode::DeploymentBased:
    if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
      // ...
    }
    // ...
  }
}
void AArch64PrologueEmitter::emitEmptyStackFramePrologue(
    int64_t NumBytes, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL) const {
  assert(!HasFP && "unexpected function without stack frame but with FP");
  assert(!AFI->hasSVEStackSize() &&
         "unexpected function without stack frame but with SVE objects");
  // ...
  AFI->setLocalStackSize(NumBytes);
  // ...
  if (AFL.canUseRedZone(MF)) {
    AFI->setHasRedZone(true);
    ++NumRedZoneFunctions;
  } else {
    // ... (allocate NumBytes and, when emitting CFI, label the new state):
    MCSymbol *FrameLabel = MF.getContext().createTempSymbol();
    // ...
        .buildDefCFAOffset(NumBytes, FrameLabel);
  }
}
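// Context (an LLVM AArch64 implementation fact, stated here for
// illustration): the red zone is 128 bytes below SP, so a leaf frame that
// fits there needs no SP adjustment at all; the non-red-zone path must
// still describe the bump to the unwinder, emitting roughly:
//   .cfi_def_cfa_offset <NumBytes>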
void AArch64PrologueEmitter::emitFramePointerSetup(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
    unsigned FixedObject) {
  int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();
  if (CombineSPBump)
    FPOffset += AFI->getLocalStackSize();

  if (AFI->hasSwiftAsyncContext()) {
    // ...
    const auto &Attrs = MF.getFunction().getAttributes();
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
    // ...
  }
  // ...
  if (EmitAsyncCFI)
    emitDefineCFAWithFP(MBBI, FixedObject);
}
void AArch64PrologueEmitter::emitDefineCFAWithFP(
    MachineBasicBlock::iterator MBBI, unsigned FixedObject) const {
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  // ...
  CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
      .buildDefCFA(FramePtr, FixedObject - OffsetToFirstCalleeSaveFromFP);
}
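// Worked example (layout values assumed): with FixedObject == 0, a
// frame-record offset of 0 from the callee-save base, and a 16-byte GPR
// save area, OffsetToFirstCalleeSaveFromFP == 0 - 16 == -16, so the builder
// emits
//   .cfi_def_cfa w29, 16
// i.e. CFA == FP + 16, the address just above the callee-save area.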
void AArch64PrologueEmitter::emitWindowsStackProbe(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t NumBytes,
    int64_t RealignmentPadding) const {
  if (AFI->getSVECalleeSavedStackSize())
    /* ... */;

  // The probe sequence passes the allocation size to the probe function in
  // X15, so free X15 up first if it is live in:
  unsigned X15Scratch = AArch64::NoRegister;
  if (any_of(MBB.liveins(),
             [this](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
               return RegInfo.isSuperOrSubRegisterEq(AArch64::X15,
                                                     LiveIn.PhysReg);
             })) {
    X15Scratch = AFL.findScratchNonCalleeSaveRegister(&MBB, true);
    assert(X15Scratch != AArch64::NoRegister &&
           (X15Scratch < AArch64::X15 || X15Scratch > AArch64::X17));
    // ...
    LiveRegs.removeReg(AArch64::X15);
    // ...
  }

  uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
  // ...
  if (NumBytes >= (1 << 28))
    report_fatal_error(/* ... */ "unwinding purposes");

  uint32_t LowNumWords = NumWords & 0xFFFF;
  // ... (MOVZ x15, LowNumWords)
  if ((NumWords & 0xFFFF0000) != 0) {
    // ... (MOVK x15 with the high half):
        .addImm((NumWords & 0xFFFF0000) >> 16)
    // ...
  }
  // ...
  const AArch64TargetLowering *TLI = Subtarget.getTargetLowering();
  RTLIB::LibcallImpl ChkStkLibcall = TLI->getLibcallImpl(RTLIB::STACK_PROBE);
  if (ChkStkLibcall == RTLIB::Unsupported)
    /* ... */;
  const char *ChkStk = TLI->getLibcallImplName(ChkStkLibcall).data();
  switch (MF.getTarget().getCodeModel()) {
  // ... (emit the call to ChkStk as appropriate for the code model)
  }
  // ...
  if (RealignmentPadding > 0) {
    if (RealignmentPadding >= 4096) {
      // ... (materialize the pad in a register first):
          .addImm(RealignmentPadding)
      // ...
    }
    // ... (bump the probed SP by the pad):
        .addImm(RealignmentPadding)
    // ...
    uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
    // ... (AND the result into SP to realign it)
    AFI->setStackRealigned(true);
  }
  // ...
  if (X15Scratch != AArch64::NoRegister) {
    // ... (restore the original value of X15)
  }
}
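// Worked example for the realignment tail above (values assumed):
// MFI.getMaxAlign() == 32 gives AndMask == ~31 == 0xffffffffffffffe0.
// AND-ing SP with that mask rounds it down to a 32-byte boundary, and the
// RealignmentPadding added just before guarantees the rounded-down SP still
// lies within the probed allocation.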
void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
    MachineBasicBlock::iterator MBBI) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();
    if (MFI.hasScalableStackID(FrameIdx))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t Offset = MFI.getObjectOffset(FrameIdx) - AFL.getOffsetOfLocalArea();
    CFIBuilder.buildOffset(Info.getReg(), Offset);
  }
}
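// Illustrative output (offsets assumed): for the link register saved 16
// bytes below the CFA, buildOffset(Info.getReg(), -16) materializes
//   .cfi_offset w30, -16
// telling the unwinder where the caller's return address was spilled.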
void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
    MachineBasicBlock::iterator MBBI) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  std::optional<int64_t> IncomingVGOffsetFromDefCFA;
  if (AFL.requiresSaveVG(MF)) {
    auto IncomingVG = *find_if(
        reverse(CSI), [](auto &Info) { return Info.getReg() == AArch64::VG; });
    IncomingVGOffsetFromDefCFA = MFI.getObjectOffset(IncomingVG.getFrameIdx()) -
                                 AFL.getOffsetOfLocalArea();
  }

  StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
  for (const auto &Info : CSI) {
    int FI = Info.getFrameIdx();
    if (!MFI.hasScalableStackID(FI))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    MCRegister Reg = Info.getReg();
    // ...
    CFIBuilder.insertCFIInst(
        createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA));
  }
}
// In isFuncletReturnInstr(MI):
  switch (MI.getOpcode()) {
  // ...
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }

// ... (initially, no SEH epilogue range is open):
  SEHEpilogueStartI = MBB.end();
1327 "expected negative offset (with optional fixed portion)");
1329 if (int64_t FixedOffset =
Offset.getFixed()) {
1345 if (
MBB.end() != EpilogueEndI) {
1346 DL = EpilogueEndI->getDebugLoc();
  int64_t ArgumentStackToRestore = AFL.getArgumentStackToRestore(MF, MBB);
  bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                              MF.getFunction().isVarArg());
  // ...
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // ...
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  // ...
  auto FirstHomogenousEpilogI = MBB.getFirstTerminator();
  if (FirstHomogenousEpilogI != MBB.begin()) {
    auto HomogeneousEpilog = std::prev(FirstHomogenousEpilogI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      FirstHomogenousEpilogI = HomogeneousEpilog;
  }
  // ...
  assert(AfterCSRPopSize == 0);
  // ...
  bool CombineSPBump = shouldCombineCSRLocalStackBump(NumBytes);
  // ...
  unsigned ProloguePopSize = PrologueSaveSize;
  // ...
  ProloguePopSize -= FixedObject;
  AfterCSRPopSize += FixedObject;
  if (!CombineSPBump && ProloguePopSize != 0) {
    // ... (walk backwards over CFI/SEH instructions to find the last pop):
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop) ||
           /* ... */)
      Pop = std::prev(Pop);
    // ...
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      // ...
      if (AArch64InstrInfo::isSEHInstruction(*AfterLastPop))
        /* ... */;
      // ...
    }
    // ...
    AfterCSRPopSize += ProloguePopSize;
  }
  // ...
  while (FirstGPRRestoreI != Begin) {
    --FirstGPRRestoreI;
    if (/* not part of the epilogue restores */) {
      ++FirstGPRRestoreI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*FirstGPRRestoreI,
                                        AFI->getLocalStackSize());
  }
  // ...
  BuildMI(MBB, FirstGPRRestoreI, DL, TII->get(AArch64::SEH_EpilogStart))
      .setMIFlag(MachineInstr::FrameDestroy);
  SEHEpilogueStartI = FirstGPRRestoreI;
  --SEHEpilogueStartI;
  // ...
  auto [PPRRange, ZPRRange] = partitionSVECS(
      MBB,
      /* ... */ ? MBB.getFirstTerminator() : /* ... */,
      PPR.CalleeSavesSize, ZPR.CalleeSavesSize, /*IsEpilogue=*/true);
  // ...
  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);
  if (CombineSPBump) {
    assert(!AFI->hasSVEStackSize() && "Cannot combine SP bump with SVE");
    // ...
  }
  // ...
  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  StackOffset SVECalleeSavesSize = ZPR.CalleeSavesSize + PPR.CalleeSavesSize;
  // ...
  if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
    assert(/* ... */ &&
           "unexpected SVE allocs after PPRs with CalleeSavesAboveFrameRecord");
    // ...
    if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
      // ...
    }
  } else if (AFI->hasSVEStackSize()) {
    // Deallocate the SVE area, using FP as the base when SP cannot be
    // trusted (stack realignment or variable-sized objects):
    Register BaseForSVEDealloc =
        (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
                                                              : AArch64::SP;
    if (SVECalleeSavesSize && BaseForSVEDealloc == AArch64::FP) {
      StackOffset FPOffsetZPR =
          -SVECalleeSavesSize - PPR.LocalsSize -
          /* ... */;
      moveSPBelowFP(ZPRRange.Begin, FPOffsetZPR);
      // ...
      assert(!FPOffsetPPR.getFixed() && "expected only scalable offset");
      // ...
    } else if (BaseForSVEDealloc == AArch64::SP) {
      // ...
      NumBytes -= NonSVELocals.getFixed();
      // ...
    }
    // ...
    assert(PPRRange.Begin == ZPRRange.End &&
           "Expected PPR restores after ZPR");
  emitCalleeSavedSVERestores(
      /* ... */);
  // ...
  bool RedZone = AFL.canUseRedZone(MF);
  // If this was a red-zone leaf function, the stack pointer needs no
  // restore (but stack arguments may still need popping):
  if (RedZone && AfterCSRPopSize == 0)
    return;
  // ...
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;
  // ...
  emitFrameOffset(
      MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
      /* ... */);
  // ...
  if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
    return;
  // ...
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(
        MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::FP,
        /* ... */);
  } else if (NumBytes)
    /* ... */;
  // ...
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    // ...
    emitFrameOffset(
        MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
        /* ... */);
  }
bool AArch64EpilogueEmitter::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  // ...
  // Walk back to the last real (non-transient) instruction:
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    // ...
    break;
  }
  // Prefer merging the SP bump into a trailing MTE tag store:
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
}
void AArch64EpilogueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  // ...
  }
}

void AArch64EpilogueEmitter::emitShadowCallStackEpilogue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // ... (pop the return address back off the shadow call stack):
  if (AFI->needsAsyncDwarfUnwindInfo(MF))
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
        .buildRestore(AArch64::X18);
}
void AArch64EpilogueEmitter::emitCalleeSavedRestores(
    MachineBasicBlock::iterator MBBI, bool SVE) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  // ...
  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
  for (const auto &Info : CSI) {
    if (SVE != MFI.hasScalableStackID(Info.getFrameIdx()))
      continue;

    MCRegister Reg = Info.getReg();
    // ...
    CFIBuilder.buildRestore(Info.getReg());
  }
}
void AArch64EpilogueEmitter::finalizeEpilogue() const {
  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackEpilogue(MBB.getFirstTerminator(), DL);
    // ...
  }
  // ...
  emitCalleeSavedGPRRestores(MBB.getFirstTerminator());

  if (AFI->shouldSignReturnAddress(MF)) {
    // ...
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::PAUTH_EPILOGUE))
          .setMIFlag(MachineInstr::FrameDestroy);
      // ...
    }
  }
  // ...
  BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
      .setMIFlag(MachineInstr::FrameDestroy);
  if (!MF.hasWinCFI())
    MF.setHasWinCFI(true);
  // ... (drop the SEH start marker if the epilogue ended up empty):
  MBB.erase(SEHEpilogueStartI);
}
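// Minimal usage sketch (ours, based only on the constructors
// AArch64PrologueEmitter/AArch64EpilogueEmitter(MF, MBB, AFL) and their
// emitPrologue()/emitEpilogue() entry points; AArch64FrameLowering drives
// the emitters in a similar way, but this exact helper is hypothetical):
static void emitPrologueEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
                                 const AArch64FrameLowering &AFL) {
  AArch64PrologueEmitter Prologue(MF, MBB, AFL);
  Prologue.emitPrologue(); // inserts frame setup at the top of MBB

  AArch64EpilogueEmitter Epilogue(MF, MBB, AFL);
  Epilogue.emitEpilogue(); // inserts frame teardown before the terminator
}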