#define DEBUG_TYPE "aarch64-call-lowering"
/// Account for i1/i8/i16 stack passed value hacks, to keep the GlobalISel
/// lowering compatible with the SelectionDAG implementation.
static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
                                             MVT &LocVT) {
  // If the original argument type was i1/i8/i16, pass the value with that
  // size rather than with the promoted register type.
  if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
    ValVT = LocVT = MVT::i8;
  else if (OrigVT == MVT::i16)
    ValVT = LocVT = MVT::i16;
}
/// Account for in-memory slot sizes that differ from the CCValAssign's LocVT,
/// matching the SelectionDAG lowering for small integer stack-passed values.
static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
  MVT ValVT = VA.getValVT();
  return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
                                                 : LLT(VA.getLocVT());
}
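// NOTE (illustrative, not from the original source): for a stack-passed i16
// argument the calling convention promotes LocVT to i32, but the SDAG
// lowering still stores/loads only 2 bytes. The two hacks above keep
// GlobalISel bit-for-bit compatible with that behaviour, e.g.:
//
//   void callee(/* enough register args to spill to the stack */, short x);
//   // the slot for 'x' is written as a 2-byte store, not a 4-byte one,
//   // even though 'x' formally occupies an i32 location.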
struct AArch64IncomingValueAssigner
    : public CallLowering::IncomingValueAssigner {
  AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_)
      : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
    return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
                                            LocInfo, Info, Flags, State);
  }
};
struct AArch64OutgoingValueAssigner
    : public CallLowering::OutgoingValueAssigner {
  const AArch64Subtarget &Subtarget;

  /// Track whether this is used for a return instead of function argument
  /// passing; the small-type stack hack is not applied to return values.
  bool IsReturn;

  AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_,
                               const AArch64Subtarget &Subtarget_,
                               bool IsReturn)
      : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
        Subtarget(Subtarget_), IsReturn(IsReturn) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    const Function &F = State.getMachineFunction().getFunction();
    bool IsCalleeWin =
        Subtarget.isCallingConvWin64(State.getCallingConv(), F.isVarArg());
    bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();

    bool Res;
    if (!Flags.isVarArg() && !UseVarArgsCCForFixed) {
      if (!IsReturn)
        applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty, State);
    } else {
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty,
                           State);
    }

    StackSize = State.getStackSize();
    return Res;
  }
};
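// NOTE (illustrative, not from the original source): on Windows-on-ARM64 a
// variadic callee passes even its *fixed* arguments with the vararg rules
// (floating-point values travel in integer registers so va_arg can walk a
// homogeneous save area). UseVarArgsCCForFixed above reproduces that: for a
// Win64 vararg callee, fixed arguments are also routed through AssignFnVarArg.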
struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingValueHandler(MIRBuilder, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed
    // arguments are not.
    const bool IsImmutable = !Flags.isByVal();

    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    return AddrReg.getReg(0);
  }
  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    // For pointers, we just need to fixup the integer types reported in the
    // CCValAssign.
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    // ... (the small-type hack may swap ValVT/LocVT when picking MemTy)

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::ZExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
      return;
    case CCValAssign::LocInfo::SExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
      return;
    default:
      MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
      break;
    }
  }
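  // NOTE (illustrative, not from the original source): the load opcode mirrors
  // how the value was extended when it was passed. If the caller sign-extended
  // an i8 to fill an i32 location (LocInfo == SExt), reloading it with
  // G_SEXTLOAD reproduces exactly the register contents the ABI promises.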
  /// How the physical register gets marked varies between formal parameters
  /// (a basic-block live-in) and call return values (an implicit-def on the
  /// call instruction), so derived handlers provide it.
  // ...
};

struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  // ...

  MachineInstrBuilder MIB;
};
/// A special return arg handler for "returned" attribute arg calls.
struct ReturnedArgCallReturnHandler : public CallReturnHandler {
  ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
                               MachineRegisterInfo &MRI,
                               MachineInstrBuilder MIB)
      : CallReturnHandler(MIRBuilder, MRI, MIB) {}

  // ...
};
struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, bool IsTailCall = false,
                     int FPDiff = 0)
      : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
        FPDiff(FPDiff),
        Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      assert(!Flags.isByVal() && "byval unhandled with tail calls");

      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);
    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }
  /// We need to fixup the reported store size for certain value types because
  /// we invert the interpretation of ValVT and LocVT in certain cases. This
  /// is for compatibility with the DAG call lowering implementation, which we
  /// are currently building on top of.
  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }
  /// Check whether a stack argument requires lowering in a tail call.
  static bool shouldLowerTailCallStackArg(const MachineFunction &MF,
                                          const CCValAssign &VA,
                                          Register ValVReg,
                                          Register StoreAddr) {
    const MachineRegisterInfo &MRI = MF.getRegInfo();
    auto *DefMI = MRI.getVRegDef(ValVReg);
    for (;;) {
      // Look through nodes that don't alter the bits of the incoming value.
      unsigned Op = DefMI->getOpcode();
      if (Op == TargetOpcode::G_ZEXT || Op == TargetOpcode::G_ANYEXT ||
          Op == TargetOpcode::G_BITCAST || isAssertMI(*DefMI)) {
        DefMI = MRI.getVRegDef(DefMI->getOperand(1).getReg());
        continue;
      }
      break;
    }

    // The value must come from a load of an immutable fixed stack slot...
    auto *Load = dyn_cast<GLoad>(DefMI);
    if (!Load)
      return true;
    Register LoadReg = Load->getPointerReg();
    auto *LoadAddrDef = MRI.getVRegDef(LoadReg);
    if (LoadAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
      return true;
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    int LoadFI = LoadAddrDef->getOperand(1).getIndex();

    // ...and be stored back to an incoming-argument slot.
    auto *StoreAddrDef = MRI.getVRegDef(StoreAddr);
    if (StoreAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
      return true;
    int StoreFI = StoreAddrDef->getOperand(1).getIndex();

    if (!MFI.isImmutableObjectIndex(LoadFI))
      return true;
    if (MFI.getObjectOffset(LoadFI) != MFI.getObjectOffset(StoreFI))
      return true;
    // ... (finally, the load size must match the store slot's size)
    return false;
  }
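  // NOTE (illustrative, not from the original source): this check enables the
  // common "perfect forwarding" tail call to skip redundant copies, e.g.:
  //
  //   int callee(int a, ..., int i /* passed on the stack */);
  //   int caller(int a, ..., int i) { return callee(a, ..., i); }
  //
  // Here 'i' already sits in exactly the stack slot the tail call needs, so
  // assignValueToAddress below can elide the reload-and-store pair entirely.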
  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    // ... (for tail calls, skip the store if the value is already in place)
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }
  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned RegIndex, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    unsigned MaxSize = MemTy.getSizeInBytes() * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (Arg.Flags[0].isVarArg())
      MaxSize = 0;

    Register ValVReg = Arg.Regs[RegIndex];
    if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
      // ...
      ValVReg = extendRegister(ValVReg, VA, MaxSize);
      // ...
    }

    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
  MachineInstrBuilder MIB;

  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we use it more than once in this call site.
  Register SPReg;

  const AArch64Subtarget &Subtarget;
};
414 "Return value without a vreg");
419 }
else if (!VRegs.
empty()) {
426 CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(
F.getCallingConv());
427 auto &
DL =
F.getDataLayout();
433 "For each split Type there should be exactly one VReg.");
438 for (
unsigned i = 0; i < SplitEVTs.
size(); ++i) {
440 ArgInfo CurArgInfo =
ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
445 auto &Flags = CurArgInfo.
Flags[0];
447 !Flags.isSExt() && !Flags.isZExt()) {
449 }
else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
452 MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
453 if (
EVT(NewVT) != SplitEVTs[i]) {
454 unsigned ExtendOp = TargetOpcode::G_ANYEXT;
455 if (
F.getAttributes().hasRetAttr(Attribute::SExt))
456 ExtendOp = TargetOpcode::G_SEXT;
457 else if (
F.getAttributes().hasRetAttr(Attribute::ZExt))
458 ExtendOp = TargetOpcode::G_ZEXT;
473 CurVReg = MIRBuilder.
buildInstr(ExtendOp, {NewLLT}, {CurVReg})
492 if (NewLLT !=
MRI.getType(CurVReg)) {
494 CurVReg = MIRBuilder.
buildInstr(ExtendOp, {NewLLT}, {CurVReg})
500 if (CurVReg != CurArgInfo.
Regs[0]) {
501 CurArgInfo.
Regs[0] = CurVReg;
508 AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
510 OutgoingArgHandler Handler(MIRBuilder,
MRI, MIB);
512 MIRBuilder, CC,
F.isVarArg());
515 if (SwiftErrorVReg) {
517 MIRBuilder.
buildCopy(AArch64::X21, SwiftErrorVReg);
bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
                                         CallingConv::ID CallConv,
                                         SmallVectorImpl<BaseArgInfo> &Outs,
                                         bool IsVarArg) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  const auto &TLI = *getTLI<AArch64TargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
}
/// Helper function to compute forwarded registers for musttail calls.
/// Computes the forwarded registers, sets MBB liveness, and emits COPY
/// instructions that move the forwarded registers to vregs.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();

  if (!MF.getFrameInfo().hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, {MVT::i64, MVT::f128},
                                           AssignFn);
  // ... (conservatively forward X8 too, since it may hold an sret pointer)

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}
bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
  auto &F = MF.getFunction();

  // Honor the GlobalISel enablement options: an explicitly passed
  // -global-isel flag, and the -aarch64-enable-global-isel-at-O cutoff.
  const bool GlobalISelFlag =
      /* ... (whether GlobalISel was explicitly requested) */ false;
  // When the optimization level is above the cutoff and GlobalISel was not
  // explicitly requested, use it only for optnone functions.
  if (/* ... (above the cutoff) */ !GlobalISelFlag)
    return !F.hasOptNone();

  if (!EnableSVEGISel && (F.getReturnType()->isScalableTy() ||
                          any_of(F.args(), [](const Argument &A) {
                            return A.getType()->isScalableTy();
                          })))
    return true;

  const auto &ST = MF.getSubtarget<AArch64Subtarget>();
  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    LLVM_DEBUG(
        dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
    return true;
  }

  // Fall back for functions carrying SME state we can't handle yet.
  SMEAttrs Attrs = /* ... */;
  if (Attrs.hasZAState() || Attrs.hasZT0State() ||
      Attrs.hasStreamingInterfaceOrBody() ||
      Attrs.hasStreamingCompatibleInterface())
    return true;

  return false;
}
void AArch64CallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    CCState &CCInfo) const {
  auto GPRArgRegs = AArch64::getGPRArgRegs();
  auto FPRArgRegs = AArch64::getFPRArgRegs();

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64CC = Subtarget.isCallingConvWin64(CCInfo.getCallingConv(),
                                                MF.getFunction().isVarArg());
  LLT p0 = LLT::pointer(0, 64);
  LLT s64 = LLT::scalar(64);

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
  unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;

  unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    if (IsWin64CC) {
      GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
                                     -static_cast<int>(GPRSaveSize), false);
      if (GPRSaveSize & 15)
        // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -static_cast<int>(alignTo(GPRSaveSize, 16)),
                              false);
    } else {
      GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
    }

    auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
    auto Offset =
        MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);

    for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
      Register Val = MRI.createGenericVirtualRegister(s64);
      // ... (copy GPRArgRegs[i] into Val via Handler.assignValueToReg)
      auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
                                 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
                           : MachinePointerInfo::getStack(MF, i * 8);
      MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }

    FuncInfo->setVarArgsGPRIndex(GPRIdx);
    FuncInfo->setVarArgsGPRSize(GPRSaveSize);
  }

  if (Subtarget.hasFPARMv8() && !IsWin64CC) {
    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);

    unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
    int FPRIdx = 0;
    if (FPRSaveSize != 0) {
      FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);

      auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
      auto Offset =
          MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);

      for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
        Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
        // ... (copy FPRArgRegs[i] into Val and store it, as in the GPR loop)

        FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                     FIN.getReg(0), Offset);
      }

      FuncInfo->setVarArgsFPRIndex(FPRIdx);
      FuncInfo->setVarArgsFPRSize(FPRSaveSize);
    }
  }
}
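// NOTE (illustrative, not from the original source): for the AAPCS va_list,
// every unallocated argument register is spilled to a save area so va_arg can
// find it. E.g. a vararg function with two fixed integer args saves X2..X7
// (6 * 8 = 48 bytes, GPRSaveSize) and, when FP is available, the unallocated
// suffix of Q0..Q7 in 16-byte slots (FPRSaveSize). Win64 instead uses fixed
// objects at negative offsets so the area sits immediately below the incoming
// stack arguments, and it saves only integer registers.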
bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getDataLayout();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // Arm64EC has extra requirements for variadic calls which are only
  // implemented in SelectionDAG; bail out for now.
  if (F.isVarArg() && Subtarget.isWindowsArm64EC())
    return false;

  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg()) &&
      !Subtarget.isWindowsArm64EC();

  SmallVector<ArgInfo, 8> SplitArgs;
  SmallVector<std::pair<Register, Register>> BoolArgs;

  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()).isZero())
      continue;

    ArgInfo OrigArg{VRegs[i], Arg, i};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    // i1 arguments are zero-extended to i8 by the caller. Emit a hint to
    // reflect this.
    if (OrigArg.Ty->isIntegerTy(1)) {
      assert(OrigArg.Regs.size() == 1 &&
             MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      auto &Flags = OrigArg.Flags[0];
      if (!Flags.isZExt() && !Flags.isSExt()) {
        // Lower an i1 argument as a wider i8, and remember the original (i1)
        // vreg so we can narrow the result back down afterwards.
        Register OrigReg = OrigArg.Regs[0];
        Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
        OrigArg.Regs[0] = WideReg;
        BoolArgs.push_back({OrigReg, WideReg});
      }
    }

    if (Arg.hasAttribute(Attribute::SwiftAsync))
      MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);

    splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
    ++i;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), IsWin64 && F.isVarArg());

  AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
  FormalArgHandler Handler(MIRBuilder, MRI);
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgs, CCInfo) ||
      !handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (!BoolArgs.empty()) {
    for (auto &KV : BoolArgs) {
      Register OrigReg = KV.first;
      Register WideReg = KV.second;
      LLT WideTy = MRI.getType(WideReg);
      assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
             "Unexpected bit size of a bool arg");
      MIRBuilder.buildTrunc(
          OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
    }
  }

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackSize = Assigner.StackSize;
  if (F.isVarArg()) {
    if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) ||
        IsWin64) {
      // The AAPCS variadic ABI is identical to the non-variadic one, so more
      // arguments may be sitting in registers; save them for va_arg. Win64
      // variadic functions also pass arguments in registers, but all float
      // arguments are passed in integer registers.
      saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
    } else if (Subtarget.isWindowsArm64EC()) {
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
    StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
  }

  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackSize = alignTo(StackSize, 16);
    FuncInfo->setArgumentStackToRestore(StackSize);
  }

  FuncInfo->setBytesInStackArgArea(StackSize);

  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}
/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed, *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);
  CCAssignFn *CallerAssignFnFixed, *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                              CalleeAssignFnVarArg);
  AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                              CallerAssignFnVarArg);
  if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto *TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }
  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}
bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OrigOutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OrigOutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  LLVMContext &Ctx = CallerF.getContext();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  CCAssignFn *AssignFnFixed, *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);

  AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                              Subtarget, /*IsReturn*/ false);
  // Work on a copy, since determineAssignments may modify argument flags.
  SmallVector<ArgInfo, 8> OutArgs(OrigOutArgs.begin(), OrigOutArgs.end());
  if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
    return false;

  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  const AArch64RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Info.IsVarArg) {
    // Be conservative and disallow variadic memory operands to match SDAG's
    // behaviour.
    for (unsigned i = 0; i < OutLocs.size(); ++i) {
      auto &ArgLoc = OutLocs[i];
      if (ArgLoc.isRegLoc())
        continue;
      LLVM_DEBUG(
          dbgs()
          << "... Cannot tail call vararg function with stack arguments\n");
      return false;
    }
  }

  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}
bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this. Proactively disabling it, because the
    // swifterror handling in lowerCall inserts a COPY *after* the location of
    // the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval/inreg/swifterror parameters in the caller point into the stack area
  // a tail call wants to reuse, so be conservative.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be tail-called:
  // the AAELF spec makes branches to undefined weak symbols
  // implementation-defined, so the linker cannot be relied on to replace the
  // tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // ...

  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Look at the incoming values.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall,
                              std::optional<CallLowering::PtrAuthInfo> &PAI,
                              MachineRegisterInfo &MRI) {
  const AArch64FunctionInfo *FuncInfo = CallerF.getInfo<AArch64FunctionInfo>();

  if (!IsTailCall) {
    if (!PAI)
      return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;

    assert(IsIndirect && "Direct call should not be authenticated");
    assert((PAI->Key == AArch64PACKey::IA || PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    return AArch64::BLRA;
  }

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

  // When BTI or PAuthLR is enabled, there are restrictions on which registers
  // may hold the function pointer for an indirect tail call.
  if (FuncInfo->branchTargetEnforcement()) {
    if (FuncInfo->branchProtectionPAuthLR()) {
      assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
      return AArch64::TCRETURNrix17;
    }
    if (PAI)
      return AArch64::AUTH_TCRETURN_BTI;
    return AArch64::TCRETURNrix16x17;
  }

  if (FuncInfo->branchProtectionPAuthLR()) {
    assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
    return AArch64::TCRETURNrinotx16;
  }

  if (PAI)
    return AArch64::AUTH_TCRETURN;
  return AArch64::TCRETURNri;
}
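// NOTE (illustrative summary, not from the original source) of the opcode
// matrix implemented above:
//
//   normal call: direct -> BL; indirect -> BLR via getBLRCallOpcode
//                (BLRA when pointer-authenticated)
//   tail call:   direct -> TCRETURNdi
//     indirect, BTI + PAuthLR -> TCRETURNrix17
//     indirect, BTI           -> TCRETURNrix16x17 (AUTH_TCRETURN_BTI w/ auth)
//     indirect, PAuthLR       -> TCRETURNrinotx16
//     indirect, otherwise     -> TCRETURNri (AUTH_TCRETURN w/ auth)
//
// The x16/x17 variants exist because, under BTI, an indirect branch may only
// target a "BTI c" landing pad when it goes through x16 or x17.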
static const uint32_t *
getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
               AArch64CallLowering::CallLoweringInfo &Info,
               const AArch64RegisterInfo &TRI, MachineFunction &MF) {
  const uint32_t *Mask;
  if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
    // For 'this' returns, use the mask that preserves the returned register.
    Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
    if (!Mask) {
      OutArgs[0].Flags[0].setReturned(false);
      Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
    }
  } else {
    Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
  }
  return Mask;
}
bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
                   Info.CallConv != CallingConv::Tail &&
                   Info.CallConv != CallingConv::SwiftTail;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed, *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true, Info.PAI, MRI);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const auto *TRI = Subtarget.getRegisterInfo();

  // Authenticated tail calls always take key/discriminator arguments.
  if (Opc == AArch64::AUTH_TCRETURN || Opc == AArch64::AUTH_TCRETURN_BTI) {
    assert((Info.PAI->Key == AArch64PACKey::IA ||
            Info.PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    MIB.addImm(Info.PAI->Key);

    Register AddrDisc = AArch64::NoRegister;
    uint16_t IntDisc = 0;
    std::tie(IntDisc, AddrDisc) =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);

    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);
    if (AddrDisc != AArch64::NoRegister) {
      MIB->getOperand(4).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
          *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(),
          MIB->getOperand(4), 4));
    }
  }

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (Info.CFIType)
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in fixed stack slots
  // offset by this amount from the base of the stack.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff before handling
    // assignments, because memory arguments depend on it.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                                Subtarget, /*IsReturn*/ false);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call, so it must be
    // kept 16-byte aligned.
    NumBytes = alignTo(OutInfo.getStackSize(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // Update the required reserved area if this is the tail call requiring
    // the most space so far.
    if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
      FuncInfo->setTailCallReservedStack(-FPDiff);

    // The stack pointer must stay 16-byte aligned at all times, so FPDiff
    // must be a multiple of 16.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
                             /*IsTailCall*/ true, FPDiff);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     CalleeCC, Info.IsVarArg))
    return false;

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call
    // for the forwarded registers that we *aren't* passing as parameters.
    // This ensures that their lifetimes are correctly extended.
    for (const auto &F : FuncInfo->getForwardedMustTailRegParms()) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, adjust the stack: do the call sequence start and
  // end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(0).addImm(0);
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that
  // instruction.
  if (MIB->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
                             *MF.getSubtarget().getRegBankInfo(), *MIB,
                             MIB->getDesc(), MIB->getOperand(0), 0);

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}
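// NOTE (illustrative, not from the original source): FPDiff is what lets a
// -tailcallopt tail call reuse the caller's incoming argument area. E.g. if
// the caller was entered with 32 bytes of stack arguments
// (getBytesInStackArgArea() == 32) and the callee needs 48 bytes, then
// FPDiff = 32 - 48 = -16: the prologue must have reserved 16 extra bytes
// (tracked via setTailCallReservedStack), and every outgoing stack argument
// is stored at its fixed slot shifted by FPDiff in getStackAddress above.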
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // ...

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    auto &Flags = OrigArg.Flags[0];
    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
      ArgInfo &OutArg = OutArgs.back();
      assert(OutArg.Regs.size() == 1 &&
             MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      // We cannot use a ZExt ArgInfo flag here, because it will zero-extend
      // the argument to i32 instead of just i8.
      OutArg.Regs[0] =
          MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
      OutArg.Ty = Type::getInt8Ty(F.getContext());
    }
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // fall back to SelectionDAG instead of dying here like in ISelLowering.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed, *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = 0;
  // Calls with operand bundle "clang.arc.attachedcall" are special: they
  // should be expanded to the call, directly followed by a special marker
  // sequence and a call to an ObjC library function.
  if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
    Opc = Info.PAI ? AArch64::BLRA_RVMARKER : AArch64::BLR_RVMARKER;
  // A call to a returns_twice function requires a BTI instruction after the
  // call site, if the caller uses branch target enforcement.
  else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
           !Subtarget.noBTIAtReturnTwice() &&
           MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
    Opc = AArch64::BLR_BTI;
  else {
    // For an intrinsic call (e.g. memset), use GOT if "RtLibUseGOT" (-fno-plt)
    // is set.
    if (Info.Callee.isSymbol() && F.getParent()->getRtLibUseGOT()) {
      auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_GLOBAL_VALUE);
      // ... (create the pointer-typed def for the G_GLOBAL_VALUE)
      MIB.addExternalSymbol(Info.Callee.getSymbolName(), AArch64II::MO_GOT);
      Info.Callee = MachineOperand::CreateReg(MIB.getReg(0), false);
    }
    Opc = getCallOpcode(MF, Info.Callee.isReg(), false, Info.PAI, MRI);
  }

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  unsigned CalleeOpNo = 0;

  if (Opc == AArch64::BLR_RVMARKER || Opc == AArch64::BLRA_RVMARKER) {
    // Add a target global address for the retainRV/claimRV runtime function
    // just before the call target.
    Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
    MIB.addGlobalAddress(ARCFn);
    ++CalleeOpNo;
    // ...
  } else if (Info.CFIType) {
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());
  }

  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  const uint32_t *Mask;
  const auto *TRI = Subtarget.getRegisterInfo();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);
  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsTailCall*/ false);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     Info.CallConv, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Opc == AArch64::BLRA || Opc == AArch64::BLRA_RVMARKER) {
    assert((Info.PAI->Key == AArch64PACKey::IA ||
            Info.PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    MIB.addImm(Info.PAI->Key);

    Register AddrDisc = AArch64::NoRegister;
    uint16_t IntDisc = 0;
    std::tie(IntDisc, AddrDisc) =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);

    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);
    if (AddrDisc != AArch64::NoRegister) {
      constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                               *Subtarget.getRegBankInfo(), *MIB,
                               MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),
                               CalleeOpNo + 3);
    }
  }

  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Assigner.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Assigner.StackSize)
      .addImm(CalleePopBytes);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that
  // instruction.
  if (MIB->getOperand(CalleeOpNo).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
                             MIB->getOperand(CalleeOpNo), CalleeOpNo);

  // Finally we can copy the returned value back into its virtual-register.
  // In symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    bool UsingReturnedArg =
        !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();

    AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
                                          /*IsReturn*/ false);
    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(
            UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
            MIRBuilder, Info.CallConv, Info.IsVarArg,
            UsingReturnedArg ? ArrayRef(OutArgs[0].Regs)
                             : ArrayRef<Register>()))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }
  return true;
}

bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
  return Ty.getSizeInBits() == 64;
}