#include "llvm/IR/IntrinsicsARM.h"

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");
    cl::desc("Enable / disable ARM interworking (for debugging only)"),
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::desc("Maximum interleave factor for MVE VLDn to generate."),
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
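// The checks below suggest addTypeForNEON configures the default
// legalization for a NEON vector type: when VT differs from PromotedLdStVT,
// loads and stores of VT are promoted to the wider type, and the remaining
// actions are chosen per element type (f64 and i32 elements get special
// handling).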
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
  if (ElemTy != MVT::f64)
  if (ElemTy == MVT::i32) {
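// addDRTypeForNEON / addQRTypeForNEON register a 64-bit (D-register) or
// 128-bit (Q-register) NEON vector type respectively, then delegate to
// addTypeForNEON with the matching promoted load/store type (f64 or v2f64).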
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::f64);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::v2f64);
}
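// setAllExpand presumably marks every operation on VT as Expand, so that only
// operations explicitly re-legalized afterwards (such as plain loads, stores
// and bitcasts) remain natively supported for that type.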
void ARMTargetLowering::setAllExpand(MVT VT) {

void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
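// addMVEVectorTypes wires up the MVE (M-profile Vector Extension) vector
// types. The integer vectors are always available; the float vectors are only
// fully usable when the MVE FP extension is present (HasMVEFP), and the
// v2i64/v2f64 "long" types plus the vNi1 predicate types receive more limited
// support.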
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {

  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {

  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {

  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
  for (auto VT : pTypes) {
  for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
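  // The libcall tables that follow pair an RTLIB entry with a runtime routine
  // name and, where relevant, the condition code or calling convention to use.
  // The VFP-flavoured entries (e.g. __unordsf2vfp with ISD::SETNE) appear to
  // reroute FP comparisons to hard-float helpers when Thumb code can use VFP.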
  if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
      Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
    static const struct {
      const char *const Name;
      { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
      { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
    for (const auto &LC : LibraryCalls) {

    static const struct {
      const char *const Name;
    for (const auto &LC : LibraryCalls) {

    static const struct {
      const char *const Name;
    } MemOpsLibraryCalls[] = {
    for (const auto &LC : MemOpsLibraryCalls) {

    static const struct {
      const char *const Name;
    for (const auto &LC : LibraryCalls) {

    static const struct {
      const char *const Name;
    for (const auto &LC : LibraryCalls) {
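  // When hardware FP registers are available outside Thumb1 and soft-float is
  // not requested, the scalar FP types are presumably added to the FP register
  // classes, and setAllExpand is then used to turn off everything the selected
  // FPU cannot actually do (all of f64 without FP64, most of bf16, etc.).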
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasBF16()) {
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())

      addAllExtLoads(VT, InnerVT, Expand);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());
  if (Subtarget->hasLOB()) {

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
  if (Subtarget->hasNEON()) {
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,

    for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {

  if (Subtarget->hasMVEIntegerOps()) {

  if (Subtarget->hasMVEFloatOps()) {

  if (!Subtarget->hasFP64()) {

  if (Subtarget->hasFullFP16()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasDSP()) {

  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))

  if (Subtarget->hasMVEIntegerOps())

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())

  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {

  if (Subtarget->hasPerfMon())

  if (!Subtarget->hasV6Ops())

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {

    HasStandaloneRem = false;

      const char *const Name;
    } LibraryCalls[] = {
    for (const auto &LC : LibraryCalls) {

      const char *const Name;
    } LibraryCalls[] = {
    for (const auto &LC : LibraryCalls) {

  InsertFencesForAtomic = false;
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
    if (!Subtarget->hasAcquireRelease() ||
      InsertFencesForAtomic = true;

    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

  if (!InsertFencesForAtomic) {
      (!Subtarget->isMClass() && Subtarget->hasV6Ops())) {
  } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) ||
             Subtarget->hasForced32BitAtomics()) {
  if (!Subtarget->hasV6Ops()) {

  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasFullFP16())

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFP64()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasMVEIntegerOps())

  if (Subtarget->hasV6Ops())

  if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||

  return Subtarget->useSoftFloat();
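// The function returning std::pair<const TargetRegisterClass *, uint8_t>
// below is presumably findRepresentativeClass: for wide vector types it
// reports a representative register class (DPR) together with a cost
// reflecting how many registers the value occupies, which the register
// pressure heuristics consume.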
std::pair<const TargetRegisterClass *, uint8_t>

  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;

  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

  return std::make_pair(RRC, Cost);
#define MAKE_CASE(V) \
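// MAKE_CASE is presumably expanded inside getTargetNodeName() to stringify
// ARMISD opcodes, along the lines of:
//   #define MAKE_CASE(V) case V: return #V;
//   switch ((ARMISD::NodeType)Opcode) {
//     MAKE_CASE(ARMISD::Wrapper)
//     ...
//   }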
  if ((Subtarget->hasMVEIntegerOps() &&
       (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
        VT == MVT::v16i8)) ||
      (Subtarget->hasMVEFloatOps() &&
       (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16)))

  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  if (Subtarget->hasMVEIntegerOps()) {
    if (VT == MVT::v4i64)
      return &ARM::MQQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::MQQQQPRRegClass;
  }
                                              Align &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))

  unsigned NumVals = N->getNumValues();
  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
  if (!N->isMachineOpcode())

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

                                                 bool isVarArg) const {
  else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() &&

                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);

                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);

                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasFullFP16()) {
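// LowerCallResult copies the values produced by a call out of the physical
// return registers described by RVLocs into SDValues for the caller,
// threading the chain and glue through each copy; f64-style values come back
// as a Lo/Hi pair of i32 copies, as the fragments below show.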
SDValue ARMTargetLowering::LowerCallResult(
    SDValue ThisVal, bool isCmseNSCall) const {

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    if (i == 0 && isThisReturn) {
             "unexpected return calling convention register assignment");

      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);

      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
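// computeAddrForCallArg appears to compute the stack address (and the
// matching MachinePointerInfo) at which an outgoing call argument must be
// stored: relative to StackPtr for an ordinary call, or into the caller's own
// incoming argument area (adjusted by SPDiff) when lowering a tail call.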
std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
    bool IsTailCall, int SPDiff) const {

  return std::make_pair(DstAddr, DstInfo);
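// ByValNeedsCopyForTailCall classifies how a byval argument must be
// materialized for a tail call: NoCopy when source and destination already
// refer to the same fixed stack slot, CopyOnce when a single copy into the
// argument area suffices, and CopyViaTemp when the source could be clobbered
// and must first be copied to a temporary stack object.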
ARMTargetLowering::ByValCopyKind ARMTargetLowering::ByValNeedsCopyForTailCall(
  if (isa<GlobalAddressSDNode>(Src) || isa<ExternalSymbolSDNode>(Src))

  auto *SrcFrameIdxNode = dyn_cast<FrameIndexSDNode>(Src);
  auto *DstFrameIdxNode = dyn_cast<FrameIndexSDNode>(Dst);
  if (!SrcFrameIdxNode || !DstFrameIdxNode)

  int SrcFI = SrcFrameIdxNode->getIndex();
  int DstFI = DstFrameIdxNode->getIndex();
         "byval passed in non-fixed stack slot");

  if (SrcOffset == DstOffset)
                                         RegsToPassVector &RegsToPass,

                    DAG.getVTList(MVT::i32, MVT::i32), Arg);
  unsigned id = Subtarget->isLittle() ? 0 : 1;

    std::tie(DstAddr, DstInfo) =
        computeAddrForCallArg(dl, DAG, NextVA, StackPtr, IsTailCall, SPDiff);
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall = false;
  bool isSibCall = false;
  bool PreferIndirect = false;
  bool GuardWithBTI = false;

      !Subtarget->noBTIAtReturnTwice())

    isCmseNSCall = true;

  if (isa<GlobalAddressSDNode>(Callee)) {
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
          return isa<Instruction>(U) &&
                 cast<Instruction>(U)->getParent() == BB;

      IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, PreferIndirect);
           "site marked musttail");

  unsigned NumBytes = CCInfo.getStackSize();

  if (isTailCall && !isSibCall) {
    assert(StackAlign && "data layout string is missing stack alignment");
    NumBytes = alignTo(NumBytes, *StackAlign);

    SPDiff = NumReusableBytes - NumBytes;

    if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff)

  RegsToPassVector RegsToPass;

    SDValue Src = OutVals[ArgIdx];
    if (!Flags.isByVal())

      std::tie(Dst, DstInfo) =
          computeAddrForCallArg(dl, DAG, VA, SDValue(), true, SPDiff);
      ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags);

      if (Copy == NoCopy) {
      } else if (Copy == CopyOnce) {
        ByValTemporaries[ArgIdx] = Src;
        assert(Copy == CopyViaTemp && "unexpected enum value");
        int TempFrameIdx = MFI.CreateStackObject(
            Flags.getByValSize(), Flags.getNonZeroByValAlign(), false);

        SDValue Ops[] = {Chain, Temp, Src, SizeNode, AlignNode};
        ByValTemporaries[ArgIdx] = Temp;

  if (!ByValCopyChains.empty())

  bool AfterFormalArgLoads = false;
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    SDValue Arg = OutVals[realArgIdx];
    bool isByVal = Flags.isByVal();

    if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) {
      AfterFormalArgLoads = true;

    auto ArgVT = Outs[realArgIdx].ArgVT;
    if (isCmseNSCall && (ArgVT == MVT::f16)) {

      PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);
      PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);

      std::tie(DstAddr, DstInfo) =
          computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

      PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);

      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;

      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      unsigned offset = 0;

      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      bool NeedsStackCopy;
      if (ByValTemporaries.contains(realArgIdx)) {
        ByValSrc = ByValTemporaries[realArgIdx];
        NeedsStackCopy = true;
        NeedsStackCopy = !isTailCall;

      if (CurByValIdx < ByValArgsCount) {
        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          RegsToPass.push_back(std::make_pair(j, Load));

        offset = RegEnd - RegBegin;
        CCInfo.nextInRegsParam();

      if (NeedsStackCopy && Flags.getByValSize() > 4 * offset) {
        std::tie(Dst, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};

      std::tie(DstAddr, DstInfo) =
          computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

  if (!MemOpChains.empty())
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);

  bool isDirect = false;

    GVal = G->getGlobal();
  bool isStub = !TM.shouldAssumeDSOLocal(GVal) && Subtarget->isTargetMachO();
  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;

  if (Subtarget->genLongCalls()) {
           "long-calls codegen is not position independent!");
    if (isa<GlobalAddressSDNode>(Callee)) {
      if (Subtarget->genExecuteOnly()) {
      const char *Sym = S->getSymbol();
      if (Subtarget->genExecuteOnly()) {
  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {
      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
           "Windows is the only supported COFF target");
    else if (!TM.shouldAssumeDSOLocal(GVal))
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
                                      ARMPCLabelIndex, 4);

    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");
          "call to non-secure function would "
          "require passing arguments on stack",
          "call to non-secure function would return value through pointer",

  if (Subtarget->isThumb()) {
    else if (isCmseNSCall)
    else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
    if (!isDirect && !Subtarget->hasV5TOps())
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&

  if (isTailCall && !isSibCall) {

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

      isThisReturn = false;

  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(InGlue);

  Chain = DAG.getNode(CallOpc, dl, {MVT::Other, MVT::Glue}, Ops);

  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, CalleePopBytes, InGlue, dl);

  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall);
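// HandleByVal decides how a byval argument is split between the r0-r3
// argument registers and the stack: it rounds the alignment up to at least 4,
// burns "waste" registers to satisfy that alignment, reserves the in-register
// portion [ByValRegBegin, ByValRegEnd), and shrinks Size to the part that
// still has to be passed on the stack.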
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {
  Alignment = std::max(Alignment, Align(4));

  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)

  unsigned Excess = 4 * (ARM::R4 - Reg);

  if (NSAAOffset != 0 && Size > Excess) {

  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)

  Size = std::max<int>(Size - Excess, 0);
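// IsEligibleForTailCallOptimization checks whether a call may be lowered as a
// tail call: there must be a free register to hold an indirect callee, the
// struct-return convention and calling conventions of caller and callee must
// be compatible, and any callee-saved registers used to pass parameters must
// be preserved identically by both conventions.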
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
  if (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect) {
    for (Register R : {ARM::R0, ARM::R1, ARM::R2, ARM::R3})
      AddressRegisters.insert(R);
      AddressRegisters.insert(ARM::R12);
      AddressRegisters.erase(AL.getLocReg());
    if (AddressRegisters.empty()) {
      LLVM_DEBUG(dbgs() << "false (no reg to hold function pointer)\n");

                      << " (guaranteed tail-call CC)\n");
    return CalleeCC == CallerCC;

  bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (isCalleeStructRet != isCallerStructRet) {

      (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
       TT.isOSBinFormatMachO())) {

          getEffectiveCallingConv(CalleeCC, isVarArg),
          getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) {

  LLVM_DEBUG(dbgs() << "false (parameters in CSRs do not match)\n");

  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
  else if (IntKind == "SWI" || IntKind == "UNDEF")
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  bool isLittleEndian = Subtarget->isLittle();

      "secure entry function would return value through pointer",
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    auto RetVT = Outs[realRVLocIdx].ArgVT;

                               DAG.getVTList(MVT::i32, MVT::i32), Half);
                               HalfGPRs.getValue(isLittleEndian ? 0 : 1), Glue);
                               HalfGPRs.getValue(isLittleEndian ? 1 : 0), Glue);

                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
                               fmrrd.getValue(isLittleEndian ? 0 : 1), Glue);
                               fmrrd.getValue(isLittleEndian ? 1 : 0), Glue);

  return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
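// isUsedByReturnOnly reports whether node N feeds only a return (directly or
// through glued CopyToReg nodes), so that a libcall producing it may itself be
// emitted as a tail call; when it returns true, Chain is updated to the chain
// the tail call should use.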
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
  if (!N->hasNUsesOfValue(1, 0))

  if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
    TCChain = Copy->getOperand(0);

    SDValue UseChain = U->getOperand(0);
    if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue)

    if (!Copy->hasOneUse())
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      TCChain = Copy->getOperand(0);

  bool HasRet = false;
bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {

         && "LowerWRITE_REGISTER called for non-i64 type argument.");

  EVT PtrVT = Op.getValueType();

  if (Subtarget->genExecuteOnly()) {
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());

    return LowerGlobalAddress(GA, DAG);

  Align CPAlign = CP->getAlign();
    CPAlign = std::max(CPAlign, Align(4));
  if (CP->isMachineConstantPoolEntry())

  if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps())

  unsigned ARMPCLabelIndex = 0;
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  if (!IsPositionIndependent) {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;

  if (!IsPositionIndependent)