#include "llvm/IR/IntrinsicsARM.h"

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

cl::desc("Enable / disable ARM interworking (for debugging only)"),
cl::desc("Enable / disable promotion of unnamed_addr constants into "
         "constant pools"),
cl::desc("Maximum size of constant to promote into a constant pool"),
cl::desc("Maximum size of ALL constants to promote into a constant pool"),
cl::desc("Maximum interleave factor for MVE VLDn to generate."),

ARM::R0, ARM::R1, ARM::R2, ARM::R3
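// Helpers that register NEON vector types with the legalizer. addTypeForNEON
// promotes loads/stores of VT to PromotedLdStVT when the two types differ and
// special-cases f64 and i32 element types; addDRTypeForNEON and
// addQRTypeForNEON register 64-bit (D-register) and 128-bit (Q-register)
// vector types by delegating to it. (Descriptive summary; most of the
// original function bodies are not shown in this excerpt.)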
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
  if (ElemTy != MVT::f64)
  if (ElemTy == MVT::i32) {
      VT != MVT::v2i64 && VT != MVT::v1i64)

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::f64);

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::v2f64);
void ARMTargetLowering::setAllExpand(MVT VT) {

void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
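// addAllExtLoads marks every extending load from the narrower type To to the
// wider type From with the given LegalizeAction. addMVEVectorTypes (below)
// uses it to register the MVE vector types: 128-bit integer vectors
// (v16i8/v8i16/v4i32), float vectors (v8f16/v4f32, gated on the HasMVEFP
// argument), 64-bit-element vectors (v2i64/v2f64), and the predicate types
// (v16i1/v8i1/v4i1/v2i1). Descriptive summary of the visible fragments.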
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };
  for (auto VT : IntTypes) {

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {

  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {

  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {

  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
  for (auto VT : pTypes) {
  for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
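  // The loop above walks every RTLIB libcall, presumably to configure a
  // default calling convention for each (its body is not shown here). The
  // tables below then override individual libcalls: with Thumb + VFP2 base
  // the hard-float "*vfp" comparison helpers such as __unordsf2vfp and
  // __unorddf2vfp are selected, each paired with the ISD condition code to
  // test on the helper's result.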
  if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
      Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
    static const struct {
      const char *const Name;

      { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
      { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },

    for (const auto &LC : LibraryCalls) {
    static const struct {
      const char *const Name;

    for (const auto &LC : LibraryCalls) {

    static const struct {
      const char *const Name;
    } MemOpsLibraryCalls[] = {

    for (const auto &LC : MemOpsLibraryCalls) {

    static const struct {
      const char *const Name;

    for (const auto &LC : LibraryCalls) {

    static const struct {
      const char *const Name;

    for (const auto &LC : LibraryCalls) {
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
    if (Subtarget->hasFullFP16()) {
    if (Subtarget->hasBF16()) {
      setAllExpand(MVT::bf16);
      if (!Subtarget->hasFullFP16())

      addAllExtLoads(VT, InnerVT, Expand);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());
  if (Subtarget->hasLOB()) {
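  // Register the NEON D- and Q-register vector types when NEON is available;
  // half-precision (v8f16/v4f16) and bfloat (v8bf16/v4bf16) vectors are only
  // added when the corresponding FullFP16/BF16 features are present.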
  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {

  if (Subtarget->hasNEON()) {
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,

    for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {

  if (Subtarget->hasMVEIntegerOps()) {

  if (Subtarget->hasMVEFloatOps()) {

  if (!Subtarget->hasFP64()) {

  if (Subtarget->hasFullFP16()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasDSP()) {

  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() ||
      (Subtarget->isThumb2() && !Subtarget->hasDSP()))

  if (Subtarget->hasMVEIntegerOps())

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())

  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {

  if (Subtarget->hasPerfMon())

  if (!Subtarget->hasV6Ops())
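  // Integer division: hasDivide reflects whether hardware divide is available
  // in the current instruction set (Thumb vs. ARM mode). Windows targets
  // without Thumb-mode divide fall back to runtime division helpers via the
  // LibraryCalls tables below and get no standalone remainder operation
  // (HasStandaloneRem = false). Summary of the visible fragments.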
  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {

    HasStandaloneRem = false;

      const char *const Name;
    } LibraryCalls[] = {

    for (const auto &LC : LibraryCalls) {

      const char *const Name;
    } LibraryCalls[] = {

    for (const auto &LC : LibraryCalls) {
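  // Atomics: InsertFencesForAtomic chooses between surrounding atomic
  // operations with explicit barriers and relying on acquire/release
  // instructions. The checks below pick the strategy from the architecture
  // version, M-class profile, and whether a data barrier instruction exists.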
  InsertFencesForAtomic = false;

      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    if (!Subtarget->isThumb() || !Subtarget->isMClass())

    if (!Subtarget->hasAcquireRelease() ||

      InsertFencesForAtomic = true;

    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

  if (!InsertFencesForAtomic) {

      (!Subtarget->isMClass() && Subtarget->hasV6Ops())) {

  } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) ||
             Subtarget->hasForced32BitAtomics()) {
  if (!Subtarget->hasV6Ops()) {

  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasFullFP16())

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFP64()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasMVEIntegerOps())

  if (Subtarget->hasV6Ops())

  if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||

  if (Subtarget->isThumb() || Subtarget->isThumb2())

  return Subtarget->useSoftFloat();
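// Representative register class selection: 64-bit scalars and 64-bit vectors
// map onto the D-register class (ARM::DPRRegClass), and the wider vector
// types are modelled in terms of it, with a cost value returned alongside the
// class. Summary of the switch fragments that follow.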
std::pair<const TargetRegisterClass *, uint8_t>

  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;

  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

  return std::make_pair(RRC, Cost);
#define MAKE_CASE(V) \

  if ((Subtarget->hasMVEIntegerOps() &&
       (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
        VT == MVT::v16i8)) ||
      (Subtarget->hasMVEFloatOps() &&
       (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16)))

  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;

  if (Subtarget->hasMVEIntegerOps()) {
    if (VT == MVT::v4i64)
      return &ARM::MQQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::MQQQQPRRegClass;
                                                Align &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))

  unsigned NumVals = N->getNumValues();
  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)

  if (!N->isMachineOpcode())

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
                                                 bool isVarArg) const {

  else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() &&

                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);

                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);

                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasFullFP16()) {
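// LowerCallResult copies the values produced by a call out of the registers
// assigned by the calling convention. f64/v2f64 results arrive split across
// two GPRs, hence the paired Lo/Hi copies below, each of which updates Chain
// and the glue value (InGlue) threading the copies together.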
SDValue ARMTargetLowering::LowerCallResult(

  for (unsigned i = 0; i != RVLocs.size(); ++i) {

    if (i == 0 && isThisReturn) {
             "unexpected return calling convention register assignment");

      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);

      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);

      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);

      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
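// Call-lowering helpers: computeAddrForCallArg returns the stack address and
// MachinePointerInfo for an argument passed in memory, taking the SP
// adjustment (SPDiff) for tail calls into account; the fragment after it
// appears to be PassF64ArgInRegs, which splits an f64 argument into a GPR
// pair, with the half order chosen by Subtarget->isLittle().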
std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
    bool IsTailCall, int SPDiff) const {

  return std::make_pair(DstAddr, DstInfo);

    RegsToPassVector &RegsToPass,

      DAG.getVTList(MVT::i32, MVT::i32), Arg);
  unsigned id = Subtarget->isLittle() ? 0 : 1;

  std::tie(DstAddr, DstInfo) =
      computeAddrForCallArg(dl, DAG, NextVA, StackPtr, IsTailCall, SPDiff);
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall = false;
  bool isSibCall = false;
  bool PreferIndirect = false;
  bool GuardWithBTI = false;

      !Subtarget->noBTIAtReturnTwice())

    isCmseNSCall = true;

  if (isa<GlobalAddressSDNode>(Callee)) {
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
          return isa<Instruction>(U) &&
                 cast<Instruction>(U)->getParent() == BB;

    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, isVarArg, isStructRet,
           "site marked musttail");

  unsigned NumBytes = CCInfo.getStackSize();

  if (isTailCall && !isSibCall) {
    NumBytes = alignTo(NumBytes, StackAlign);
    SPDiff = NumReusableBytes - NumBytes;
    if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff)
  RegsToPassVector RegsToPass;

  bool AfterFormalArgLoads = false;

  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    SDValue Arg = OutVals[realArgIdx];
    bool isByVal = Flags.isByVal();

    if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) {
      AfterFormalArgLoads = true;

    auto ArgVT = Outs[realArgIdx].ArgVT;
    if (isCmseNSCall && (ArgVT == MVT::f16)) {

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, isTailCall, SPDiff);

        PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, isTailCall, SPDiff);

        std::tie(DstAddr, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

      PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);
      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;

      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      unsigned offset = 0;

      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {
        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          RegsToPass.push_back(std::make_pair(j, Load));

        offset = RegEnd - RegBegin;
        CCInfo.nextInRegsParam();

      if (Flags.getByValSize() > 4*offset) {
        std::tie(Dst, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode };

      std::tie(DstAddr, DstInfo) =
          computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

  if (!MemOpChains.empty())

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
  bool isDirect = false;

    GVal = G->getGlobal();
  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;

  if (Subtarget->genLongCalls()) {
           "long-calls codegen is not position independent!");

    if (isa<GlobalAddressSDNode>(Callee)) {
      if (Subtarget->genExecuteOnly()) {

      const char *Sym = S->getSymbol();

      if (Subtarget->genExecuteOnly()) {

  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {

      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);

      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {

             "Windows is the only supported COFF target");

      else if (!TM.shouldAssumeDSOLocal(*GVal->getParent(), GVal))

    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
                                  ARMPCLabelIndex, 4);

    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");

        "call to non-secure function would "
        "require passing arguments on stack",

        "call to non-secure function would return value through pointer",
  if (Subtarget->isThumb()) {
    else if (isCmseNSCall)
    else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())

    if (!isDirect && !Subtarget->hasV5TOps())
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&

  if (isTailCall && !isSibCall) {

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

      isThisReturn = false;

  assert(Mask && "Missing call preserved mask for calling convention");

    Ops.push_back(InGlue);

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);

  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, CalleePopBytes, InGlue, dl);

  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
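// HandleByVal decides how a byval argument is split between the R0-R3
// argument registers and the stack: it rounds the alignment up to at least 4
// bytes, wastes registers as needed to satisfy that alignment, records the
// register range actually used, and shrinks Size by the portion passed in
// registers.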
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {
  Alignment = std::max(Alignment, Align(4));

  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)

  unsigned Excess = 4 * (ARM::R4 - Reg);

  if (NSAAOffset != 0 && Size > Excess) {

  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);

  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)

  Size = std::max<int>(Size - Excess, 0);
  int FI = std::numeric_limits<int>::max();

  if (!Flags.isByVal()) {
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())

  assert(FI != std::numeric_limits<int>::max());
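// Tail-call eligibility: the checks below reject cases such as indirect calls
// with four or more outgoing arguments, struct-return mismatches between
// caller and callee, incompatible calling conventions or preserved-register
// masks, and argument layouts that would require writing to the caller's
// stack. Summary of the visible fragments.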
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
    bool isCalleeStructRet, bool isCallerStructRet,
    const bool isIndirect) const {

  if (Outs.size() >= 4 &&
      (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) {

    return CalleeCC == CallerCC;

  if (isCalleeStructRet || isCallerStructRet)

      (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))

          getEffectiveCallingConv(CalleeCC, isVarArg),
          getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))

  if (!Outs.empty()) {
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    if (CCInfo.getStackSize()) {
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        SDValue Arg = OutVals[realArgIdx];
        if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
          if (!ArgLocs[++i].isRegLoc())
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
            if (!ArgLocs[++i].isRegLoc())
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
  else if (IntKind == "SWI" || IntKind == "UNDEF")
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  bool isLittleEndian = Subtarget->isLittle();

        "secure entry function would return value through pointer",

  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    auto RetVT = Outs[realRVLocIdx].ArgVT;

                     DAG.getVTList(MVT::i32, MVT::i32), Half);

                               HalfGPRs.getValue(isLittleEndian ? 0 : 1), Glue);

                               HalfGPRs.getValue(isLittleEndian ? 1 : 0), Glue);

                    DAG.getVTList(MVT::i32, MVT::i32), Arg);

                               fmrrd.getValue(isLittleEndian ? 0 : 1), Glue);

                               fmrrd.getValue(isLittleEndian ? 1 : 0), Glue);

  return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
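// isUsedByReturnOnly checks whether a node's single result feeds only the
// return sequence (through CopyToReg nodes terminated by glue), which is what
// permits folding the operation into a tail call during legalization;
// mayBeEmittedAsTailCall is the IR-level counterpart used for the same
// purpose.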
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
  if (!N->hasNUsesOfValue(1, 0))

    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
    TCChain = Copy->getOperand(0);

      SDValue UseChain = U->getOperand(0);
      if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue)

    if (!Copy->hasOneUse())
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
    TCChain = Copy->getOperand(0);

  bool HasRet = false;

bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {

         && "LowerWRITE_REGISTER called for non-i64 type argument.");
  EVT PtrVT = Op.getValueType();

  if (Subtarget->genExecuteOnly()) {
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());

    return LowerGlobalAddress(GA, DAG);

  Align CPAlign = CP->getAlign();

  CPAlign = std::max(CPAlign, Align(4));
  if (CP->isMachineConstantPoolEntry())

  if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps())

  unsigned ARMPCLabelIndex = 0;

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  if (!IsPositionIndependent) {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;

  if (!IsPositionIndependent)
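// TLS address lowering for non-ELF targets: the Darwin path computes the
// thread-local descriptor address via LowerGlobalAddressDarwin, loads the
// accessor function pointer from it, and calls through that pointer with R0
// carrying the descriptor address; the Windows path builds the address from
// the per-module TLS data instead. Summary of the visible fragments.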
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
         "This function expects a Darwin target");

  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

      MVT::i32, DL, Chain, DescAddr,

      Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),

ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,

      DAG.getVTList(MVT::i32, MVT::Other), Ops);

  const auto *GA = cast<GlobalAddressSDNode>(Op);