#include "llvm/IR/IntrinsicsARM.h"

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
          "Number of constants with their storage promoted into constant pools");

    cl::desc("Enable / disable ARM interworking (for debugging only)"),
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::desc("Maximum interleave factor for MVE VLDn to generate."),

  ARM::R0, ARM::R1, ARM::R2, ARM::R3
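// Helpers used below to register vector types with the target lowering: the
// appropriate D/Q register class is added for the type, and operations the
// vector unit cannot perform directly are promoted or expanded.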
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
  if (ElemTy != MVT::f64)
  if (ElemTy == MVT::i32) {

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::f64);

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addTypeForNEON(VT, MVT::v2f64);
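// Mark every operation on the given value type as Expand; a few trivial
// operations (loads, stores, bitcasts) are re-marked Legal afterwards.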
void ARMTargetLowering::setAllExpand(MVT VT) {
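// Register the same legalization action for sign-, zero- and any-extending
// loads from the narrower type to the wider vector type.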
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
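// Register the MVE vector types and set up the operations that are legal on
// them; integer operations are always available, floating-point ones only
// when the subtarget has MVE-FP (HasMVEFP).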
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {

  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {

  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {

  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1, MVT::v2i1};
  for (auto VT : pTypes) {
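    // MVE predicates are modelled as <N x i1> vectors; most operations on
    // them have to be expanded or custom lowered.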
  for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)

  if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
      Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
    static const struct {
      const char * const Name;
      { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
      { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
    for (const auto &LC : LibraryCalls) {

  static const struct {
    const char * const Name;
  for (const auto &LC : LibraryCalls) {

  static const struct {
    const char * const Name;
  } MemOpsLibraryCalls[] = {
  for (const auto &LC : MemOpsLibraryCalls) {

  static const struct {
    const char * const Name;
  for (const auto &LC : LibraryCalls) {

  static const struct {
    const char * const Name;
  for (const auto &LC : LibraryCalls) {
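    // Each entry in these tables overrides the default runtime library call
    // name (and, where present, its calling convention or condition code)
    // with the VFP- or platform-specific routine the subtarget requires.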
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasBF16()) {
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())

      addAllExtLoads(VT, InnerVT, Expand);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  if (Subtarget->hasLOB()) {

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
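      // With NEON, both 64-bit (D-register) and 128-bit (Q-register) vector
      // types are registered above; the f16/bf16 vector types are only added
      // when the corresponding FP16/BF16 extensions are present.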
  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {

  if (Subtarget->hasNEON()) {

  for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
  for (auto VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
  if (Subtarget->hasMVEIntegerOps()) {
  if (Subtarget->hasMVEFloatOps()) {
  if (!Subtarget->hasFP64()) {
  if (Subtarget->hasFullFP16()) {
  if (!Subtarget->hasFP16()) {
  if (Subtarget->hasDSP()) {

  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))

  if (Subtarget->hasMVEIntegerOps())

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())

  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {

  if (Subtarget->hasPerfMon())

  if (!Subtarget->hasV6Ops())

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {

    HasStandaloneRem = false;
      const char * const Name;
    } LibraryCalls[] = {
    for (const auto &LC : LibraryCalls) {
      const char * const Name;
    } LibraryCalls[] = {
    for (const auto &LC : LibraryCalls) {

  InsertFencesForAtomic = false;
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
    if (!Subtarget->hasAcquireRelease() ||
      InsertFencesForAtomic = true;
    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

  if (!InsertFencesForAtomic) {
      (!Subtarget->isMClass() && Subtarget->hasV6Ops())) {
  } else if ((Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) ||
             Subtarget->hasForced32BitAtomics()) {
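    // Subtargets without native acquire/release instructions get explicit
    // fences inserted around atomic operations (InsertFencesForAtomic); the
    // branches above choose how atomic operations are ultimately expanded.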
  if (!Subtarget->hasV6Ops()) {

  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&

  if (Subtarget->hasFullFP16()) {
  if (Subtarget->hasFullFP16())

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasNEON()) {
  if (Subtarget->hasFP64()) {
  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasNEON()) {
  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasMVEIntegerOps())

  if (Subtarget->hasV6Ops())
  if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||

  return Subtarget->useSoftFloat();
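// Pick a "representative" register class for the given value type, together
// with a cost that roughly corresponds to the number of D registers the value
// occupies; this feeds the register-pressure heuristics.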
std::pair<const TargetRegisterClass *, uint8_t>
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    RRC = &ARM::DPRRegClass;
    RRC = &ARM::DPRRegClass;
  return std::make_pair(RRC, Cost);

#define MAKE_CASE(V) \

  if ((Subtarget->hasMVEIntegerOps() &&
       (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
        VT == MVT::v16i8)) ||
      (Subtarget->hasMVEFloatOps() &&
       (VT == MVT::v2f64 || VT == MVT::v4f32 || VT == MVT::v8f16)))

  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  if (Subtarget->hasMVEIntegerOps()) {
    if (VT == MVT::v4i64)
      return &ARM::MQQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::MQQQQPRRegClass;
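  // v4i64 and v8i64 only exist here to describe the wide register tuples used
  // by the structured load/store intrinsics (NEON VLDn/VSTn and the MVE
  // equivalents), hence the QQ / QQQQ register classes.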
                                                  Align &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))

  unsigned NumVals = N->getNumValues();

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)

  if (!N->isMachineOpcode())

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
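// Each of the checks above tests for a constant shift amount of exactly 16,
// as used when recognizing top-half/bottom-half operands for the DSP-style
// multiply patterns.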
                                                 bool isVarArg) const {
  else if (Subtarget->hasFPRegs() && !Subtarget->isThumb1Only() &&

                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);

                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);

                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {

  if (Subtarget->hasFullFP16()) {
  if (Subtarget->hasFullFP16()) {

SDValue ARMTargetLowering::LowerCallResult(
    SDValue ThisVal, bool isCmseNSCall) const {

  for (unsigned i = 0; i != RVLocs.size(); ++i) {

    if (i == 0 && isThisReturn) {
             "unexpected return calling convention register assignment");

      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);

      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
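      // A value split across a register pair comes back as two i32 copies;
      // the chain and glue are threaded through both CopyFromReg nodes before
      // the halves are recombined into the wider value.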
std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
    bool IsTailCall, int SPDiff) const {
  return std::make_pair(DstAddr, DstInfo);

                                         RegsToPassVector &RegsToPass,
                             DAG.getVTList(MVT::i32, MVT::i32), Arg);
  unsigned id = Subtarget->isLittle() ? 0 : 1;
    std::tie(DstAddr, DstInfo) =
        computeAddrForCallArg(dl, DAG, NextVA, StackPtr, IsTailCall, SPDiff);

  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall = false;
  bool isSibCall = false;
  bool PreferIndirect = false;
  bool GuardWithBTI = false;

      !Subtarget->noBTIAtReturnTwice())

    isCmseNSCall = true;

  if (isa<GlobalAddressSDNode>(Callee)) {
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
          return isa<Instruction>(U) &&
                 cast<Instruction>(U)->getParent() == BB;

      IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, PreferIndirect);
           "site marked musttail");

  unsigned NumBytes = CCInfo.getStackSize();

  if (isTailCall && !isSibCall) {
    NumBytes = alignTo(NumBytes, StackAlign);
    SPDiff = NumReusableBytes - NumBytes;
    if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff)
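    // SPDiff is the byte difference between the caller's reserved outgoing
    // argument area and what this tail call needs; a negative value means the
    // callee requires more stack than the caller has available for reuse.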
  RegsToPassVector RegsToPass;

  bool AfterFormalArgLoads = false;

  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       ++i, ++realArgIdx) {
    SDValue Arg = OutVals[realArgIdx];
    bool isByVal = Flags.isByVal();

    if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) {
      AfterFormalArgLoads = true;

    auto ArgVT = Outs[realArgIdx].ArgVT;
    if (isCmseNSCall && (ArgVT == MVT::f16)) {

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, isTailCall, SPDiff);
        PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, isTailCall, SPDiff);
        std::tie(DstAddr, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
      PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);

      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;

      CSInfo.ArgRegPairs.emplace_back(VA.getLocReg(), i);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      unsigned offset = 0;

      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {
        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          RegsToPass.push_back(std::make_pair(j, Load));

        offset = RegEnd - RegBegin;
        CCInfo.nextInRegsParam();

      if (Flags.getByValSize() > 4*offset) {
        std::tie(Dst, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode };
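        // Any part of the byval argument that did not fit in the registers
        // above is copied to its stack slot via a target memcpy-style node
        // built from these operands.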
        std::tie(DstAddr, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

  if (!MemOpChains.empty())

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);

  bool isDirect = false;
    GVal = G->getGlobal();
  bool isStub = !TM.shouldAssumeDSOLocal(GVal) && Subtarget->isTargetMachO();
  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;

  if (Subtarget->genLongCalls()) {
           "long-calls codegen is not position independent!");
    if (isa<GlobalAddressSDNode>(Callee)) {
      if (Subtarget->genExecuteOnly()) {
      const char *Sym = S->getSymbol();
      if (Subtarget->genExecuteOnly()) {
  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {
      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
             "Windows is the only supported COFF target");
      else if (!TM.shouldAssumeDSOLocal(GVal))
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
                                      ARMPCLabelIndex, 4);

    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");
          "call to non-secure function would "
          "require passing arguments on stack",
          "call to non-secure function would return value through pointer",

  if (Subtarget->isThumb()) {
    else if (isCmseNSCall)
    else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
    if (!isDirect && !Subtarget->hasV5TOps())
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&

  if (isTailCall && !isSibCall) {

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

    isThisReturn = false;

  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(InGlue);

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);

  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, CalleePopBytes, InGlue, dl);

  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue(), isCmseNSCall);

void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {
  Alignment = std::max(Alignment, Align(4));

  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)

  unsigned Excess = 4 * (ARM::R4 - Reg);

  if (NSAAOffset != 0 && Size > Excess) {

  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)

  Size = std::max<int>(Size - Excess, 0);
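  // Whatever part of the byval argument fits in the remaining r0-r3 registers
  // is consumed here; Size is reduced to the portion that still has to be
  // passed on the stack.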
  int FI = std::numeric_limits<int>::max();

  if (!Flags.isByVal()) {
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())

  assert(FI != std::numeric_limits<int>::max());

bool ARMTargetLowering::IsEligibleForTailCallOptimization(
  if (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect) {
    for (Register R : {ARM::R0, ARM::R1, ARM::R2, ARM::R3})
      AddressRegisters.insert(R);
      AddressRegisters.insert(ARM::R12);
      AddressRegisters.erase(AL.getLocReg());
    if (AddressRegisters.empty())
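    // An indirect tail call needs a scratch register for the target address;
    // if every candidate (r0-r3, plus r12 when available) is already occupied
    // by outgoing arguments, the tail call cannot be formed.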
    return CalleeCC == CallerCC;

  bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (isCalleeStructRet || isCallerStructRet)

      (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))

        getEffectiveCallingConv(CalleeCC, isVarArg),
        getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))

  if (!Outs.empty()) {
    for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
         ++i, ++realArgIdx) {
      SDValue Arg = OutVals[realArgIdx];
      if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
        if (!ArgLocs[++i].isRegLoc())
        if (RegVT == MVT::v2f64) {
          if (!ArgLocs[++i].isRegLoc())
          if (!ArgLocs[++i].isRegLoc())

  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
  else if (IntKind == "SWI" || IntKind == "UNDEF")
         "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  bool isLittleEndian = Subtarget->isLittle();

        "secure entry function would return value through pointer",

  for (unsigned i = 0, realRVLocIdx = 0;
       ++i, ++realRVLocIdx) {
    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    auto RetVT = Outs[realRVLocIdx].ArgVT;

                                    DAG.getVTList(MVT::i32, MVT::i32), Half);
                               HalfGPRs.getValue(isLittleEndian ? 0 : 1), Glue);
                               HalfGPRs.getValue(isLittleEndian ? 1 : 0), Glue);
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
                             fmrrd.getValue(isLittleEndian ? 0 : 1), Glue);
                             fmrrd.getValue(isLittleEndian ? 1 : 0), Glue);
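    // f64 return values (and each half of v2f64) are moved out of the VFP
    // registers as a GPR pair and copied into consecutive return registers;
    // isLittleEndian decides which half goes into the first register.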
  return DAG.getNode(RetNode, dl, MVT::Other, RetOps);

bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
  if (!N->hasNUsesOfValue(1, 0))

    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
    TCChain = Copy->getOperand(0);

      SDValue UseChain = U->getOperand(0);
      if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue)

    if (!Copy->hasOneUse())
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
    TCChain = Copy->getOperand(0);

  bool HasRet = false;

bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {

         && "LowerWRITE_REGISTER called for non-i64 type argument.");

  EVT PtrVT = Op.getValueType();

  if (Subtarget->genExecuteOnly()) {
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());

    return LowerGlobalAddress(GA, DAG);

  Align CPAlign = CP->getAlign();
  CPAlign = std::max(CPAlign, Align(4));
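  // Constant-pool entries get at least 4-byte alignment; on execute-only
  // targets the constant is not placed in a constant pool at all and is
  // instead lowered through a global (the genExecuteOnly path above).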
  if (CP->isMachineConstantPoolEntry())

  if (Subtarget->genExecuteOnly() && !Subtarget->hasV8MBaselineOps())

  unsigned ARMPCLabelIndex = 0;
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  if (!IsPositionIndependent) {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  if (!IsPositionIndependent)

ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
         "This function expects a Darwin target");

  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

      MVT::i32, DL, Chain, DescAddr,

      Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),

ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,