#include "llvm/IR/IntrinsicsARM.h"

using namespace llvm;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");
static cl::opt<bool> ARMInterworking(
    "arm-interworking", cl::Hidden,
    cl::desc("Enable / disable ARM interworking (for debugging only)"),
    cl::init(true));
static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false));
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));
cl::opt<unsigned> MVEMaxSupportedInterleaveFactor(
    "mve-max-interleave-factor", cl::Hidden,
    cl::desc("Maximum interleave factor for MVE VLDn to generate."),
    cl::init(2));
// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
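// For a NEON vector type whose loads and stores are only legal at a wider
// type, promote LOAD/STORE to that type; division and remainder never have
// native NEON support, so they are marked Expand below.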
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType(ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType(ISD::STORE, VT, PromotedLdStVT);
  }

  setOperationAction(ISD::SHL, VT, Custom);
  setOperationAction(ISD::SRA, VT, Custom);
  setOperationAction(ISD::SRL, VT, Custom);

  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

    setOperationAction(Opcode, VT, Legal);

    setOperationAction(Opcode, VT, Legal);
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
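// Register the MVE vector types with the MQPR register class, keep the shifts
// custom-lowered, and expand everything MVE cannot do natively (integer and
// FP division, remainder, most FP transcendentals). Pre/post-incrementing and
// masked loads/stores are legal in every indexed addressing mode.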
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);

    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);

    setOperationAction(ISD::CTTZ, VT, Custom);

    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);

    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);

    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);

    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);

    setOperationAction(ISD::FEXP, VT, Expand);
  }

  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);

    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);

    setOperationAction(ISD::LOAD, VT, Custom);
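// In the ARMTargetLowering constructor: start by giving every runtime library
// call a default calling convention (AAPCS, or AAPCS-VFP on hard-float
// targets) before the per-ABI and per-OS overrides below.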
  for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
  if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
      Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },

      { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
    };

    for (const auto &LC : LibraryCalls) {
    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // ...
    };

    for (const auto &LC : LibraryCalls) {
    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } MemOpsLibraryCalls[] = {
      // ...
    };

    for (const auto &LC : MemOpsLibraryCalls) {
    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      // ...
    };

    for (const auto &LC : LibraryCalls) {
    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      // ...
    };

    for (const auto &LC : LibraryCalls) {
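// Scalar floating-point setup: the FP register classes are only usable when
// the subtarget actually has FP registers, and FP64, full FP16, and BF16
// support each gate additional legal types and operations below.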
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {

  if (!Subtarget->hasFP64())

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasBF16()) {

  if (!Subtarget->hasFullFP16())

        addAllExtLoads(VT, InnerVT, Expand);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());
  if (Subtarget->hasLOB()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasBF16()) {

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {

  if (Subtarget->hasMVEIntegerOps()) {

  if (Subtarget->hasMVEFloatOps()) {

  if (!Subtarget->hasFP64()) {

  if (Subtarget->hasFullFP16()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasDSP()) {

  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() ||
      (Subtarget->isThumb2() && !Subtarget->hasDSP()))

  if (Subtarget->hasMVEIntegerOps())

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())

  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {

  if (Subtarget->hasPerfMon())

  if (!Subtarget->hasV6Ops())

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {

    HasStandaloneRem = false;
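// There is no standalone remainder instruction here, so SREM/UREM are
// lowered through combined divide-and-remainder runtime routines registered
// in the tables below.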
    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      // ...
    };

    for (const auto &LC : LibraryCalls) {

    static const struct {
      const RTLIB::Libcall Op;
      const char *const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      // ...
    };

    for (const auto &LC : LibraryCalls) {
  InsertFencesForAtomic = false;

      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {

    if (!Subtarget->isThumb() || !Subtarget->isMClass())

    if (!Subtarget->hasAcquireRelease() ||

      InsertFencesForAtomic = true;

  if (Subtarget->hasDataBarrier())
    InsertFencesForAtomic = true;

  if (!InsertFencesForAtomic) {

      (!Subtarget->isMClass() && Subtarget->hasV6Ops())) {

  } else if (Subtarget->isMClass() && Subtarget->hasV8MBaselineOps()) {

  if (!Subtarget->hasV6Ops()) {
  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasFullFP16())

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {

  if (!Subtarget->hasFP16()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFP64()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasNEON()) {

  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasMVEIntegerOps())

  if (Subtarget->hasV6Ops())

  if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) ||

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||

  if (Subtarget->isThumb() || Subtarget->isThumb2())

  return Subtarget->useSoftFloat();
std::pair<const TargetRegisterClass *, uint8_t>

    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

    RRC = &ARM::DPRRegClass;

  return std::make_pair(RRC, Cost);

#define MAKE_CASE(V)                                                           \
  if ((Subtarget->hasMVEIntegerOps() &&

      (Subtarget->hasMVEFloatOps() &&

  if (Subtarget->hasNEON()) {
      return &ARM::QQPRRegClass;
      return &ARM::QQQQPRRegClass;

  if (Subtarget->hasMVEIntegerOps()) {
      return &ARM::MQQPRRegClass;
      return &ARM::MQQQQPRRegClass;
                                           Align &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))
  unsigned NumVals = N->getNumValues();

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);

  if (!N->isMachineOpcode())
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;

  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
                                                           bool isVarArg) const {

CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);
}

CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                   bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);
}

CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  if (Subtarget->hasFullFP16()) {

  if (Subtarget->hasFullFP16()) {
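/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.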
SDValue ARMTargetLowering::LowerCallResult(

  for (unsigned i = 0; i != RVLocs.size(); ++i) {

    if (i == 0 && isThisReturn) {
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);

      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);

      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);

        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);

        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);

    InVals.push_back(Val);
std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
    const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr,
    bool IsTailCall, int SPDiff) const {

  return std::make_pair(DstAddr, DstInfo);
                                         RegsToPassVector &RegsToPass,

  unsigned id = Subtarget->isLittle() ? 0 : 1;

    std::tie(DstAddr, DstInfo) =
        computeAddrForCallArg(dl, DAG, NextVA, StackPtr, IsTailCall, SPDiff);
    MemOpChains.push_back(
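/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.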
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isCmseNSCall = false;
  bool isSibCall = false;
  bool PreferIndirect = false;
  bool GuardWithBTI = false;

      !Subtarget->noBTIAtReturnTwice())

    isCmseNSCall = true;

  if (isa<GlobalAddressSDNode>(Callee)) {

    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();

    PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
                       return isa<Instruction>(U) &&
                              cast<Instruction>(U)->getParent() == BB;

    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, isVarArg, isStructRet,

           "site marked musttail");
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (isTailCall && !isSibCall) {

    SPDiff = NumReusableBytes - NumBytes;

    if (SPDiff < 0 && AFI->getArgRegsSaveSize() < (unsigned)-SPDiff)
  RegsToPassVector RegsToPass;

  bool AfterFormalArgLoads = false;

  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {

    bool isByVal = Flags.isByVal();

    if (isTailCall && VA.isMemLoc() && !AfterFormalArgLoads) {

      AfterFormalArgLoads = true;

    auto ArgVT = Outs[realArgIdx].ArgVT;
    if (isCmseNSCall && (ArgVT == MVT::f16)) {

      PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);

      PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);

        std::tie(DstAddr, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);
        MemOpChains.push_back(DAG.getStore(Chain, dl, Op1, DstAddr, DstInfo));

      PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                       StackPtr, MemOpChains, isTailCall, SPDiff);

               "unexpected calling convention register assignment");
               "unexpected use of 'returned'");
        isThisReturn = true;

      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      unsigned offset = 0;

      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {

          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));

        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();

        std::tie(Dst, DstInfo) =
            computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode };

      std::tie(DstAddr, DstInfo) =
          computeAddrForCallArg(dl, DAG, VA, StackPtr, isTailCall, SPDiff);

      MemOpChains.push_back(Store);
  if (!MemOpChains.empty())

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
                             RegsToPass[i].second, InFlag);
  bool isDirect = false;

    GV = G->getGlobal();

  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;

  if (Subtarget->genLongCalls()) {
           "long-calls codegen is not position independent!");

    if (isa<GlobalAddressSDNode>(Callee)) {

      const char *Sym = S->getSymbol();
                                      ARMPCLabelIndex, 0);

  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {

      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);

      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {

               "Windows is the only supported COFF target");

      else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))

    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
                                      ARMPCLabelIndex, 4);

    assert(!isARMFunc && !isDirect &&
           "Cannot handle call to ARM function or direct call");

                "call to non-secure function would "
                "require passing arguments on stack",

            "call to non-secure function would return value through pointer",

  if (Subtarget->isThumb()) {
    else if (isCmseNSCall)
    else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())

    if (!isDirect && !Subtarget->hasV5TOps())
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
  if (isTailCall && !isSibCall) {

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
                                  RegsToPass[i].second.getValueType()));

    isThisReturn = false;

    assert(Mask && "Missing call preserved mask for calling convention");

    Ops.push_back(InFlag);

  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);

  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
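/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to insure
/// this.
//
// For illustration (hypothetical values): a 16-byte byval argument whose
// first word lands in r2 consumes r2-r3; the remaining 8 bytes go on the
// stack, and Size is reduced by the 8 bytes passed in registers.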
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {

  unsigned AlignInRegs = Alignment.value() / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)

  if (NSAAOffset != 0 && Size > Excess) {

  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);

  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)

  Size = std::max<int>(Size - Excess, 0);
  unsigned Bytes = Arg.getValueSizeInBits() / 8;

  Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();

    SDValue Ptr = Ld->getBasePtr();
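/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.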
bool ARMTargetLowering::IsEligibleForTailCallOptimization(

    bool isCalleeStructRet, bool isCallerStructRet,

    const bool isIndirect) const {

  if (Outs.size() >= 4 &&
      (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) {

    return CalleeCC == CallerCC;

  if (isCalleeStructRet || isCallerStructRet)

      (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))

          getEffectiveCallingConv(CalleeCC, isVarArg),
          getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,

  if (CalleeCC != CallerCC) {

  if (!Outs.empty()) {

    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    if (CCInfo.getNextStackOffset()) {

      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {

            if (!ArgLocs[++i].isRegLoc())

              if (!ArgLocs[++i].isRegLoc())
            if (!ArgLocs[++i].isRegLoc())
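// Interrupt handlers need a special return sequence; which one is used is
// selected by the function's "interrupt" attribute value (an empty kind,
// "IRQ", and "FIQ" share one LR adjustment, "SWI" and "UNDEF" another).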
  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||

  else if (IntKind == "SWI" || IntKind == "UNDEF")

                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  RetOps.insert(RetOps.begin() + 1,

  RetOps.push_back(Chain);
  bool isLittleEndian = Subtarget->isLittle();

        "secure entry function would return value through pointer",

  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {

    bool ReturnF16 = false;

    auto RetVT = Outs[realRVLocIdx].ArgVT;

  RetOps.push_back(Flag);
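// A node can be folded into a tail call only if its single result feeds a
// copy whose only users are return nodes; walk the uses to verify this.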
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

    TCChain = Copy->getOperand(0);

    if (U->getOperand(U->getNumOperands() - 1).getValueType() == MVT::Glue)

    if (!Copy->hasOneUse())

    TCChain = Copy->getOperand(0);

  bool HasRet = false;
bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {

         && "LowerWRITE_REGISTER called for non-i64 type argument.");

  SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
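// With execute-only code sections the constant pool cannot live in-line with
// the code, so the constant is rewritten as a global and lowered as a global
// address instead.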
  EVT PtrVT = Op.getValueType();

  if (Subtarget->genExecuteOnly()) {
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());

    return LowerGlobalAddress(GA, DAG);

  if (CP->isMachineConstantPoolEntry())
  unsigned ARMPCLabelIndex = 0;

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  if (!IsPositionIndependent) {

  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;

  if (!IsPositionIndependent)
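// Darwin TLS goes through a TLV descriptor: materialize the descriptor's
// address, load the access function stored in its first slot, and call it to
// obtain the variable's address.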
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,

         "This function expects a Darwin target");

  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,

  const auto *GA = cast<GlobalAddressSDNode>(Op);

  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;

  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;

        PtrVT, dl, Chain, Offset,
    Chain = Offset.getValue(1);

        PtrVT, dl, Chain, Offset,

        PtrVT, dl, Chain, Offset,
    return LowerGlobalTLSAddressDarwin(Op, DAG);

    return LowerGlobalTLSAddressWindows(Op, DAG);

    return LowerToTLSGeneralDynamicModel(GA, DAG);

    return LowerToTLSExecModels(GA, DAG, model);
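// Constant-pool promotion: a local, constant, unnamed_addr global used only
// within a single function may have its storage placed directly in the
// constant pool, provided its initializer needs no dynamic relocation and
// its size can be padded to a multiple of 4 bytes.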
  while (!Worklist.empty()) {

    if (isa<ConstantExpr>(U)) {

    auto *I = dyn_cast<Instruction>(U);
    if (!I || I->getParent()->getParent() != F)
  auto *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer() ||
      !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
      !GVar->hasLocalLinkage())

  auto *Init = GVar->getInitializer();
      Init->needsDynamicRelocation())

  auto *CDAInit = dyn_cast<ConstantDataArray>(Init);

  unsigned RequiredPadding = 4 - (Size % 4);
  bool PaddingPossible =
      RequiredPadding == 4 || (CDAInit && CDAInit->isString());

  unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);