46#include "llvm/IR/IntrinsicsHexagon.h"
69#define DEBUG_TYPE "hexagon-lowering"
73 cl::desc(
"Control jump table emission on Hexagon target"));
77 cl::desc(
"Enable Hexagon SDNode scheduling"));
80 cl::desc(
"Enable Fast Math processing"));
84 cl::desc(
"Set minimum jump tables"));
88 cl::desc(
"Max #stores to inline memcpy"));
92 cl::desc(
"Max #stores to inline memcpy"));
96 cl::desc(
"Max #stores to inline memmove"));
101 cl::desc(
"Max #stores to inline memmove"));
105 cl::desc(
"Max #stores to inline memset"));
109 cl::desc(
"Max #stores to inline memset"));
113 cl::desc(
"Rewrite unaligned loads as a pair of aligned loads"));
118 cl::desc(
"Disable minimum alignment of 1 for "
119 "arguments passed by value on stack"));
123 class HexagonCCState :
public CCState {
124 unsigned NumNamedVarArgParams = 0;
129 unsigned NumNamedArgs)
131 NumNamedVarArgParams(NumNamedArgs) {}
132 unsigned getNumNamedVarArgParams()
const {
return NumNamedVarArgParams; }
144 Hexagon::R0, Hexagon::R1, Hexagon::R2,
145 Hexagon::R3, Hexagon::R4, Hexagon::R5
147 const unsigned NumArgRegs = std::size(ArgRegs);
151 if (RegNum != NumArgRegs && RegNum % 2 == 1)
160#include "HexagonGenCallingConv.inc"
179 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
193 return CCInfo.
CheckReturn(Outs, RetCC_Hexagon_HVX);
223 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
272 .
Case(
"r0", Hexagon::R0)
273 .
Case(
"r1", Hexagon::R1)
274 .
Case(
"r2", Hexagon::R2)
275 .
Case(
"r3", Hexagon::R3)
276 .
Case(
"r4", Hexagon::R4)
277 .
Case(
"r5", Hexagon::R5)
278 .
Case(
"r6", Hexagon::R6)
279 .
Case(
"r7", Hexagon::R7)
280 .
Case(
"r8", Hexagon::R8)
281 .
Case(
"r9", Hexagon::R9)
282 .
Case(
"r10", Hexagon::R10)
283 .
Case(
"r11", Hexagon::R11)
284 .
Case(
"r12", Hexagon::R12)
285 .
Case(
"r13", Hexagon::R13)
286 .
Case(
"r14", Hexagon::R14)
287 .
Case(
"r15", Hexagon::R15)
288 .
Case(
"r16", Hexagon::R16)
289 .
Case(
"r17", Hexagon::R17)
290 .
Case(
"r18", Hexagon::R18)
291 .
Case(
"r19", Hexagon::R19)
292 .
Case(
"r20", Hexagon::R20)
293 .
Case(
"r21", Hexagon::R21)
294 .
Case(
"r22", Hexagon::R22)
295 .
Case(
"r23", Hexagon::R23)
296 .
Case(
"r24", Hexagon::R24)
297 .
Case(
"r25", Hexagon::R25)
298 .
Case(
"r26", Hexagon::R26)
299 .
Case(
"r27", Hexagon::R27)
300 .
Case(
"r28", Hexagon::R28)
301 .
Case(
"r29", Hexagon::R29)
302 .
Case(
"r30", Hexagon::R30)
303 .
Case(
"r31", Hexagon::R31)
304 .
Case(
"r1:0", Hexagon::D0)
305 .
Case(
"r3:2", Hexagon::D1)
306 .
Case(
"r5:4", Hexagon::D2)
307 .
Case(
"r7:6", Hexagon::D3)
308 .
Case(
"r9:8", Hexagon::D4)
309 .
Case(
"r11:10", Hexagon::D5)
310 .
Case(
"r13:12", Hexagon::D6)
311 .
Case(
"r15:14", Hexagon::D7)
312 .
Case(
"r17:16", Hexagon::D8)
313 .
Case(
"r19:18", Hexagon::D9)
314 .
Case(
"r21:20", Hexagon::D10)
315 .
Case(
"r23:22", Hexagon::D11)
316 .
Case(
"r25:24", Hexagon::D12)
317 .
Case(
"r27:26", Hexagon::D13)
318 .
Case(
"r29:28", Hexagon::D14)
319 .
Case(
"r31:30", Hexagon::D15)
320 .
Case(
"sp", Hexagon::R29)
321 .
Case(
"fp", Hexagon::R30)
322 .
Case(
"lr", Hexagon::R31)
323 .
Case(
"p0", Hexagon::P0)
324 .
Case(
"p1", Hexagon::P1)
325 .
Case(
"p2", Hexagon::P2)
326 .
Case(
"p3", Hexagon::P3)
327 .
Case(
"sa0", Hexagon::SA0)
328 .
Case(
"lc0", Hexagon::LC0)
329 .
Case(
"sa1", Hexagon::SA1)
330 .
Case(
"lc1", Hexagon::LC1)
331 .
Case(
"m0", Hexagon::M0)
332 .
Case(
"m1", Hexagon::M1)
333 .
Case(
"usr", Hexagon::USR)
334 .
Case(
"ugp", Hexagon::UGP)
335 .
Case(
"cs0", Hexagon::CS0)
336 .
Case(
"cs1", Hexagon::CS1)
366 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
368 if (RVLocs[i].getValVT() == MVT::i1) {
378 Register PredR =
MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
390 RVLocs[i].getValVT(), Glue);
416 bool IsStructRet = Outs.
empty() ?
false : Outs[0].Flags.isSRet();
430 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.
getContext(),
434 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
436 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
438 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
443 IsVarArg, IsStructRet, StructAttrFlag, Outs,
452 :
"Argument must be passed on stack. "
453 "Not eligible for Tail Call\n"));
456 unsigned NumBytes = CCInfo.getStackSize();
464 bool NeedsArgAlign =
false;
465 Align LargestAlignSeen;
467 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
473 NeedsArgAlign |= ArgAlign;
499 StackPtr.getValueType());
502 LargestAlignSeen = std::max(
504 if (Flags.isByVal()) {
524 if (NeedsArgAlign && Subtarget.
hasV60Ops()) {
525 LLVM_DEBUG(
dbgs() <<
"Function needs byte stack align due to call args\n");
526 Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
527 LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
532 if (!MemOpChains.
empty())
546 for (
const auto &R : RegsToPass) {
547 Chain = DAG.
getCopyToReg(Chain, dl, R.first, R.second, Glue);
562 for (
const auto &R : RegsToPass) {
563 Chain = DAG.
getCopyToReg(Chain, dl, R.first, R.second, Glue);
578 dyn_cast<ExternalSymbolSDNode>(Callee)) {
590 for (
const auto &R : RegsToPass)
594 assert(Mask &&
"Missing call preserved mask for calling convention");
611 Chain = DAG.
getNode(OpCode, dl, NodeTys, Ops);
621 InVals, OutVals, Callee);
636 bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
637 VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
638 VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
639 VT == MVT::v4i16 || VT == MVT::v8i8 ||
648 if (!isa<ConstantSDNode>(
Offset.getNode()))
652 int32_t V = cast<ConstantSDNode>(
Offset.getNode())->getSExtValue();
661 unsigned LR = HRI.getRARegister();
668 if (
Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
673 cast<ConstantSDNode>(
Op.getOperand(i))->getZExtValue());
674 unsigned NumVals = Flags.getNumOperandRegisters();
677 switch (Flags.getKind()) {
688 for (; NumVals; --NumVals, ++i) {
689 Register Reg = cast<RegisterSDNode>(
Op.getOperand(i))->getReg();
692 HMFI.setHasClobberLR(
true);
732 unsigned IntNo = cast<ConstantSDNode>(
Op.getOperand(1))->getZExtValue();
734 if (IntNo == Intrinsic::hexagon_prefetch) {
752 assert(AlignConst &&
"Non-constant Align in LowerDYNAMIC_STACKALLOC");
758 A = HFI.getStackAlign().value();
761 dbgs () << __func__ <<
" Align: " <<
A <<
" Size: ";
762 Size.getNode()->dump(&DAG);
787 HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
792 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
794 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
796 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
805 switch (RC.
getID()) {
806 case Hexagon::IntRegsRegClassID:
807 return Reg - Hexagon::R0 + 1;
808 case Hexagon::DoubleRegsRegClassID:
809 return (Reg - Hexagon::D0 + 1) * 2;
810 case Hexagon::HvxVRRegClassID:
811 return Reg - Hexagon::V0 + 1;
812 case Hexagon::HvxWRRegClassID:
813 return (Reg - Hexagon::W0 + 1) * 2;
820 HFL.FirstVarArgSavedReg = 0;
823 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
826 bool ByVal = Flags.isByVal();
832 if (VA.
isRegLoc() && ByVal && Flags.getByValSize() <= 8)
836 (!ByVal || (ByVal && Flags.getByValSize() > 8));
865 HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.
getLocReg());
871 unsigned ObjSize = Flags.isByVal()
872 ? Flags.getByValSize()
880 if (Flags.isByVal()) {
894 for (
int i = HFL.FirstVarArgSavedReg; i < 6; i++)
895 MRI.addLiveIn(Hexagon::R0+i);
899 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
903 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
904 bool RequiresPadding = (NumVarArgRegs & 1);
905 int RegSaveAreaSizePlusPadding = RequiresPadding
906 ? (NumVarArgRegs + 1) * 4
909 if (RegSaveAreaSizePlusPadding > 0) {
912 if (!(RegAreaStart % 8))
913 RegAreaStart = (RegAreaStart + 7) & -8;
915 int RegSaveAreaFrameIndex =
917 HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
920 int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
922 HMFI.setVarArgsFrameIndex(FI);
928 HMFI.setRegSavedAreaStartFrameIndex(FI);
929 HMFI.setVarArgsFrameIndex(FI);
938 HMFI.setVarArgsFrameIndex(FI);
951 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
971 SDValue SavedRegAreaStartFrameIndex =
972 DAG.
getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
976 if (HFL.FirstVarArgSavedReg & 1)
977 SavedRegAreaStartFrameIndex =
986 SavedRegAreaStartFrameIndex,
1018 const Value *DestSV = cast<SrcValueSDNode>(
Op.getOperand(3))->getValue();
1019 const Value *SrcSV = cast<SrcValueSDNode>(
Op.getOperand(4))->getValue();
1025 false,
false,
false,
1037 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1054 auto isSExtFree = [
this](
SDValue N) {
1055 switch (
N.getOpcode()) {
1061 EVT OrigTy = cast<VTSDNode>(
Op.getOperand(1))->getVT();
1067 return ThisBW >= OrigBW;
1076 if (OpTy == MVT::i8 || OpTy == MVT::i16) {
1078 bool IsNegative =
C &&
C->getAPIntValue().isNegative();
1079 if (IsNegative || isSExtFree(
LHS) || isSExtFree(
RHS))
1091 SDValue Op1 =
Op.getOperand(1), Op2 =
Op.getOperand(2);
1095 if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
1113 EVT ValTy =
Op.getValueType();
1116 bool isVTi1Type =
false;
1117 if (
auto *CV = dyn_cast<ConstantVector>(CPN->
getConstVal())) {
1118 if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
1121 unsigned VecLen = CV->getNumOperands();
1123 "conversion only supported for pow2 VectorSize");
1124 for (
unsigned i = 0; i < VecLen; ++i)
1140 else if (isVTi1Type)
1146 assert(cast<ConstantPoolSDNode>(
T)->getTargetFlags() == TF &&
1147 "Inconsistent target flag encountered");
1149 if (IsPositionIndependent)
1156 EVT VT =
Op.getValueType();
1157 int Idx = cast<JumpTableSDNode>(
Op)->getIndex();
1177 EVT VT =
Op.getValueType();
1179 unsigned Depth = cast<ConstantSDNode>(
Op.getOperand(0))->getZExtValue();
1199 EVT VT =
Op.getValueType();
1201 unsigned Depth = cast<ConstantSDNode>(
Op.getOperand(0))->getZExtValue();
1219 auto *GAN = cast<GlobalAddressSDNode>(
Op);
1221 auto *GV = GAN->getGlobal();
1222 int64_t
Offset = GAN->getOffset();
1230 if (GO && Subtarget.
useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
1252 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
1278 unsigned char OperandFlags)
const {
1297 assert(Mask &&
"Missing call preserved mask for calling convention");
1332 if (IsPositionIndependent) {
1404 Hexagon::R0, Flags);
1540 for (
unsigned LegalIntOp :
1580 for (
unsigned IntExpOp :
1589 for (
unsigned FPExpOp :
1625 static const unsigned VectExpOps[] = {
1651 for (
unsigned VectExpOp : VectExpOps)
1665 if (VT.getVectorElementType() != MVT::i32) {
1689 for (
MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
1690 MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1705 if (NativeVT.getVectorElementType() != MVT::i1) {
1712 for (
MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
1723 for (
MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
1724 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
1730 for (
MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1736 for (
MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
1748 for (
MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
1800 for (
MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
1801 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
1829 initializeHVXLowering();
1849 setLibcallName(RTLIB::FPTOUINT_F32_I128,
"__hexagon_fixunssfti");
1850 setLibcallName(RTLIB::FPTOUINT_F64_I128,
"__hexagon_fixunsdfti");
1949HexagonTargetLowering::validateConstPtrAlignment(
SDValue Ptr,
Align NeedAlign,
1951 auto *CA = dyn_cast<ConstantSDNode>(
Ptr);
1954 unsigned Addr = CA->getZExtValue();
1957 if (HaveAlign >= NeedAlign)
1963 DiagnosticInfoMisalignedTrap(
StringRef M)
1969 return DI->
getKind() == DK_MisalignedTrap;
1977 <<
" has alignment " << HaveAlign.
value()
1978 <<
", but the memory access requires " << NeedAlign.
value();
1981 O <<
". The instruction has been replaced with a trap.";
1991 auto *
LS = cast<LSBaseSDNode>(
Op.getNode());
1992 assert(!
LS->isIndexed() &&
"Not expecting indexed ops on constant address");
2004 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
2005 return (
ID == Intrinsic::hexagon_L2_loadrd_pbr ||
2006 ID == Intrinsic::hexagon_L2_loadri_pbr ||
2007 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
2008 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
2009 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
2010 ID == Intrinsic::hexagon_L2_loadrub_pbr);
2019 V = cast<Operator>(V)->getOperand(0);
2021 V = cast<Instruction>(V)->getOperand(0);
2034 if (Blk == Parent) {
2039 BaseVal = BackEdgeVal;
2041 }
while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
2044 if (IntrBaseVal == BackEdgeVal)
2051 assert(
Idx >= 0 &&
"Unexpected index to incoming argument in PHI");
2059 Value *IntrBaseVal = V;
2066 }
while (BaseVal != V);
2069 if (
const PHINode *PN = dyn_cast<PHINode>(V))
2083 unsigned Intrinsic)
const {
2084 switch (Intrinsic) {
2085 case Intrinsic::hexagon_L2_loadrd_pbr:
2086 case Intrinsic::hexagon_L2_loadri_pbr:
2087 case Intrinsic::hexagon_L2_loadrh_pbr:
2088 case Intrinsic::hexagon_L2_loadruh_pbr:
2089 case Intrinsic::hexagon_L2_loadrb_pbr:
2090 case Intrinsic::hexagon_L2_loadrub_pbr: {
2092 auto &
DL =
I.getCalledFunction()->getParent()->getDataLayout();
2093 auto &Cont =
I.getCalledFunction()->getParent()->getContext();
2097 Type *ElTy =
I.getCalledFunction()->getReturnType()->getStructElementType(0);
2104 Info.align =
DL.getABITypeAlign(
Info.memVT.getTypeForEVT(Cont));
2108 case Intrinsic::hexagon_V6_vgathermw:
2109 case Intrinsic::hexagon_V6_vgathermw_128B:
2110 case Intrinsic::hexagon_V6_vgathermh:
2111 case Intrinsic::hexagon_V6_vgathermh_128B:
2112 case Intrinsic::hexagon_V6_vgathermhw:
2113 case Intrinsic::hexagon_V6_vgathermhw_128B:
2114 case Intrinsic::hexagon_V6_vgathermwq:
2115 case Intrinsic::hexagon_V6_vgathermwq_128B:
2116 case Intrinsic::hexagon_V6_vgathermhq:
2117 case Intrinsic::hexagon_V6_vgathermhq_128B:
2118 case Intrinsic::hexagon_V6_vgathermhwq:
2119 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2120 const Module &M = *
I.getParent()->getParent()->getParent();
2122 Type *VecTy =
I.getArgOperand(1)->getType();
2124 Info.ptrVal =
I.getArgOperand(0);
2127 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2140 return X.getValueType().isScalarInteger();
2160 unsigned DefinedValues)
const {
2165 unsigned Index)
const {
2197 unsigned Action = getPreferredHvxVectorAction(VT);
2203 if (ElemTy == MVT::i1)
2217 unsigned Action = getCustomHvxOperationAction(
Op);
2224std::pair<SDValue, int>
2225HexagonTargetLowering::getBaseAndOffset(
SDValue Addr)
const {
2228 if (
auto *CN = dyn_cast<const ConstantSDNode>(Op1.
getNode()))
2229 return {
Addr.getOperand(0), CN->getSExtValue() };
2239 const auto *SVN = cast<ShuffleVectorSDNode>(
Op);
2241 assert(AM.
size() <= 8 &&
"Unexpected shuffle mask");
2242 unsigned VecLen = AM.
size();
2246 "HVX shuffles should be legal");
2256 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2265 if (AM[
F] >=
int(VecLen)) {
2273 for (
int M : Mask) {
2275 for (
unsigned j = 0; j != ElemBytes; ++j)
2278 for (
unsigned j = 0; j != ElemBytes; ++j)
2291 for (
unsigned i = 0, e = ByteMask.
size(); i != e; ++i) {
2299 if (ByteMask.
size() == 4) {
2301 if (MaskIdx == (0x03020100 | MaskUnd))
2304 if (MaskIdx == (0x00010203 | MaskUnd)) {
2312 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
2313 if (MaskIdx == (0x06040200 | MaskUnd))
2314 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2315 if (MaskIdx == (0x07050301 | MaskUnd))
2316 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2319 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
2320 if (MaskIdx == (0x02000604 | MaskUnd))
2321 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2322 if (MaskIdx == (0x03010705 | MaskUnd))
2323 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2326 if (ByteMask.
size() == 8) {
2328 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2331 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2338 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2339 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2340 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2341 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2342 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2343 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2344 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2345 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2346 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2347 VectorPair
P = opSplit(Op0, dl, DAG);
2348 return getInstr(Hexagon::S2_packhl, dl, VecTy, {
P.second,
P.first}, DAG);
2352 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2353 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2354 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2355 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2363 switch (
Op.getOpcode()) {
2365 if (
SDValue S = cast<BuildVectorSDNode>(
Op)->getSplatValue())
2369 return Op.getOperand(0);
2379 switch (
Op.getOpcode()) {
2393 if (
SDValue Sp = getSplatValue(
Op.getOperand(1), DAG))
2406 if (
SDValue S = getVectorShiftByInt(
Op, DAG))
2420 MVT ResTy = ty(Res);
2428 auto ShiftPartI8 = [&dl, &DAG,
this](
unsigned Opc,
SDValue V,
SDValue A) {
2438 return ShiftPartI8(Opc, Val, Amt);
2440 auto [LoV, HiV] = opSplit(Val, dl, DAG);
2442 {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
2447 if (isa<ConstantSDNode>(
Op.getOperand(1).getNode()))
2456 MVT InpTy = ty(InpV);
2461 if (InpTy == MVT::i8) {
2462 if (ResTy == MVT::v8i1) {
2465 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2480 bool AllConst =
true;
2482 for (
unsigned i = 0, e = Values.
size(); i != e; ++i) {
2489 if (
auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2490 const ConstantInt *CI = CN->getConstantIntValue();
2492 }
else if (
auto *CN = dyn_cast<ConstantFPSDNode>(
V.getNode())) {
2493 const ConstantFP *CF = CN->getConstantFPValue();
2510 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2514 if (!isUndef(Elem[
First]))
2522 return getZero(dl, VecTy, DAG);
2524 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2529 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2530 Consts[1]->getZExtValue() << 16;
2534 if (ElemTy == MVT::f16) {
2541 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);
2545 if (ElemTy == MVT::i8) {
2548 int32_t
V = (Consts[0]->getZExtValue() & 0xFF) |
2549 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2550 (Consts[2]->getZExtValue() & 0xFF) << 16 |
2551 Consts[3]->getZExtValue() << 24;
2556 bool IsSplat =
true;
2557 for (
unsigned i =
First+1; i != Num; ++i) {
2558 if (Elem[i] == Elem[
First] || isUndef(Elem[i]))
2574 for (
unsigned i = 0; i != 4; ++i) {
2584 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2589 dbgs() <<
"VecTy: " << VecTy <<
'\n';
2601 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2605 if (!isUndef(Elem[
First]))
2613 return getZero(dl, VecTy, DAG);
2616 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2617 bool IsSplat =
true;
2618 for (
unsigned i =
First+1; i != Num; ++i) {
2619 if (Elem[i] == Elem[
First] || isUndef(Elem[i]))
2638 for (
unsigned i = 0; i != Num; ++i)
2639 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() &
Mask);
2648 : buildVector32(Elem.
take_front(Num/2), dl, HalfTy, DAG);
2651 : buildVector32(Elem.
drop_front(Num/2), dl, HalfTy, DAG);
2652 return getCombine(
H, L, dl, VecTy, DAG);
2659 MVT VecTy = ty(VecV);
2663 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
2668 assert((VecWidth % ElemWidth) == 0);
2669 assert(VecWidth == 32 || VecWidth == 64);
2672 MVT ScalarTy = tyScalar(VecTy);
2678 if (
auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) {
2679 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2680 if (VecWidth == 64 && ValWidth == 32) {
2681 assert(Off == 0 || Off == 32);
2682 ExtV =
Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
2683 }
else if (Off == 0 && (ValWidth % 8) == 0) {
2690 {VecV, WidthV, OffV});
2693 if (ty(IdxV) != MVT::i32)
2698 {VecV, WidthV, OffV});
2708HexagonTargetLowering::extractVectorPred(
SDValue VecV,
SDValue IdxV,
2713 MVT VecTy = ty(VecV);
2717 "Vector elements should equal vector width size");
2718 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2729 if (ValWidth == 1) {
2730 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2740 unsigned Scale = VecWidth / ValWidth;
2744 assert(ty(IdxV) == MVT::i32);
2745 unsigned VecRep = 8 / VecWidth;
2753 T1 = LoHalf(T1, DAG);
2754 T1 = expandPredicate(T1, dl, DAG);
2765 MVT VecTy = ty(VecV);
2767 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
2771 assert(VecWidth == 32 || VecWidth == 64);
2772 assert((VecWidth % ValWidth) == 0);
2788 unsigned W =
C->getZExtValue() * ValWidth;
2791 {VecV, ValV, WidthV, OffV});
2793 if (ty(IdxV) != MVT::i32)
2797 {VecV, ValV, WidthV, OffV});
2804HexagonTargetLowering::insertVectorPred(
SDValue VecV,
SDValue ValV,
2807 MVT VecTy = ty(VecV);
2810 if (ValTy == MVT::i1) {
2811 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2817 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {
Ins}, DAG);
2828 for (
unsigned R = Scale;
R > 1;
R /= 2) {
2829 ValR = contractPredicate(ValR, dl, DAG);
2830 ValR = getCombine(DAG.
getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
2842HexagonTargetLowering::expandPredicate(
SDValue Vec32,
const SDLoc &dl,
2844 assert(ty(Vec32).getSizeInBits() == 32);
2853HexagonTargetLowering::contractPredicate(
SDValue Vec64,
const SDLoc &dl,
2855 assert(ty(Vec64).getSizeInBits() == 64);
2861 {0, 2, 4, 6, 1, 3, 5, 7});
2862 return extractVector(S, DAG.
getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
2886 MVT ValTy = ty(Val);
2891 if (ValLen == ResLen)
2894 const SDLoc &dl(Val);
2896 assert(ResLen % ValLen == 0);
2899 for (
unsigned i = 1, e = ResLen / ValLen; i <
e; ++i)
2908 MVT ElemTy = ty(
Hi);
2937 return buildVector32(Ops, dl, VecTy, DAG);
2939 return buildVector64(Ops, dl, VecTy, DAG);
2941 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2943 bool All0 =
true, All1 =
true;
2945 auto *CN = dyn_cast<ConstantSDNode>(
P.getNode());
2946 if (CN ==
nullptr) {
2947 All0 = All1 =
false;
2963 SDValue Z = getZero(dl, MVT::i32, DAG);
2966 for (
unsigned i = 0; i != 8; ++i) {
2968 Rs[i] = DAG.
getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2971 for (
unsigned i = 0, e =
A.size()/2; i != e; ++i)
2975 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2988 return getCombine(
Op.getOperand(1),
Op.getOperand(0), dl, VecTy, DAG);
2992 if (ElemTy == MVT::i1) {
2993 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
2994 MVT OpTy = ty(
Op.getOperand(0));
3007 for (
SDValue P :
Op.getNode()->op_values()) {
3009 for (
unsigned R = Scale; R > 1; R /= 2) {
3010 W = contractPredicate(W, dl, DAG);
3011 W = getCombine(DAG.
getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
3019 Words[IdxW ^ 1].
clear();
3021 for (
unsigned i = 0, e = Words[IdxW].
size(); i != e; i += 2) {
3022 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
3025 {W0, W1, WidthV, WidthV});
3033 assert(Scale == 2 && Words[IdxW].
size() == 2);
3035 SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
3047 return extractVector(Vec,
Op.getOperand(1),
SDLoc(
Op), ElemTy, ty(
Op), DAG);
3053 return extractVector(
Op.getOperand(0),
Op.getOperand(1),
SDLoc(
Op),
3054 ty(
Op), ty(
Op), DAG);
3060 return insertVector(
Op.getOperand(0),
Op.getOperand(1),
Op.getOperand(2),
3068 return insertVector(
Op.getOperand(0), ValV,
Op.getOperand(2),
3093 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
3100 LN = cast<LoadSDNode>(
NL.getNode());
3104 if (!validateConstPtrAlignment(LN->
getBasePtr(), ClaimAlign, dl, DAG))
3105 return replaceMemWithUndef(
Op, DAG);
3111 SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
3130 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
3132 SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
3139 SN = cast<StoreSDNode>(NS.getNode());
3143 if (!validateConstPtrAlignment(SN->
getBasePtr(), ClaimAlign, dl, DAG))
3144 return replaceMemWithUndef(
Op, DAG);
3148 if (ClaimAlign < NeedAlign)
3157 MVT LoadTy = ty(
Op);
3160 if (HaveAlign >= NeedAlign)
3169 bool DoDefault =
false;
3180 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
3199 unsigned LoadLen = NeedAlign;
3202 auto BO = getBaseAndOffset(
Base);
3203 unsigned BaseOpc = BO.first.getOpcode();
3207 if (BO.second % LoadLen != 0) {
3209 DAG.
getConstant(BO.second % LoadLen, dl, MVT::i32));
3210 BO.second -= BO.second % LoadLen;
3225 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen,
Align(LoadLen),
3226 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
3227 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
3244 auto *CY = dyn_cast<ConstantSDNode>(
Y);
3252 unsigned Opc =
Op.getOpcode();
3256 assert(VY != 0 &&
"This should have been folded");
3281 unsigned Opc =
Op.getOpcode();
3288 EVT CarryTy =
C.getValueType();
3290 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
3309 unsigned OffsetReg = Hexagon::R28;
3325 unsigned Opc =
Op.getOpcode();
3331 if (isHvxOperation(
Op.getNode(), DAG)) {
3333 if (
SDValue V = LowerHvxOperation(
Op, DAG))
3340 Op.getNode()->dumpr(&DAG);
3342 errs() <<
"Error: check for a non-legal type in this operation\n";
3392 if (isHvxOperation(
N, DAG)) {
3393 LowerHvxOperationWrapper(
N,
Results, DAG);
3399 unsigned Opc =
N->getOpcode();
3423 if (isHvxOperation(
N, DAG)) {
3424 ReplaceHvxNodeResults(
N,
Results, DAG);
3430 switch (
N->getOpcode()) {
3437 if (
N->getValueType(0) == MVT::i8) {
3438 if (
N->getOperand(0).getValueType() == MVT::v8i1) {
3439 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3440 N->getOperand(0), DAG);
3452 if (isHvxOperation(
N, DCI.
DAG)) {
3453 if (
SDValue V = PerformHvxDAGCombine(
N, DCI))
3460 unsigned Opc =
Op.getOpcode();
3466 EVT TruncTy =
Op.getValueType();
3482 switch (
P.getOpcode()) {
3486 return getZero(dl, ty(
Op), DCI.
DAG);
3499 Op.getOperand(2),
Op.getOperand(1));
3507 MVT TruncTy = ty(
Op);
3510 if (ty(Elem0) == TruncTy)
3513 if (ty(Elem0).bitsGT(TruncTy))
3520 if (ty(
Op) != MVT::i64)
3531 auto *Amt = dyn_cast<ConstantSDNode>(Shl.
getOperand(1));
3532 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
3533 unsigned A = Amt->getZExtValue();
3555 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3567 if (Constraint.
size() == 1) {
3568 switch (Constraint[0]) {
3583std::pair<unsigned, const TargetRegisterClass*>
3587 if (Constraint.
size() == 1) {
3588 switch (Constraint[0]) {
3592 return {0u,
nullptr};
3598 return {0u, &Hexagon::IntRegsRegClass};
3601 return {0u, &Hexagon::DoubleRegsRegClass};
3606 return {0u,
nullptr};
3607 return {0u, &Hexagon::ModRegsRegClass};
3611 return {0u,
nullptr};
3614 return {0u, &Hexagon::HvxQRRegClass};
3620 return {0u,
nullptr};
3622 return {0u, &Hexagon::HvxVRRegClass};
3625 return {0u, &Hexagon::HvxVRRegClass};
3626 return {0u, &Hexagon::HvxWRRegClass};
3628 return {0u, &Hexagon::HvxWRRegClass};
3632 return {0u,
nullptr};
3643 bool ForCodeSize)
const {
3673 int Scale = AM.
Scale;
3697 return Imm >= -512 && Imm <= 511;
3707 bool IsCalleeStructRet,
3708 bool IsCallerStructRet,
3715 bool CCMatch = CallerCC == CalleeCC;
3723 if (!isa<GlobalAddressSDNode>(Callee) &&
3724 !isa<ExternalSymbolSDNode>(Callee)) {
3744 if (IsCalleeStructRet || IsCallerStructRet)
3767 if (
Op.size() >= 8 &&
Op.isAligned(
Align(8)))
3769 if (
Op.size() >= 4 &&
Op.isAligned(
Align(4)))
3771 if (
Op.size() >= 2 &&
Op.isAligned(
Align(2)))
3781 return allowsHvxMemoryAccess(SVT, Flags,
Fast);
3788 unsigned *
Fast)
const {
3791 return allowsHvxMisalignedMemoryAccesses(SVT, Flags,
Fast);
3797std::pair<const TargetRegisterClass*, uint8_t>
3805 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3807 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3809 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3821 auto *L = cast<LoadSDNode>(Load);
3822 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3828 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3836 AdjustHvxInstrPostInstrSelection(
MI, Node);
3845 assert((SZ == 32 || SZ == 64) &&
"Only 32/64-bit atomic loads supported");
3846 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3847 : Intrinsic::hexagon_L4_loadd_locked;
3852 return Builder.CreateBitCast(Call, ValueTy);
3866 assert((SZ == 32 || SZ == 64) &&
"Only 32/64-bit atomic stores supported");
3867 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3868 : Intrinsic::hexagon_S4_stored_locked;
3871 Val =
Builder.CreateBitCast(Val, CastTy);
3890 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
Function Alias Analysis Results
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memcpy"))
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
static bool isBrevLdIntrinsic(const Value *Inst)
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"))
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memmove"))
static Value * getBrevLdObject(Value *V)
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"))
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))
static cl::opt< bool > EnableFastMath("ffast-math", cl::Hidden, cl::desc("Enable Fast Math processing"))
#define Hexagon_PointerSize
#define HEXAGON_LRFP_SIZE
#define HEXAGON_GOT_SYM_NAME
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are string literals.
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
const APInt & getValue() const
Return the constant as an APInt value reference.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
int64_t getSExtValue() const
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
This is the base abstract class for diagnostic reporting in the backend.
Interface for custom diagnostic printing.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
const GlobalObject * getAliaseeObject() const
bool isValidAutoIncImm(const EVT VT, const int Offset) const
Hexagon target-specific information for each MachineFunction.
int getVarArgsFrameIndex()
void setFirstNamedArgFrameIndex(int v)
void setHasEHReturn(bool H=true)
Register getStackRegister() const
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const HexagonInstrInfo * getInstrInfo() const override
const HexagonFrameLowering * getFrameLowering() const override
bool useSmallData() const
const HexagonRegisterInfo * getRegisterInfo() const override
bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const
Align getTypeAlignment(MVT Ty) const
unsigned getVectorLength() const
bool useHVX128BOps() const