#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "hexagon-lowering"
static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables", cl::init(true),
    cl::Hidden, cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool> EnableHexSDNodeSched("enable-hexagon-sdnode-sched",
    cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
    cl::init(5), cl::desc("Set minimum jump tables"));

static cl::opt<bool> ConstantLoadsToImm("constant-loads-to-imm", cl::Hidden,
    cl::init(true), cl::desc("Convert constant loads to immediate values."));

static cl::opt<bool> AlignLoads("hexagon-align-loads", cl::Hidden,
    cl::init(false),
    cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool> DisableArgsMinAlignment(
    "hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false),
    cl::desc("Disable minimum alignment of 1 for "
             "arguments passed by value on stack"));
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = std::size(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // RegNum is an index into ArgRegs: burn the odd register so the next
  // allocation starts on an even one.
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

  // Returning false leaves the current argument unallocated; this hook only
  // pads the register sequence.
  return false;
}
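// Hexagon passes 64-bit scalars in aligned register pairs (R1:0, R3:2, R5:4),
// which is why the convention skips an odd register before a pair-typed
// value. Illustrative case (assumed behavior): for f(int a, long long b),
// 'a' lands in R0, CC_SkipOdd burns R1, and 'b' is assigned the R3:2 pair.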
#include "HexagonGenCallingConv.inc"
unsigned HexagonTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Split bool vectors into the widest predicate type the subtarget
  // supports, carrying each piece in a byte-vector register.
  if (isBoolVector && !Subtarget.useHVXOps() && isPowerOf2 && NumElts >= 8) {
    RegisterVT = MVT::v8i8;
    IntermediateVT = MVT::v8i1;
    NumIntermediates = NumElts / 8;
    return NumIntermediates;
  }
  if (isBoolVector && Subtarget.useHVX64BOps() && isPowerOf2 && NumElts >= 64) {
    RegisterVT = MVT::v64i8;
    IntermediateVT = MVT::v64i1;
    NumIntermediates = NumElts / 64;
    return NumIntermediates;
  }
  if (isBoolVector && Subtarget.useHVX128BOps() && isPowerOf2 &&
      NumElts >= 128) {
    RegisterVT = MVT::v128i8;
    IntermediateVT = MVT::v128i1;
    NumIntermediates = NumElts / 128;
    return NumIntermediates;
  }
  return TargetLoweringBase::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}
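// Worked example (no HVX): a v32i1 argument is broken into 4 intermediates
// of v8i1, each widened to a v8i8 register, so NumIntermediates == 4.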
std::pair<MVT, unsigned>
HexagonTargetLowering::handleMaskRegisterForCallingConv(
    const HexagonSubtarget &Subtarget, EVT VT) const {
  // Pick the byte-vector register type used to carry a bool-vector mask.
  if (!Subtarget.useHVXOps() && NumElems >= 8)
    return {MVT::v8i8, NumElems / 8};
  if (Subtarget.useHVX64BOps() && NumElems >= 64)
    return {MVT::v64i8, NumElems / 64};
  if (Subtarget.useHVX128BOps() && NumElems >= 128)
    return {MVT::v128i8, NumElems / 128};
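// E.g. with 64-byte HVX, a v256i1 mask would be conveyed as 4 x v64i8
// (assumed from the breakdown above: one byte per predicate bit, 64 lanes
// per register).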
  auto [RegisterVT, NumRegisters] =
  unsigned IntNo = Op.getConstantOperandVal(0);
  case Intrinsic::thread_pointer: {
    return DAG.getNode(HexagonISD::THREAD_POINTER, dl, PtrVT);
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
    return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  if (Subtarget.useHVXOps())
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
  return DAG.getNode(HexagonISD::RET_GLUE, dl, MVT::Other, RetOps);
    .Case("r0", Hexagon::R0)
    .Case("r1", Hexagon::R1)
    .Case("r2", Hexagon::R2)
    .Case("r3", Hexagon::R3)
    .Case("r4", Hexagon::R4)
    .Case("r5", Hexagon::R5)
    .Case("r6", Hexagon::R6)
    .Case("r7", Hexagon::R7)
    .Case("r8", Hexagon::R8)
    .Case("r9", Hexagon::R9)
    .Case("r10", Hexagon::R10)
    .Case("r11", Hexagon::R11)
    .Case("r12", Hexagon::R12)
    .Case("r13", Hexagon::R13)
    .Case("r14", Hexagon::R14)
    .Case("r15", Hexagon::R15)
    .Case("r16", Hexagon::R16)
    .Case("r17", Hexagon::R17)
    .Case("r18", Hexagon::R18)
    .Case("r19", Hexagon::R19)
    .Case("r20", Hexagon::R20)
    .Case("r21", Hexagon::R21)
    .Case("r22", Hexagon::R22)
    .Case("r23", Hexagon::R23)
    .Case("r24", Hexagon::R24)
    .Case("r25", Hexagon::R25)
    .Case("r26", Hexagon::R26)
    .Case("r27", Hexagon::R27)
    .Case("r28", Hexagon::R28)
    .Case("r29", Hexagon::R29)
    .Case("r30", Hexagon::R30)
    .Case("r31", Hexagon::R31)
    .Case("r1:0", Hexagon::D0)
    .Case("r3:2", Hexagon::D1)
    .Case("r5:4", Hexagon::D2)
    .Case("r7:6", Hexagon::D3)
    .Case("r9:8", Hexagon::D4)
    .Case("r11:10", Hexagon::D5)
    .Case("r13:12", Hexagon::D6)
    .Case("r15:14", Hexagon::D7)
    .Case("r17:16", Hexagon::D8)
    .Case("r19:18", Hexagon::D9)
    .Case("r21:20", Hexagon::D10)
    .Case("r23:22", Hexagon::D11)
    .Case("r25:24", Hexagon::D12)
    .Case("r27:26", Hexagon::D13)
    .Case("r29:28", Hexagon::D14)
    .Case("r31:30", Hexagon::D15)
    .Case("sp", Hexagon::R29)
    .Case("fp", Hexagon::R30)
    .Case("lr", Hexagon::R31)
    .Case("p0", Hexagon::P0)
    .Case("p1", Hexagon::P1)
    .Case("p2", Hexagon::P2)
    .Case("p3", Hexagon::P3)
    .Case("sa0", Hexagon::SA0)
    .Case("lc0", Hexagon::LC0)
    .Case("sa1", Hexagon::SA1)
    .Case("lc1", Hexagon::LC1)
    .Case("m0", Hexagon::M0)
    .Case("m1", Hexagon::M1)
    .Case("usr", Hexagon::USR)
    .Case("ugp", Hexagon::UGP)
    .Case("cs0", Hexagon::CS0)
    .Case("cs1", Hexagon::CS1)
  if (Subtarget.useHVXOps())
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    if (RVLocs[i].getValVT() == MVT::i1) {
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
                                 RVLocs[i].getValVT(), Glue);
  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
  if (Subtarget.useHVXOps())
                             IsVarArg, IsStructRet, StructAttrFlag, Outs,
                    : "Argument must be passed on stack. "
                      "Not eligible for Tail Call\n"));
  bool NeedsArgAlign = false;
  Align LargestAlignSeen;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;
                                    StackPtr.getValueType());
      LargestAlignSeen = std::max(
    if (Flags.isByVal()) {
  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
  if (!MemOpChains.empty())
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  for (const auto &R : RegsToPass)
  assert(Mask && "Missing call preserved mask for calling convention");
    return DAG.getNode(HexagonISD::TC_RETURN, dl, MVT::Other, Ops);
  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, {MVT::Other, MVT::Glue}, Ops);
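// HexagonISD::CALLnr is the variant used when the callee is known not to
// return (DoesNotReturn), so no return-value handling needs to follow it.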
                         InVals, OutVals, Callee);
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||
  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
  unsigned LR = HRI.getRARegister();
  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
  unsigned NumVals = Flags.getNumOperandRegisters();
  switch (Flags.getKind()) {
      for (; NumVals; --NumVals, ++i) {
          HMFI.setHasClobberLR(true);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
  return DAG.getNode(HexagonISD::READTIMER, dl, VTs, Chain);
  unsigned IntNo = Op.getConstantOperandVal(1);
  if (IntNo == Intrinsic::hexagon_prefetch) {
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");
  auto &HFI = *Subtarget.getFrameLowering();
    A = HFI.getStackAlign().value();
    dbgs() << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;
  if (Subtarget.useHVXOps())
  switch (RC.getID()) {
  case Hexagon::IntRegsRegClassID:
    return Reg - Hexagon::R0 + 1;
  case Hexagon::DoubleRegsRegClassID:
    return (Reg - Hexagon::D0 + 1) * 2;
  case Hexagon::HvxVRRegClassID:
    return Reg - Hexagon::V0 + 1;
  case Hexagon::HvxWRRegClassID:
    return (Reg - Hexagon::W0 + 1) * 2;
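// This maps a just-allocated register to the index of the next free single
// register. Pair classes (DoubleRegs, HvxWR) advance by two because each
// pair covers two consecutive single registers: e.g. D1 is R3:2, so after
// allocating D1 the next single register is R4.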
  HFL.FirstVarArgSavedReg = 0;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    bool ByVal = Flags.isByVal();
    if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
        (!ByVal || (ByVal && Flags.getByValSize() > 8));
          Subtarget.isHVXVectorType(RegVT));
        HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
      unsigned ObjSize = Flags.isByVal()
                             ? Flags.getByValSize()
      if (Flags.isByVal()) {
  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
      MRI.addLiveIn(Hexagon::R0+i);
  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
    int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
    bool RequiresPadding = (NumVarArgRegs & 1);
    int RegSaveAreaSizePlusPadding = RequiresPadding
                                         ? (NumVarArgRegs + 1) * 4
                                         : NumVarArgRegs * 4;
    if (RegSaveAreaSizePlusPadding > 0) {
      // Round the start of the register save area up to an 8-byte boundary.
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;
      int RegSaveAreaFrameIndex =
      HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);
      int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
      HMFI.setVarArgsFrameIndex(FI);
      HMFI.setRegSavedAreaStartFrameIndex(FI);
      HMFI.setVarArgsFrameIndex(FI);
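// Sketch of the resulting musl varargs frame layout (inferred from the code
// above): the unnamed-argument registers among R0..R5 are spilled to an
// 8-byte-aligned register save area, padded to an even number of 4-byte
// words, and the overflow area for stack-passed varargs starts right after.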
  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
    HMFI.setVarArgsFrameIndex(FI);
  if (!Subtarget.isEnvironmentMusl()) {
  auto &HFL = *Subtarget.getFrameLowering();
  SDValue SavedRegAreaStartFrameIndex =
      DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);
  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
        DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
                      SavedRegAreaStartFrameIndex,
  assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
                       false, false, nullptr, std::nullopt,
  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    assert(ElemTy.isScalarInteger());
  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
    return ThisBW >= OrigBW;
  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    assert(ElemTy.isScalarInteger());
  EVT ValTy = Op.getValueType();
  bool isVTi1Type = false;
    unsigned VecLen = CV->getNumOperands();
           "conversion only supported for pow2 VectorSize");
    for (unsigned i = 0; i < VecLen; ++i)
  else if (isVTi1Type)
         "Inconsistent target flag encountered");
  if (IsPositionIndependent)
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();
  auto &HLOF = *HTM.getObjFileLowering();
  if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
  return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
                      unsigned char OperandFlags) const {
  const auto &HRI = *Subtarget.getRegisterInfo();
  assert(Mask && "Missing call preserved mask for calling convention");
  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
  if (IsPositionIndependent) {
                               Hexagon::R0, Flags);
  switch (HTM.getTLSModel(GA->getGlobal())) {
  auto &HRI = *Subtarget.getRegisterInfo();
  if (Subtarget.isEnvironmentMusl())
  for (unsigned LegalIntOp :
  for (unsigned IntExpOp :
  static const unsigned VectExpOps[] = {
  for (unsigned VectExpOp : VectExpOps)
    if (VT.getVectorElementType() != MVT::i32) {
  for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
                       MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
    if (NativeVT.getVectorElementType() != MVT::i1) {
  for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
  for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
                 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
  for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
  for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
  for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
                 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
  if (Subtarget.hasV60Ops()) {
  if (Subtarget.hasV66Ops()) {
  if (Subtarget.hasV67Ops()) {
  if (Subtarget.useHVXOps())
    initializeHVXLowering();
HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,
  unsigned Addr = CA->getZExtValue();
  if (HaveAlign >= NeedAlign)
    DiagnosticInfoMisalignedTrap(StringRef M)
    static bool classof(const DiagnosticInfo *DI) {
      return DI->getKind() == DK_MisalignedTrap;
  raw_string_ostream O(ErrMsg);
  O << "Misaligned constant address: " << format_hex(Addr, 10)
    << " has alignment " << HaveAlign.value()
    << ", but the memory access requires " << NeedAlign.value();
    O << ". The instruction has been replaced with a trap.";
  const SDLoc &dl(Op);
  assert(!LS->isIndexed() && "Not expecting indexed ops on constant address");
  return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
          ID == Intrinsic::hexagon_L2_loadri_pbr ||
          ID == Intrinsic::hexagon_L2_loadrh_pbr ||
          ID == Intrinsic::hexagon_L2_loadruh_pbr ||
          ID == Intrinsic::hexagon_L2_loadrb_pbr ||
          ID == Intrinsic::hexagon_L2_loadrub_pbr);
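// The _pbr intrinsics are Hexagon's bit-reversed post-increment loads (brev
// addressing, used e.g. by FFT-style reorderings). getBrevLdObject() below
// only needs to recognize them so it can trace the underlying base pointer.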
    if (Blk == Parent) {
      BaseVal = BackEdgeVal;
  } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
  if (IntrBaseVal == BackEdgeVal)
  assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
  Value *IntrBaseVal = V;
  } while (BaseVal != V);
  case Intrinsic::hexagon_L2_loadrd_pbr:
  case Intrinsic::hexagon_L2_loadri_pbr:
  case Intrinsic::hexagon_L2_loadrh_pbr:
  case Intrinsic::hexagon_L2_loadruh_pbr:
  case Intrinsic::hexagon_L2_loadrb_pbr:
  case Intrinsic::hexagon_L2_loadrub_pbr: {
    auto &DL = I.getDataLayout();
    auto &Cont = I.getCalledFunction()->getParent()->getContext();
    Type *ElTy =
        I.getCalledFunction()->getReturnType()->getStructElementType(0);
    Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));
  case Intrinsic::hexagon_V6_vgathermw:
  case Intrinsic::hexagon_V6_vgathermw_128B:
  case Intrinsic::hexagon_V6_vgathermh:
  case Intrinsic::hexagon_V6_vgathermh_128B:
  case Intrinsic::hexagon_V6_vgathermhw:
  case Intrinsic::hexagon_V6_vgathermhw_128B:
  case Intrinsic::hexagon_V6_vgathermwq:
  case Intrinsic::hexagon_V6_vgathermwq_128B:
  case Intrinsic::hexagon_V6_vgathermhq:
  case Intrinsic::hexagon_V6_vgathermhq_128B:
  case Intrinsic::hexagon_V6_vgathermhwq:
  case Intrinsic::hexagon_V6_vgathermhwq_128B:
  case Intrinsic::hexagon_V6_vgather_vscattermh:
  case Intrinsic::hexagon_V6_vgather_vscattermh_128B: {
    const Module &M = *I.getParent()->getParent()->getParent();
    Type *VecTy = I.getArgOperand(1)->getType();
    Info.ptrVal = I.getArgOperand(0);
        MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
  return X.getValueType().isScalarInteger();
                                  unsigned DefinedValues) const {
                                  unsigned Index) const {
  if (Subtarget.useHVXOps()) {
    unsigned Action = getPreferredHvxVectorAction(VT);
  if (ElemTy == MVT::i1)
  if (Subtarget.useHVXOps()) {
    unsigned Action = getCustomHvxOperationAction(Op);
std::pair<SDValue, int>
HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
      return { Addr.getOperand(0), CN->getSExtValue() };
  assert(AM.size() <= 8 && "Unexpected shuffle mask");
  unsigned VecLen = AM.size();
  assert(!Subtarget.isHVXVectorType(VecTy, true) &&
         "HVX shuffles should be legal");
  if (ty(Op0) != VecTy || ty(Op1) != VecTy)
  if (AM[F] >= int(VecLen)) {
  for (int M : Mask) {
      for (unsigned j = 0; j != ElemBytes; ++j)
      for (unsigned j = 0; j != ElemBytes; ++j)
  for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
  if (ByteMask.size() == 4) {
    if (MaskIdx == (0x03020100 | MaskUnd))
    if (MaskIdx == (0x00010203 | MaskUnd)) {
        getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
    if (MaskIdx == (0x06040200 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
    if (MaskIdx == (0x07050301 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
        getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
    if (MaskIdx == (0x02000604 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
    if (MaskIdx == (0x03010705 | MaskUnd))
      return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
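// Reading the patterns: the mask is packed one source-byte index per result
// byte, little-endian. 0x03020100 is the identity for a 4-byte vector;
// 0x06040200 selects the even bytes of the 8-byte concatenation (vtrunehb)
// and 0x07050301 the odd ones (vtrunohb). MaskUnd holds 0xFF in each byte
// whose lane is undef, so undef lanes match any pattern.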
  if (ByteMask.size() == 8) {
    if (MaskIdx == (0x0706050403020100ull | MaskUnd))
    if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
    if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
      return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
      VectorPair P = opSplit(Op0, dl, DAG);
      return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
    if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
    if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
      return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
  switch (Op.getOpcode()) {
    return Op.getOperand(0);
  switch (Op.getOpcode()) {
    NewOpc = HexagonISD::VASL;
    NewOpc = HexagonISD::VASR;
    NewOpc = HexagonISD::VLSR;
  if (SDValue Sp = getSplatValue(Op.getOperand(1), DAG))
    return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), Sp);
  if (SDValue S = getVectorShiftByInt(Op, DAG))
  case HexagonISD::VASR:
  case HexagonISD::VLSR:
  case HexagonISD::VASL:
  MVT ResTy = ty(Res);
    return ShiftPartI8(Opc, Val, Amt);
  auto [LoV, HiV] = opSplit(Val, dl, DAG);
                     {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
  MVT InpTy = ty(InpV);
  if (InpTy == MVT::i8) {
    if (ResTy == MVT::v8i1) {
      return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
  bool AllConst = true;
  for (unsigned i = 0, e = Values.size(); i != e; ++i) {
      Consts[i] = ConstantInt::get(IntTy, 0);
      const ConstantInt *CI = CN->getConstantIntValue();
      const ConstantFP *CF = CN->getConstantFPValue();
      Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
    if (!isUndef(Elem[First]))
    return getZero(dl, VecTy, DAG);
  if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
      uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
                   Consts[1]->getZExtValue() << 16;
    if (ElemTy == MVT::f16) {
    SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);
  if (ElemTy == MVT::i8) {
      uint32_t V = (Consts[0]->getZExtValue() & 0xFF) |
                   (Consts[1]->getZExtValue() & 0xFF) << 8 |
                   (Consts[2]->getZExtValue() & 0xFF) << 16 |
                   Consts[3]->getZExtValue() << 24;
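// Worked example: the constant vector <i8 1, i8 2, i8 3, i8 4> packs
// little-endian into V = 0x04030201, which can then be materialized with a
// single 32-bit transfer instead of four lane inserts.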
  bool IsSplat = true;
  for (unsigned i = First+1; i != Num; ++i) {
    if (Elem[i] == Elem[First] || isUndef(Elem[i]))
  for (unsigned i = 0; i != 4; ++i) {
  SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
    dbgs() << "VecTy: " << VecTy << '\n';
  bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
    if (!isUndef(Elem[First]))
    return getZero(dl, VecTy, DAG);
  if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
    bool IsSplat = true;
    for (unsigned i = First+1; i != Num; ++i) {
      if (Elem[i] == Elem[First] || isUndef(Elem[i]))
    uint64_t Mask = (1ull << W) - 1;
    for (unsigned i = 0; i != Num; ++i)
      Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
                 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
                 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
  return getCombine(H, L, dl, VecTy, DAG);
  MVT VecTy = ty(VecV);
    return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
  assert((VecWidth % ElemWidth) == 0);
  assert(VecWidth == 32 || VecWidth == 64);
  MVT ScalarTy = tyScalar(VecTy);
    unsigned Off = IdxN->getZExtValue() * ElemWidth;
    if (VecWidth == 64 && ValWidth == 32) {
      assert(Off == 0 || Off == 32);
      ExtV = Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
    } else if (Off == 0 && (ValWidth % 8) == 0) {
      ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
                         {VecV, WidthV, OffV});
    if (ty(IdxV) != MVT::i32)
    ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
                       {VecV, WidthV, OffV});
SDValue
HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
  MVT VecTy = ty(VecV);
         "Vector elements should equal vector width size");
  assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
  if (ValWidth == 1) {
    SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
    return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
  unsigned Scale = VecWidth / ValWidth;
  assert(ty(IdxV) == MVT::i32);
  unsigned VecRep = 8 / VecWidth;
  T1 = LoHalf(T1, DAG);
  T1 = expandPredicate(T1, dl, DAG);
  MVT VecTy = ty(VecV);
    return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
  assert(VecWidth == 32 || VecWidth == 64);
  assert((VecWidth % ValWidth) == 0);
  unsigned VW = ty(ValV).getSizeInBits();
    unsigned W = C->getZExtValue() * ValWidth;
    InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
                       {VecV, ValV, WidthV, OffV});
    if (ty(IdxV) != MVT::i32)
    InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
                       {VecV, ValV, WidthV, OffV});
SDValue
HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,
  MVT VecTy = ty(VecV);
  if (ValTy == MVT::i1) {
    SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
        DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, {ToReg, Ext, Width, Idx});
    return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Ins}, DAG);
  for (unsigned R = Scale; R > 1; R /= 2) {
    ValR = contractPredicate(ValR, dl, DAG);
    ValR = getCombine(DAG.getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
      DAG.getNode(HexagonISD::INSERT, dl, MVT::i64, {VecR, ValR, Width, Idx});
SDValue
HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
  assert(ty(Vec32).getSizeInBits() == 32);

SDValue
HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
  assert(ty(Vec64).getSizeInBits() == 64);
                                 {0, 2, 4, 6, 1, 3, 5, 7});
  return extractVector(S, DAG.getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
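// contractPredicate packs a 64-bit predicate-as-bytes value back into 32
// bits: the {0, 2, 4, 6, 1, 3, 5, 7} shuffle de-interleaves the bytes so the
// even-indexed lanes land in the low half, from which the low v4i8 is then
// extracted.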
  MVT ValTy = ty(Val);
  if (ValLen == ResLen)
  const SDLoc &dl(Val);
  assert(ResLen % ValLen == 0);
  for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)
  MVT ElemTy = ty(Hi);
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
    Ops.push_back(Op.getOperand(i));
    return buildVector32(Ops, dl, VecTy, DAG);
    return buildVector64(Ops, dl, VecTy, DAG);
  if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
    bool All0 = true, All1 = true;
      if (CN == nullptr) {
        All0 = All1 = false;
      return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
      return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);
    SDValue Z = getZero(dl, MVT::i32, DAG);
    for (unsigned i = 0; i != 8; ++i) {
    for (unsigned i = 0, e = A.size()/2; i != e; ++i)
    return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
  return getCombine(Op.getOperand(1), Op.getOperand(0), dl, VecTy, DAG);
  if (ElemTy == MVT::i1) {
    assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
    MVT OpTy = ty(Op.getOperand(0));
    assert(Scale == Op.getNumOperands() && Scale > 1);
    for (SDValue P : Op.getNode()->op_values()) {
      for (unsigned R = Scale; R > 1; R /= 2) {
        W = contractPredicate(W, dl, DAG);
        W = getCombine(DAG.getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
      Words[IdxW ^ 1].clear();
      for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
        SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
                         {W0, W1, WidthV, WidthV});
    assert(Scale == 2 && Words[IdxW].size() == 2);
    SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
    return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
  return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
                       ty(Op), ty(Op), DAG);
  return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
  return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
  bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
  if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG))
    return replaceMemWithUndef(Op, DAG);
    SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
  if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
    SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
  if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG))
    return replaceMemWithUndef(Op, DAG);
  Align NeedAlign = Subtarget.getTypeAlignment(StoreTy);
  if (ClaimAlign < NeedAlign)
  MVT LoadTy = ty(Op);
  unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy).value();
  if (HaveAlign >= NeedAlign)
  bool DoDefault = false;
  if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
  unsigned LoadLen = NeedAlign;
  auto BO = getBaseAndOffset(Base);
  unsigned BaseOpc = BO.first.getOpcode();
  if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
  if (BO.second % LoadLen != 0) {
                     DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
    BO.second -= BO.second % LoadLen;
  SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
      ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
      MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
  unsigned Opc = Op.getOpcode();
  assert(VY != 0 && "This should have been folded");
  unsigned Opc = Op.getOpcode();
    return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
  EVT CarryTy = C.getValueType();
  SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
                             {X, Y, DAG.getLogicalNOT(dl, C, CarryTy)});
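// Borrow vs. carry: Hexagon's SUBC node consumes a carry-style input, while
// the generic subtract-with-overflow nodes supply a borrow, so the incoming
// flag C is logically inverted before being fed to SUBC. (Interpretation of
// the surrounding lowering, not a quote from the ISA manual.)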
  unsigned OffsetReg = Hexagon::R28;
  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
  unsigned Opc = Op.getOpcode();
  if (isHvxOperation(Op.getNode(), DAG)) {
    if (SDValue V = LowerHvxOperation(Op, DAG))
  Op.getNode()->dumpr(&DAG);
  if (isHvxOperation(N, DAG)) {
    LowerHvxOperationWrapper(N, Results, DAG);
  unsigned Opc = N->getOpcode();
  case HexagonISD::SSAT:
  case HexagonISD::USAT:
  if (isHvxOperation(N, DAG)) {
    ReplaceHvxNodeResults(N, Results, DAG);
  switch (N->getOpcode()) {
    if (N->getValueType(0) == MVT::i8) {
      if (N->getOperand(0).getValueType() == MVT::v8i1) {
        SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
                             N->getOperand(0), DAG);
  if (isHvxOperation(N, DCI.DAG)) {
    if (SDValue V = PerformHvxDAGCombine(N, DCI))
  unsigned Opc = Op.getOpcode();
  EVT TruncTy = Op.getValueType();
    switch (P.getOpcode()) {
    case HexagonISD::PTRUE:
    case HexagonISD::PFALSE:
      return getZero(dl, ty(Op), DCI.DAG);
    if (C1->getOpcode() == HexagonISD::PTRUE) {
                         Op.getOperand(2), Op.getOperand(1));
    MVT TruncTy = ty(Op);
    if (ty(Elem0) == TruncTy)
    if (ty(Elem0).bitsGT(TruncTy))
    if (ty(Op) != MVT::i64)
    if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
      unsigned A = Amt->getZExtValue();
      return DCI.DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, {T1, T2});
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (Subtarget.useHVXOps())
std::pair<unsigned, const TargetRegisterClass*>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
        return {0u, nullptr};
        return {0u, &Hexagon::IntRegsRegClass};
        return {0u, &Hexagon::DoubleRegsRegClass};
        return {0u, nullptr};
      return {0u, &Hexagon::ModRegsRegClass};
        return {0u, nullptr};
        return {0u, &Hexagon::HvxQRRegClass};
        return {0u, nullptr};
        return {0u, &Hexagon::HvxVRRegClass};
        if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
          return {0u, &Hexagon::HvxVRRegClass};
        return {0u, &Hexagon::HvxWRRegClass};
        return {0u, &Hexagon::HvxWRRegClass};
      return {0u, nullptr};
                                       bool ForCodeSize) const {
  assert(Ty->isIntegerTy());
  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  return (BitSize > 0 && BitSize <= 64);
  if (Ty->isSized()) {
  int Scale = AM.Scale;
  return Imm >= -512 && Imm <= 511;
    bool IsCalleeStructRet,
    bool IsCallerStructRet,
  bool CCMatch = CallerCC == CalleeCC;
  if (IsCalleeStructRet || IsCallerStructRet)
    const AttributeList &FuncAttributes) const {
  if (Op.size() >= 8 && Op.isAligned(Align(8)))
  if (Op.size() >= 4 && Op.isAligned(Align(4)))
  if (Op.size() >= 2 && Op.isAligned(Align(2)))
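// Illustrative consequence of the checks above: a 16-byte memcpy with
// 8-byte-aligned pointers expands into two i64 loads/stores; drop the
// alignment to 4 and it becomes four i32 operations instead.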
  if (Subtarget.isHVXVectorType(SVT, true))
    return allowsHvxMemoryAccess(SVT, Flags, Fast);
      Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
    unsigned *Fast) const {
  if (Subtarget.isHVXVectorType(SVT, true))
    return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
std::pair<const TargetRegisterClass*, uint8_t>
    return std::make_pair(&Hexagon::HvxQRRegClass, 1);
    return std::make_pair(&Hexagon::HvxVRRegClass, 1);
    return std::make_pair(&Hexagon::HvxWRRegClass, 1);
    std::optional<unsigned> ByteOffset) const {
  std::pair<SDValue, int> BO = getBaseAndOffset(L->getBasePtr());
  if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
  return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
    AdjustHvxInstrPostInstrSelection(MI, Node);
  unsigned SZ = ValueTy->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;
      Builder.CreateIntrinsic(IntID, Addr, nullptr, "larx");
  return Builder.CreateBitCast(Call, ValueTy);
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;
  Val = Builder.CreateBitCast(Val, CastTy);
  Value *Call = Builder.CreateIntrinsic(IntID, {Addr, Val},
  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
  return Mask->getValue().isPowerOf2();
  if (N->getNumValues() != 1)
  if (!N->hasNUsesOfValue(1, 0))
  SDNode *Copy = *N->user_begin();
  if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
  bool HasRet = false;
    if (Node->getOpcode() != HexagonISD::RET_GLUE)
  Chain = Copy->getOperand(0);
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > ConstantLoadsToImm("constant-loads-to-imm", cl::Hidden, cl::init(true), cl::desc("Convert constant loads to immediate values."))
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
static bool isBrevLdIntrinsic(const Value *Inst)
static Value * getBrevLdObject(Value *V)
static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))
#define Hexagon_PointerSize
#define HEXAGON_LRFP_SIZE
#define HEXAGON_GOT_SYM_NAME
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
Register const TargetRegisterInfo * TRI
Promote Memory to Register
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
CCState - This class holds information needed while lowering arguments and return values.
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
const APInt & getValue() const
Return the constant as an APInt value reference.
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
int64_t getSExtValue() const
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
This is the base abstract class for diagnostic reporting in the backend.
Interface for custom diagnostic printing.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
int64_t getOffset() const
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
LLVM_ABI const GlobalObject * getAliaseeObject() const
Hexagon target-specific information for each MachineFunction.
int getVarArgsFrameIndex()
void setFirstNamedArgFrameIndex(int v)
void setHasEHReturn(bool H=true)
Register getStackRegister() const
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const
unsigned getVectorLength() const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isTargetCanonicalConstantNode(SDValue Op) const override
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT, unsigned ReturnReg, unsigned char OperandGlues) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue LowerCallResult(SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Return true if the target supports a memory access of this type for the given address space and align...
SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
LegalizeAction getCustomOperationAction(SDNode &Op) const override
How to legalize this custom operation?
SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame),...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, EVT) const override
Return true if an FMA operation is faster than a pair of mul and add instructions.
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
std::pair< MVT, unsigned > handleMaskRegisterForCallingConv(const HexagonSubtarget &Subtarget, EVT VT) const
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
SDValue LowerREADSTEADYCOUNTER(SDValue Op, SelectionDAG &DAG) const
Common base class shared among various IRBuilders.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
LLVM_ABI void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
unsigned getNumFixedObjects() const
Return the number of fixed objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
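The three PHINode accessors are normally used together, as in this small fragment (PN is a PHINode*):

for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
  Value *V = PN->getIncomingValue(i);         // value flowing in on edge i
  BasicBlock *Pred = PN->getIncomingBlock(i); // block that edge comes from
  (void)V; (void)Pred;
}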
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
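An SDValue is a (node, result number) pair, so inspecting one typically looks like this fragment (Op assumed in scope):

SDValue V = Op.getOperand(0);
unsigned Opc = V.getOpcode(); // opcode of the defining node
EVT VT = V.getValueType();    // type of this particular result
SDNode *N = V.getNode();
// For multi-result nodes, SDValue(N, 1) would name result #1 (e.g. a
// load's chain), assuming the node really has a second result.
(void)Opc; (void)VT; (void)N;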
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SDValue.
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value AND a token chain.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check for vector types.
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame in which InSize bytes are set up inside the CALLSEQ_START..CALLSEQ_END sequence, and OutSize specifies the part of the frame set up prior to the call.
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or truncating it.
LLVM_ABI SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
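Node construction composes these factory methods. A fragment in the style of this file, with dl, Chain, X, and Ptr assumed in scope and the types purely illustrative:

SDValue One = DAG.getConstant(1, dl, MVT::i32);
SDValue Sum = DAG.getNode(ISD::ADD, dl, MVT::i32, X, One);
SDValue St = DAG.getStore(Chain, dl, Sum, Ptr,
                          MachinePointerInfo(), Align(4));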
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
LLVM_ABI SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
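commuteMask pairs with getVectorShuffle: after swapping the two input vectors, rewriting the mask preserves the shuffle's meaning. A sketch with V1 and V2 as v4i32 values assumed in scope:

SmallVector<int, 4> Mask = {0, 4, 1, 5}; // interleave the low halves
SDValue S1 = DAG.getVectorShuffle(MVT::v4i32, dl, V1, V2, Mask);
ShuffleVectorSDNode::commuteMask(Mask);  // Mask becomes {4, 0, 5, 1}
SDValue S2 = DAG.getVectorShuffle(MVT::v4i32, dl, V2, V1, Mask);
// S1 and S2 select exactly the same elements.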
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
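StringSwitch chains Case calls and normally ends with Default. A self-contained toy example; the keys and values are made up:

#include "llvm/ADT/StringSwitch.h"

static unsigned parseLevel(llvm::StringRef S) {
  return llvm::StringSwitch<unsigned>(S)
      .Case("none", 0)
      .Case("fast", 1)
      .Case("full", 2)
      .Default(~0u); // no key matched
}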
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to make them valid.
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scalars in some contexts.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ UndefinedBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and its associated "cost".
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
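All of these setters are meant to run once in the target's TargetLowering constructor. A hedged fragment; the specific choices below are illustrative, not Hexagon's actual configuration:

addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
setMinimumJumpTableEntries(5);
setPrefLoopAlignment(Align(16));
setSchedulingPreference(Sched::VLIW);
// Must run after all register classes have been added:
computeRegisterProperties(Subtarget.getRegisterInfo());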
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
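These two expand helpers are the standard fallback when allowsMemoryAccess rejects an access. A hypothetical fragment from a LowerSTORE-style hook:

auto *SN = cast<StoreSDNode>(Op.getNode());
unsigned Fast = 0;
if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                        SN->getMemoryVT(), SN->getAddressSpace(),
                        SN->getAlign(), SN->getMemOperand()->getFlags(),
                        &Fast))
  return expandUnalignedStore(SN, DAG); // split into legal smaller stores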
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
TargetLowering(const TargetLowering &)=delete
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
Primary interface to the complete machine description for the target machine.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
@ MO_PCREL
MO_PCREL - On a symbol operand, indicates a PC-relative relocation. Used for computing a global address for PIC compilations.
@ MO_GOT
MO_GOT - Indicates a GOT-relative relocation.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision float type.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4, a CondCodeSDNode.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ GLOBAL_OFFSET_TABLE
The address of the GOT.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified scalar operands.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
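These ISD opcodes are what a LowerOperation-style dispatch switches over. A schematic fragment; the Lower* callees are hypothetical names, not the methods of this file:

switch (Op.getOpcode()) {
case ISD::LOAD:               return LowerLoad(Op, DAG);
case ISD::VECTOR_SHUFFLE:     return LowerShuffle(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerAlloca(Op, DAG);
default:                      return SDValue(); // let the legalizer handle it
}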
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
initializer< Ty > init(const Ty &Val)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
DWARFExpression::Operation Op
LLVM_ABI int getNextAvailablePluginDiagnosticKind()
Get the next available kind ID for a plugin diagnostic.
unsigned M0(unsigned Val)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
unsigned Log2(Align A)
Returns the log2 of the alignment.
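How these alignment helpers relate, written as compilable asserts (assumes llvm/Support/Alignment.h, llvm/Support/MathExtras.h, and llvm/ADT/bit.h):

Align A(16);
assert(Log2(A) == 4);                // log2 of the alignment
assert(countr_zero(A.value()) == 4); // the same quantity via bit counting
assert(isAligned(A, 32) && !isAligned(A, 24));
assert(isPowerOf2_32(32));           // valid alignments are powers of two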
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool isVector() const
Return true if this is a vector value type.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
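A typical EVT interrogation before committing to a lowering strategy (fragment; the v64i8 comparison is illustrative):

EVT VT = Op.getValueType();
if (VT.isVector() && VT.isPow2VectorType()) {
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  if (VT.isSimple() && VT.getSimpleVT() == MVT::v64i8) {
    // vector-register-sized case
  }
  (void)EltVT; (void)NumElts;
}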
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const