26#include "llvm/IR/IntrinsicsAMDGPU.h"
33#include "AMDGPUGenCallingConv.inc"
36 "amdgpu-bypass-slow-div",
37 cl::desc(
"Skip 64-bit divide for dynamic 32-bit values"),
46 if (StoreSize % 32 == 0)
203 {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
409 ISD::FROUNDEVEN, ISD::FTRUNC},
410 {MVT::f16, MVT::f32},
Legal);
416 {MVT::f16, MVT::f32, MVT::f64},
Expand);
419 {ISD::FLOG, ISD::FLOG10, ISD::FEXP, ISD::FEXP2, ISD::FEXP10}, MVT::f32,
431 if (Subtarget->has16BitInsts()) {
443 if (Subtarget->has16BitInsts()) {
451 {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
452 MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
453 MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64,
459 {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16},
466 {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32,
467 MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
468 MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32,
469 MVT::v9i32, MVT::v9f32, MVT::v10i32, MVT::v10f32,
470 MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
475 {MVT::v2f32, MVT::v2i32, MVT::v3f32, MVT::v3i32, MVT::v4f32,
476 MVT::v4i32, MVT::v5f32, MVT::v5i32, MVT::v6f32, MVT::v6i32,
477 MVT::v7f32, MVT::v7i32, MVT::v8f32, MVT::v8i32, MVT::v9f32,
478 MVT::v9i32, MVT::v10i32, MVT::v10f32, MVT::v11i32, MVT::v11f32,
479 MVT::v12i32, MVT::v12f32, MVT::v16i32, MVT::v32f32, MVT::v32i32,
480 MVT::v2f64, MVT::v2i64, MVT::v3f64, MVT::v3i64, MVT::v4f64,
481 MVT::v4i64, MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64},
487 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
488 for (
MVT VT : ScalarIntVTs) {
525 for (
auto VT : {MVT::i8, MVT::i16})
529 MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32,
530 MVT::v9i32, MVT::v10i32, MVT::v11i32, MVT::v12i32};
532 for (
MVT VT : VectorIntTypes) {
550 MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32,
551 MVT::v9f32, MVT::v10f32, MVT::v11f32, MVT::v12f32};
553 for (
MVT VT : FloatVectorTypes) {
555 {ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM,
559 ISD::FLOG, ISD::FLOG10, ISD::FPOW,
561 ISD::FMA, ISD::FRINT, ISD::FNEARBYINT,
650 const auto Flags =
Op.getNode()->getFlags();
651 if (Flags.hasNoSignedZeros())
671 case ISD::FMINNUM_IEEE:
672 case ISD::FMAXNUM_IEEE:
675 case ISD::FMINIMUMNUM:
676 case ISD::FMAXIMUMNUM:
681 case ISD::FNEARBYINT:
682 case ISD::FROUNDEVEN:
685 case AMDGPUISD::RCP_LEGACY:
686 case AMDGPUISD::RCP_IFLAG:
687 case AMDGPUISD::SIN_HW:
688 case AMDGPUISD::FMUL_LEGACY:
689 case AMDGPUISD::FMIN_LEGACY:
690 case AMDGPUISD::FMAX_LEGACY:
691 case AMDGPUISD::FMED3:
702 unsigned Opc =
N->getOpcode();
703 if (
Opc == ISD::BITCAST) {
723 return (
N->getNumOperands() > 2 &&
N->getOpcode() !=
ISD::SELECT) ||
732 return N->getValueType(0) == MVT::f32;
742 switch (
N->getOpcode()) {
747 case ISD::INLINEASM_BR:
748 case AMDGPUISD::DIV_SCALE:
757 switch (
N->getConstantOperandVal(0)) {
758 case Intrinsic::amdgcn_interp_p1:
759 case Intrinsic::amdgcn_interp_p2:
760 case Intrinsic::amdgcn_interp_mov:
761 case Intrinsic::amdgcn_interp_p1_f16:
762 case Intrinsic::amdgcn_interp_p2_f16:
782 unsigned NumMayIncreaseSize = 0;
783 MVT VT =
N->getValueType(0).getScalarType().getSimpleVT();
788 for (
const SDNode *U :
N->users()) {
823 bool ForCodeSize)
const {
825 return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
826 (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
832 return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
837 std::optional<unsigned> ByteOffset)
const {
849 EVT OldVT =
N->getValueType(0);
857 if (OldSize >= 32 && NewSize < 32 && MN->
getAlign() >=
Align(4) &&
872 return (OldSize < 32);
887 if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
892 CastTy, MMO, &
Fast) &&
908 switch (
N->getOpcode()) {
913 unsigned IntrID =
N->getConstantOperandVal(0);
917 unsigned IntrID =
N->getConstantOperandVal(1);
925 case AMDGPUISD::SETCC:
935 switch (
Op.getOpcode()) {
943 case AMDGPUISD::RCP: {
945 EVT VT =
Op.getValueType();
951 return DAG.
getNode(AMDGPUISD::RCP, SL, VT, NegSrc,
Op->getFlags());
970 return VT == MVT::f32 || VT == MVT::f64 ||
971 (Subtarget->has16BitInsts() && (VT == MVT::f16 || VT == MVT::bf16));
978 return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16 || VT == MVT::bf16;
1002 unsigned SrcSize = Source.getSizeInBits();
1005 return DestSize < SrcSize && DestSize % 32 == 0 ;
1011 unsigned SrcSize = Source->getScalarSizeInBits();
1014 if (DestSize== 16 && Subtarget->has16BitInsts())
1015 return SrcSize >= 32;
1017 return DestSize < SrcSize && DestSize % 32 == 0;
1021 unsigned SrcSize = Src->getScalarSizeInBits();
1024 if (SrcSize == 16 && Subtarget->has16BitInsts())
1025 return DestSize >= 32;
1027 return SrcSize == 32 && DestSize == 64;
1036 if (Src == MVT::i16)
1037 return Dest == MVT::i32 ||Dest == MVT::i64 ;
1039 return Src == MVT::i32 && Dest == MVT::i64;
1044 switch (
N->getOpcode()) {
1060 if (Subtarget->has16BitInsts() &&
1061 (!DestVT.
isVector() || !Subtarget->hasVOP3PInsts())) {
1063 if (!
N->isDivergent() && DestVT.
isInteger() &&
1091 "Expected shift op");
1093 SDValue ShiftLHS =
N->getOperand(0);
1108 if (
N->getValueType(0) == MVT::i32 &&
N->hasOneUse() &&
1109 (
N->user_begin()->getOpcode() ==
ISD::SRA ||
1110 N->user_begin()->getOpcode() ==
ISD::SRL))
1120 return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() ==
ISD::ZEXTLOAD &&
1121 LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
1124 SDValue LHS =
N->getOperand(0).getOperand(0);
1125 SDValue RHS =
N->getOperand(0).getOperand(1);
1126 return !(IsShiftAndLoad(LHS, RHS) || IsShiftAndLoad(RHS, LHS));
1146 return CC_AMDGPU_CS_CHAIN;
1150 return CC_AMDGPU_Func;
1176 return RetCC_SI_Shader;
1179 return RetCC_SI_Gfx;
1183 return RetCC_AMDGPU_Func;
1222 const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset();
1229 unsigned InIndex = 0;
1232 const bool IsByRef = Arg.hasByRefAttr();
1233 Type *BaseArgTy = Arg.getType();
1234 Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
1235 Align Alignment =
DL.getValueOrABITypeAlignment(
1236 IsByRef ? Arg.getParamAlign() : std::nullopt, MemArgTy);
1237 MaxAlign = std::max(Alignment, MaxAlign);
1238 uint64_t AllocSize =
DL.getTypeAllocSize(MemArgTy);
1240 uint64_t ArgOffset =
alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
1241 ExplicitArgOffset =
alignTo(ExplicitArgOffset, Alignment) + AllocSize;
1253 &Offsets, ArgOffset);
1255 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
1293 }
else if (RegisterVT.
isVector()) {
1296 assert(MemoryBits % NumElements == 0);
1300 MemoryBits / NumElements);
1318 unsigned PartOffset = 0;
1319 for (
unsigned i = 0; i != NumRegs; ++i) {
1321 BasePartOffset + PartOffset,
1339 return DAG.
getNode(AMDGPUISD::ENDPGM,
DL, MVT::Other, Chain);
1360 int ClobberedFI)
const {
1363 int64_t LastByte = FirstByte + MFI.
getObjectSize(ClobberedFI) - 1;
1374 if (FI->getIndex() < 0) {
1376 int64_t InLastByte = InFirstByte;
1379 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1380 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1402 FuncName =
G->getSymbol();
1404 FuncName =
G->getGlobal()->getName();
1439 switch (
Op.getOpcode()) {
1443 "instruction is not implemented yet!");
1455 case ISD::FROUNDEVEN:
1488 switch (
N->getOpcode()) {
1535 if (std::optional<uint32_t>
Address =
1537 if (IsNamedBarrier) {
1542 }
else if (IsNamedBarrier) {
1550 GV->
getName() !=
"llvm.amdgcn.module.lds" &&
1555 Fn,
"local memory global used by non-kernel function",
1572 "Do not know what to do with an non-zero offset");
1588 EVT VT =
Op.getValueType();
1590 unsigned OpBitSize =
Op.getOperand(0).getValueType().getSizeInBits();
1591 if (OpBitSize >= 32 && OpBitSize % 32 == 0) {
1592 unsigned NewNumElt = OpBitSize / 32;
1593 EVT NewEltVT = (NewNumElt == 1) ? MVT::i32
1595 MVT::i32, NewNumElt);
1596 for (
const SDUse &U :
Op->ops()) {
1602 Args.push_back(NewIn);
1606 NewNumElt *
Op.getNumOperands());
1608 return DAG.
getNode(ISD::BITCAST, SL, VT, BV);
1612 for (
const SDUse &U :
Op->ops())
1622 unsigned Start =
Op.getConstantOperandVal(1);
1623 EVT VT =
Op.getValueType();
1624 EVT SrcVT =
Op.getOperand(0).getValueType();
1629 assert(NumElt % 2 == 0 && NumSrcElt % 2 == 0 &&
"expect legal types");
1633 EVT NewVT = NumElt == 2
1644 return DAG.
getNode(ISD::BITCAST, SL, VT, Tmp);
1693 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, RHS, LHS);
1694 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, LHS, RHS);
1712 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, LHS, RHS);
1713 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, RHS, LHS);
1718 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, RHS, LHS);
1719 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, LHS, RHS);
1730 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, LHS, RHS);
1731 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, RHS, LHS);
1745 if ((LHS == True && RHS == False) || (LHS == False && RHS == True))
1764 if (LHS == NegTrue && CFalse && CRHS) {
1770 return DAG.
getNode(ISD::FNEG,
DL, VT, Combined);
1778std::pair<SDValue, SDValue>
1790 return std::pair(
Lo,
Hi);
1819 HiVT = NumElts - LoNumElts == 1
1822 return std::pair(LoVT, HiVT);
1827std::pair<SDValue, SDValue>
1829 const EVT &LoVT,
const EVT &HiVT,
1831 EVT VT =
N.getValueType();
1835 "More vector elements requested than available!");
1866 EVT VT =
Op.getValueType();
1878 SDValue BasePtr = Load->getBasePtr();
1879 EVT MemVT = Load->getMemoryVT();
1884 EVT LoMemVT, HiMemVT;
1892 Align BaseAlign = Load->getAlign();
1896 Load->getExtensionType(), SL, LoVT, Load->getChain(), BasePtr, SrcValue,
1897 LoMemVT, BaseAlign, Load->getMemOperand()->getFlags(), Load->getAAInfo());
1900 Load->getExtensionType(), SL, HiVT, Load->getChain(), HiPtr,
1902 Load->getMemOperand()->getFlags(), Load->getAAInfo());
1926 EVT VT =
Op.getValueType();
1927 SDValue BasePtr = Load->getBasePtr();
1928 EVT MemVT = Load->getMemoryVT();
1931 Align BaseAlign = Load->getAlign();
1936 if (NumElements != 3 ||
1937 (BaseAlign <
Align(8) &&
1941 assert(NumElements == 3);
1948 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1949 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1960 SDValue Val = Store->getValue();
1968 EVT MemVT = Store->getMemoryVT();
1969 SDValue Chain = Store->getChain();
1970 SDValue BasePtr = Store->getBasePtr();
1974 EVT LoMemVT, HiMemVT;
1984 Align BaseAlign = Store->getAlign();
1989 DAG.
getTruncStore(Chain, SL,
Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1990 Store->getMemOperand()->getFlags(), Store->getAAInfo());
1993 Store->getMemOperand()->getFlags(), Store->getAAInfo());
2004 EVT VT =
Op.getValueType();
2007 MVT IntVT = MVT::i32;
2008 MVT FltVT = MVT::f32;
2011 if (LHSSignBits < 9)
2015 if (RHSSignBits < 9)
2019 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
2020 unsigned DivBits = BitSize - SignBits;
2054 fa, DAG.
getNode(AMDGPUISD::RCP,
DL, FltVT, fb));
2057 fq = DAG.
getNode(ISD::FTRUNC,
DL, FltVT, fq);
2064 bool UseFmadFtz =
false;
2065 if (Subtarget->isGCN()) {
2073 : UseFmadFtz ? (
unsigned)AMDGPUISD::FMAD_FTZ
2081 fr = DAG.
getNode(ISD::FABS,
DL, FltVT, fr);
2084 fb = DAG.
getNode(ISD::FABS,
DL, FltVT, fb);
2120 EVT VT =
Op.getValueType();
2122 assert(VT == MVT::i64 &&
"LowerUDIVREM64 expects an i64");
2132 std::tie(LHS_Lo, LHS_Hi) = DAG.
SplitScalar(LHS,
DL, HalfVT, HalfVT);
2136 std::tie(RHS_Lo, RHS_Hi) = DAG.
SplitScalar(RHS,
DL, HalfVT, HalfVT);
2195 std::tie(Mulhi1_Lo, Mulhi1_Hi) =
2208 std::tie(Mulhi2_Lo, Mulhi2_Hi) =
2222 std::tie(Mul3_Lo, Mul3_Hi) = DAG.
SplitScalar(Mul3,
DL, HalfVT, HalfVT);
2294 REM = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, REM);
2301 for (
unsigned i = 0; i < halfBitWidth; ++i) {
2302 const unsigned bitPos = halfBitWidth - i - 1;
2325 DIV = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, DIV);
2333 EVT VT =
Op.getValueType();
2335 if (VT == MVT::i64) {
2341 if (VT == MVT::i32) {
2388 EVT VT =
Op.getValueType();
2396 if (VT == MVT::i32) {
2401 if (VT == MVT::i64 &&
2472 const unsigned FractBits = 52;
2473 const unsigned ExpBits = 11;
2489 assert(
Op.getValueType() == MVT::f64);
2499 const unsigned FractBits = 52;
2507 SignBit64 = DAG.
getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2511 = DAG.
getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2528 return DAG.
getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2536 assert(
Op.getValueType() == MVT::f64);
2569 auto VT =
Op.getValueType();
2570 auto Arg =
Op.getOperand(0u);
2582 EVT VT =
Op.getValueType();
2633 switch (Src.getOpcode()) {
2634 case ISD::FP_EXTEND:
2635 return Src.getOperand(0).getValueType() == MVT::f16;
2636 case ISD::FP16_TO_FP:
2640 unsigned IntrinsicID = Src.getConstantOperandVal(0);
2641 switch (IntrinsicID) {
2642 case Intrinsic::amdgcn_frexp_mant:
2657 return Flags.hasApproximateFuncs();
2673 EVT VT = Src.getValueType();
2684 return IsLtSmallestNormal;
2690 EVT VT = Src.getValueType();
2703std::pair<SDValue, SDValue>
2724 return {ScaledInput, IsLtSmallestNormal};
2735 EVT VT =
Op.getValueType();
2739 if (VT == MVT::f16) {
2741 assert(!Subtarget->has16BitInsts());
2742 SDValue Ext = DAG.
getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2743 SDValue Log = DAG.
getNode(AMDGPUISD::LOG, SL, MVT::f32, Ext, Flags);
2748 auto [ScaledInput, IsLtSmallestNormal] =
2751 return DAG.
getNode(AMDGPUISD::LOG, SL, VT, Src, Flags);
2771 EVT VT =
Op.getValueType();
2774 const bool IsLog10 =
Op.getOpcode() == ISD::FLOG10;
2775 assert(IsLog10 ||
Op.getOpcode() == ISD::FLOG);
2778 if (VT == MVT::f16 || Flags.hasApproximateFuncs()) {
2780 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2782 X = DAG.
getNode(ISD::FP_EXTEND,
DL, MVT::f32,
X, Flags);
2786 if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
2801 if (Subtarget->hasFastFMAF32()) {
2803 const float c_log10 = 0x1.344134p-2f;
2804 const float cc_log10 = 0x1.09f79ep-26f;
2807 const float c_log = 0x1.62e42ep-1f;
2808 const float cc_log = 0x1.efa39ep-25f;
2814 Flags.setAllowContract(
false);
2822 const float ch_log10 = 0x1.344000p-2f;
2823 const float ct_log10 = 0x1.3509f6p-18f;
2826 const float ch_log = 0x1.62e000p-1f;
2827 const float ct_log = 0x1.0bfbe8p-15f;
2839 Flags.setAllowContract(
false);
2846 const bool IsFiniteOnly =
2847 (Flags.hasNoNaNs() ||
Options.NoNaNsFPMath) && Flags.hasNoInfs();
2850 if (!IsFiniteOnly) {
2876 EVT VT = Src.getValueType();
2880 double Log2BaseInverted =
2883 if (VT == MVT::f32) {
2886 SDValue LogSrc = DAG.
getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
2893 ScaledResultOffset, Zero, Flags);
2897 if (Subtarget->hasFastFMAF32())
2908 return DAG.
getNode(
ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand,
2917 EVT VT =
Op.getValueType();
2921 if (VT == MVT::f16) {
2923 assert(!Subtarget->has16BitInsts());
2924 SDValue Ext = DAG.
getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2925 SDValue Log = DAG.
getNode(AMDGPUISD::EXP, SL, MVT::f32, Ext, Flags);
2933 return DAG.
getNode(AMDGPUISD::EXP, SL, MVT::f32, Src, Flags);
2953 SDValue Exp2 = DAG.
getNode(AMDGPUISD::EXP, SL, VT, AddInput, Flags);
2966 bool IsExp10)
const {
2969 EVT VT =
X.getValueType();
2974 return DAG.
getNode(VT == MVT::f32 ? (
unsigned)AMDGPUISD::EXP
2975 : (
unsigned)ISD::FEXP2,
2976 SL, VT,
Mul, Flags);
2982 EVT VT =
X.getValueType();
3001 SDValue Exp2 = DAG.
getNode(AMDGPUISD::EXP, SL, VT, ExpInput, Flags);
3016 const EVT VT =
X.getValueType();
3018 const unsigned Exp2Op = VT == MVT::f32 ?
static_cast<unsigned>(AMDGPUISD::EXP)
3019 :
static_cast<unsigned>(ISD::FEXP2);
3068 EVT VT =
Op.getValueType();
3072 const bool IsExp10 =
Op.getOpcode() == ISD::FEXP10;
3128 if (Subtarget->hasFastFMAF32()) {
3130 const float cc_exp = 0x1.4ae0bep-26f;
3131 const float c_exp10 = 0x1.a934f0p+1f;
3132 const float cc_exp10 = 0x1.2f346ep-24f;
3142 const float ch_exp = 0x1.714000p+0f;
3143 const float cl_exp = 0x1.47652ap-12f;
3145 const float ch_exp10 = 0x1.a92000p+1f;
3146 const float cl_exp10 = 0x1.4f0978p-11f;
3161 PL =
getMad(DAG, SL, VT, XH, CL, Mad0, Flags);
3176 DAG.
getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
3185 if (!Flags.hasNoInfs()) {
3187 DAG.
getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT);
3209 auto Opc =
Op.getOpcode();
3210 auto Arg =
Op.getOperand(0u);
3211 auto ResultVT =
Op.getValueType();
3213 if (ResultVT != MVT::i8 && ResultVT != MVT::i16)
3217 assert(ResultVT == Arg.getValueType());
3219 const uint64_t NumBits = ResultVT.getFixedSizeInBits();
3226 NewOp = DAG.
getNode(
Opc, SL, MVT::i32, NewOp);
3229 NewOp = DAG.
getNode(
Opc, SL, MVT::i32, NewOp);
3242 unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
3246 bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64;
3248 if (Src.getValueType() == MVT::i32 || Is64BitScalar) {
3262 Op.getValueType().getScalarSizeInBits(), SL, MVT::i32);
3282 OprLo = DAG.
getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
3284 OprHi = DAG.
getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
3331 if (
Signed && Subtarget->isGCN()) {
3360 ShAmt = DAG.
getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32,
Hi);
3401 if (Subtarget->isGCN())
3402 return DAG.
getNode(ISD::FLDEXP, SL, MVT::f32, FVal, ShAmt);
3411 DAG.
getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
3419 return DAG.
getNode(ISD::BITCAST, SL, MVT::f32, IVal);
3444 EVT DestVT =
Op.getValueType();
3446 EVT SrcVT = Src.getValueType();
3448 if (SrcVT == MVT::i16) {
3449 if (DestVT == MVT::f16)
3458 if (DestVT == MVT::bf16) {
3465 if (SrcVT != MVT::i64)
3468 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3480 if (DestVT == MVT::f32)
3483 assert(DestVT == MVT::f64);
3489 EVT DestVT =
Op.getValueType();
3492 EVT SrcVT = Src.getValueType();
3494 if (SrcVT == MVT::i16) {
3495 if (DestVT == MVT::f16)
3504 if (DestVT == MVT::bf16) {
3511 if (SrcVT != MVT::i64)
3516 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
3529 if (DestVT == MVT::f32)
3532 assert(DestVT == MVT::f64);
3541 EVT SrcVT = Src.getValueType();
3543 assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
3556 if (
Signed && SrcVT == MVT::f32) {
3563 DAG.
getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
3565 Trunc = DAG.
getNode(ISD::FABS, SL, SrcVT, Trunc);
3569 if (SrcVT == MVT::f64) {
3591 SL, MVT::i32, FloorMul);
3597 if (
Signed && SrcVT == MVT::f32) {
3600 Sign = DAG.
getNode(ISD::BITCAST, SL, MVT::i64,
3617 return DAG.
getNode(AMDGPUISD::FP_TO_FP16,
DL,
Op.getValueType(), N0);
3619 if (
Op->getFlags().hasApproximateFuncs()) {
3630 assert(Src.getSimpleValueType() == MVT::f64);
3634 const unsigned ExpMask = 0x7ff;
3635 const unsigned ExpBiasf64 = 1023;
3636 const unsigned ExpBiasf16 = 15;
3719 unsigned OpOpcode =
Op.getOpcode();
3720 EVT SrcVT = Src.getValueType();
3721 EVT DestVT =
Op.getValueType();
3724 if (SrcVT == MVT::f16 && DestVT == MVT::i16)
3727 if (SrcVT == MVT::bf16) {
3730 return DAG.
getNode(
Op.getOpcode(),
DL, DestVT, PromotedSrc);
3734 if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
3741 if (DestVT != MVT::i64)
3744 if (SrcVT == MVT::f16 ||
3745 (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
3751 return DAG.
getNode(Ext,
DL, MVT::i64, FpToInt32);
3754 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
3763 MVT VT =
Op.getSimpleValueType();
3777 for (
unsigned I = 0;
I < NElts; ++
I)
3792 EVT VT =
Op.getValueType();
3806 unsigned NewOpcode = Node24->
getOpcode();
3810 case Intrinsic::amdgcn_mul_i24:
3811 NewOpcode = AMDGPUISD::MUL_I24;
3813 case Intrinsic::amdgcn_mul_u24:
3814 NewOpcode = AMDGPUISD::MUL_U24;
3816 case Intrinsic::amdgcn_mulhi_i24:
3817 NewOpcode = AMDGPUISD::MULHI_I24;
3819 case Intrinsic::amdgcn_mulhi_u24:
3820 NewOpcode = AMDGPUISD::MULHI_U24;
3834 if (DemandedLHS || DemandedRHS)
3836 DemandedLHS ? DemandedLHS :
LHS,
3837 DemandedRHS ? DemandedRHS :
RHS);
3849template <
typename IntTy>
3852 if (Width +
Offset < 32) {
3854 IntTy Result =
static_cast<IntTy
>(Shl) >> (32 - Width);
3855 if constexpr (std::is_signed_v<IntTy>) {
3868 if (M->isVolatile())
4020 EVT SrcVT = Src.getValueType();
4021 if (SrcVT.
bitsGE(ExtVT)) {
4032 unsigned IID =
N->getConstantOperandVal(0);
4034 case Intrinsic::amdgcn_mul_i24:
4035 case Intrinsic::amdgcn_mul_u24:
4036 case Intrinsic::amdgcn_mulhi_i24:
4037 case Intrinsic::amdgcn_mulhi_u24:
4039 case Intrinsic::amdgcn_fract:
4040 case Intrinsic::amdgcn_rsq:
4041 case Intrinsic::amdgcn_rcp_legacy:
4042 case Intrinsic::amdgcn_rsq_legacy:
4043 case Intrinsic::amdgcn_rsq_clamp:
4044 case Intrinsic::amdgcn_tanh:
4045 case Intrinsic::amdgcn_prng_b32: {
4048 return Src.isUndef() ? Src :
SDValue();
4050 case Intrinsic::amdgcn_frexp_exp: {
4056 if (PeekSign == Src)
4088 return DAG.
getNode(ISD::BITCAST, SL, MVT::i64, Vec);
4093 EVT VT =
N->getValueType(0);
4106 switch (LHS->getOpcode()) {
4114 if (VT == MVT::i32 && RHSVal == 16 &&
X.getValueType() == MVT::i16 &&
4121 return DAG.
getNode(ISD::BITCAST, SL, MVT::i32, Vec);
4131 EVT XVT =
X.getValueType();
4164 ShiftAmt = DAG.
getNode(
ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
4181 for (
unsigned I = 0;
I != NElts; ++
I)
4182 HiAndLoOps[2 *
I + 1] = HiOps[
I];
4188 return DAG.
getNode(ISD::BITCAST, SL, VT, Vec);
4195 EVT VT =
N->getValueType(0);
4227 (ElementType.getSizeInBits() - 1)) {
4228 ShiftAmt = ShiftFullAmt;
4235 ShiftAmt = DAG.
getNode(
ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
4245 SDValue SplitLHS = DAG.
getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
4250 for (
unsigned I = 0;
I != NElts; ++
I) {
4251 HiOps[
I] = HiAndLoOps[2 *
I + 1];
4257 SDValue SplitLHS = DAG.
getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
4281 for (
unsigned I = 0;
I != NElts; ++
I) {
4282 HiAndLoOps[2 *
I + 1] = HiOps[
I];
4283 HiAndLoOps[2 *
I] = LoOps[
I];
4289 return DAG.
getNode(ISD::BITCAST, SL, VT, Vec);
4296 EVT VT =
N->getValueType(0);
4309 unsigned MaskIdx, MaskLen;
4310 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
4311 MaskIdx == RHSVal) {
4351 ShiftAmt = DAG.
getNode(
ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
4362 SDValue SplitLHS = DAG.
getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
4367 for (
unsigned I = 0;
I != NElts; ++
I)
4368 HiOps[
I] = HiAndLoOps[2 *
I + 1];
4373 SDValue SplitLHS = DAG.
getNode(ISD::BITCAST, LHSSL, ConcatType, LHS);
4387 for (
unsigned I = 0;
I != NElts; ++
I)
4388 HiAndLoOps[2 *
I] = LoOps[
I];
4393 return DAG.
getNode(ISD::BITCAST, SL, VT, Vec);
4400 EVT VT =
N->getValueType(0);
4404 if (Src.getOpcode() == ISD::BITCAST && !VT.
isVector()) {
4411 Elt0 = DAG.
getNode(ISD::BITCAST, SL,
4429 unsigned BitIndex = K->getZExtValue();
4430 unsigned PartIndex = BitIndex / SrcEltSize;
4432 if (PartIndex * SrcEltSize == BitIndex &&
4450 EVT SrcVT = Src.getValueType();
4462 const unsigned MaxCstSize =
4496 unsigned MulOpc =
Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
4497 return DAG.
getNode(MulOpc, SL, MVT::i32, N0, N1);
4500 unsigned MulLoOpc =
Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
4501 unsigned MulHiOpc =
Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
4521 EVT VT =
N->getValueType(0);
4527 if (!
N->isDivergent())
4549 if (V.hasOneUse() ||
all_of(V->users(), [](
const SDNode *U) ->
bool {
4550 return U->getOpcode() == ISD::MUL;
4559 if (
SDValue MulOper = IsFoldableAdd(N0)) {
4564 if (
SDValue MulOper = IsFoldableAdd(N1)) {
4585 if (Subtarget->hasMulU24() &&
isU24(N0, DAG) &&
isU24(N1, DAG)) {
4589 }
else if (Subtarget->hasMulI24() &&
isI24(N0, DAG) &&
isI24(N1, DAG)) {
4605 if (
N->getValueType(0) != MVT::i32)
4626 unsigned LoOpcode = 0;
4627 unsigned HiOpcode = 0;
4629 if (Subtarget->hasMulI24() &&
isI24(N0, DAG) &&
isI24(N1, DAG)) {
4632 LoOpcode = AMDGPUISD::MUL_I24;
4633 HiOpcode = AMDGPUISD::MULHI_I24;
4636 if (Subtarget->hasMulU24() &&
isU24(N0, DAG) &&
isU24(N1, DAG)) {
4639 LoOpcode = AMDGPUISD::MUL_U24;
4640 HiOpcode = AMDGPUISD::MULHI_U24;
4654 EVT VT =
N->getValueType(0);
4656 if (!Subtarget->hasMulI24() || VT.
isVector())
4665 if (Subtarget->hasSMulHi() && !
N->isDivergent())
4687 EVT VT =
N->getValueType(0);
4698 if (Subtarget->hasSMulHi() && !
N->isDivergent())
4721 unsigned Opc)
const {
4722 EVT VT =
Op.getValueType();
4725 LegalVT != MVT::i16))
4761 isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
4762 return getFFBX_U32(DAG, CmpLHS, SL,
Opc);
4771 isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
4773 return getFFBX_U32(DAG, CmpLHS, SL,
Opc);
4791 return DAG.
getNode(
Op, SL, VT, NewSelect);
4809 EVT VT =
N.getValueType();
4810 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
4811 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
4820 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
4827 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS &&
4836 bool ShouldFoldNeg =
true;
4841 ShouldFoldNeg =
false;
4843 ShouldFoldNeg =
false;
4846 if (ShouldFoldNeg) {
4847 if (LHS.getOpcode() == ISD::FABS && CRHS->
isNegative())
4863 if (LHS.getOpcode() == ISD::FNEG)
4864 NewRHS = DAG.
getNode(ISD::FNEG, SL, VT, RHS);
4870 Cond, NewLHS, NewRHS);
4872 return DAG.
getNode(LHS.getOpcode(), SL, VT, NewSelect);
4888 EVT VT =
N->getValueType(0);
4896 if (
Cond.hasOneUse()) {
4912 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
4942 if (Subtarget->hasInv2PiInlineImm() &&
isInv2Pi(
C->getValueAPF()))
4963 return ISD::FMINNUM;
4965 return ISD::FMAXNUM;
4966 case ISD::FMAXNUM_IEEE:
4967 return ISD::FMINNUM_IEEE;
4968 case ISD::FMINNUM_IEEE:
4969 return ISD::FMAXNUM_IEEE;
4971 return ISD::FMINIMUM;
4973 return ISD::FMAXIMUM;
4974 case ISD::FMAXIMUMNUM:
4975 return ISD::FMINIMUMNUM;
4976 case ISD::FMINIMUMNUM:
4977 return ISD::FMAXIMUMNUM;
4978 case AMDGPUISD::FMAX_LEGACY:
4979 return AMDGPUISD::FMIN_LEGACY;
4980 case AMDGPUISD::FMIN_LEGACY:
4981 return AMDGPUISD::FMAX_LEGACY;
5012 EVT VT =
N->getValueType(0);
5029 if (LHS.getOpcode() != ISD::FNEG)
5030 LHS = DAG.
getNode(ISD::FNEG, SL, VT, LHS);
5034 if (RHS.getOpcode() != ISD::FNEG)
5035 RHS = DAG.
getNode(ISD::FNEG, SL, VT, RHS);
5047 case AMDGPUISD::FMUL_LEGACY: {
5053 if (LHS.getOpcode() == ISD::FNEG)
5055 else if (RHS.getOpcode() == ISD::FNEG)
5058 RHS = DAG.
getNode(ISD::FNEG, SL, VT, RHS);
5078 if (LHS.getOpcode() == ISD::FNEG)
5083 MHS = DAG.
getNode(ISD::FNEG, SL, VT, MHS);
5085 if (RHS.getOpcode() != ISD::FNEG)
5086 RHS = DAG.
getNode(ISD::FNEG, SL, VT, RHS);
5099 case ISD::FMAXNUM_IEEE:
5100 case ISD::FMINNUM_IEEE:
5103 case ISD::FMINIMUMNUM:
5104 case ISD::FMAXIMUMNUM:
5105 case AMDGPUISD::FMAX_LEGACY:
5106 case AMDGPUISD::FMIN_LEGACY: {
5131 case AMDGPUISD::FMED3: {
5133 for (
unsigned I = 0;
I < 3; ++
I)
5137 if (Res.
getOpcode() != AMDGPUISD::FMED3)
5150 case ISD::FP_EXTEND:
5153 case ISD::FNEARBYINT:
5154 case ISD::FROUNDEVEN:
5157 case AMDGPUISD::RCP:
5158 case AMDGPUISD::RCP_LEGACY:
5159 case AMDGPUISD::RCP_IFLAG:
5160 case AMDGPUISD::SIN_HW: {
5192 case ISD::FP16_TO_FP: {
5199 EVT SrcVT = Src.getValueType();
5204 return DAG.
getNode(ISD::FP16_TO_FP, SL,
N->getValueType(0), IntFNeg);
5211 case ISD::BITCAST: {
5229 SDValue CastHi = DAG.
getNode(ISD::BITCAST, SL, MVT::f32, HighBits);
5235 Ops.back() = CastBack;
5281 case ISD::FP16_TO_FP: {
5282 assert(!Subtarget->has16BitInsts() &&
"should only see if f16 is illegal");
5285 EVT SrcVT = Src.getValueType();
5290 return DAG.
getNode(ISD::FP16_TO_FP, SL,
N->getValueType(0), IntFAbs);
5304 const APFloat &Val = CFP->getValueAPF();
5314 switch(
N->getOpcode()) {
5317 case ISD::BITCAST: {
5318 EVT DestVT =
N->getValueType(0);
5330 EVT SrcVT = Src.getValueType();
5362 return DAG.
getNode(ISD::BITCAST, SL, DestVT, BV);
5366 const APInt &Val =
C->getValueAPF().bitcastToAPInt();
5373 return DAG.
getNode(ISD::BITCAST, SL, DestVT, Vec);
5385 if (!(
N->getValueType(0).isVector() &&
5399 case AMDGPUISD::MUL_U24:
5400 case AMDGPUISD::MUL_I24: {
5405 case AMDGPUISD::MULHI_I24:
5406 case AMDGPUISD::MULHI_U24:
5421 case AMDGPUISD::BFE_I32:
5422 case AMDGPUISD::BFE_U32: {
5423 assert(!
N->getValueType(0).isVector() &&
5424 "Vector handling of BFE not implemented");
5437 SDValue BitsFrom =
N->getOperand(0);
5440 bool Signed =
N->getOpcode() == AMDGPUISD::BFE_I32;
5442 if (OffsetVal == 0) {
5444 unsigned SignBits =
Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
5447 if (OpSignBits >= SignBits)
5468 CVal->getSExtValue(),
5475 CVal->getZExtValue(),
5481 if ((OffsetVal + WidthVal) >= 32 &&
5482 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
5485 BitsFrom, ShiftVal);
5491 OffsetVal + WidthVal);
5509 case AMDGPUISD::RCP:
5510 case AMDGPUISD::RCP_IFLAG:
5517 case AMDGPUISD::FMAD_FTZ: {
5521 EVT VT =
N->getValueType(0);
5528 if (N0CFP && N1CFP && N2CFP) {
5529 const auto FTZ = [](
const APFloat &V) {
5530 if (V.isDenormal()) {
5531 APFloat Zero(V.getSemantics(), 0);
5532 return V.isNegative() ? -Zero : Zero;
5559 bool RawReg)
const {
5564 if (!
MRI.isLiveIn(Reg)) {
5565 VReg =
MRI.createVirtualRegister(RC);
5566 MRI.addLiveIn(Reg, VReg);
5568 VReg =
MRI.getLiveInVirtReg(Reg);
5619 DAG.
getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
5630 assert(Arg &&
"Attempting to load missing argument");
5639 unsigned Mask = Arg.
getMask();
5649 unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
5650 const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr();
5652 alignTo(ExplicitKernArgSize, Alignment) + ExplicitArgOffset;
5674 int &RefinementSteps,
5675 bool &UseOneConstNR,
5676 bool Reciprocal)
const {
5679 if (VT == MVT::f32) {
5680 RefinementSteps = 0;
5681 return DAG.
getNode(AMDGPUISD::RSQ,
SDLoc(Operand), VT, Operand);
5692 int &RefinementSteps)
const {
5695 if (VT == MVT::f32) {
5701 RefinementSteps = 0;
5702 return DAG.
getNode(AMDGPUISD::RCP,
SDLoc(Operand), VT, Operand);
5713 case Intrinsic::amdgcn_workitem_id_x:
5715 case Intrinsic::amdgcn_workitem_id_y:
5717 case Intrinsic::amdgcn_workitem_id_z:
5730 unsigned Opc =
Op.getOpcode();
5735 case AMDGPUISD::CARRY:
5736 case AMDGPUISD::BORROW: {
5741 case AMDGPUISD::BFE_I32:
5742 case AMDGPUISD::BFE_U32: {
5749 if (
Opc == AMDGPUISD::BFE_U32)
5754 case AMDGPUISD::FP_TO_FP16: {
5761 case AMDGPUISD::MUL_U24:
5762 case AMDGPUISD::MUL_I24: {
5773 LHSKnown = LHSKnown.
trunc(24);
5774 RHSKnown = RHSKnown.
trunc(24);
5776 if (
Opc == AMDGPUISD::MUL_I24) {
5779 unsigned MaxValBits = LHSValBits + RHSValBits;
5780 if (MaxValBits > 32)
5782 unsigned SignBits = 32 - MaxValBits + 1;
5790 if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
5792 else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
5797 unsigned MaxValBits = LHSValBits + RHSValBits;
5798 if (MaxValBits >= 32)
5804 case AMDGPUISD::PERM: {
5813 for (
unsigned I = 0;
I < 32;
I += 8) {
5814 unsigned SelBits = Sel & 0xff;
5819 }
else if (SelBits < 7) {
5820 SelBits = (SelBits & 3) * 8;
5823 }
else if (SelBits == 0x0c) {
5824 Known.
Zero |= 0xFFull <<
I;
5825 }
else if (SelBits > 0x0c) {
5826 Known.
One |= 0xFFull <<
I;
5832 case AMDGPUISD::BUFFER_LOAD_UBYTE: {
5836 case AMDGPUISD::BUFFER_LOAD_USHORT: {
5840 case AMDGPUISD::LDS: {
5848 case AMDGPUISD::SMIN3:
5849 case AMDGPUISD::SMAX3:
5850 case AMDGPUISD::SMED3:
5851 case AMDGPUISD::UMIN3:
5852 case AMDGPUISD::UMAX3:
5853 case AMDGPUISD::UMED3: {
5872 unsigned IID =
Op.getConstantOperandVal(0);
5874 case Intrinsic::amdgcn_workitem_id_x:
5875 case Intrinsic::amdgcn_workitem_id_y:
5876 case Intrinsic::amdgcn_workitem_id_z: {
5877 unsigned MaxValue = Subtarget->getMaxWorkitemID(
5891 unsigned Depth)
const {
5892 switch (
Op.getOpcode()) {
5893 case AMDGPUISD::BFE_I32: {
5904 return std::max(SignBits, Op0SignBits);
5907 case AMDGPUISD::BFE_U32: {
5909 return Width ? 32 - (Width->
getZExtValue() & 0x1f) : 1;
5912 case AMDGPUISD::CARRY:
5913 case AMDGPUISD::BORROW:
5915 case AMDGPUISD::BUFFER_LOAD_BYTE:
5917 case AMDGPUISD::BUFFER_LOAD_SHORT:
5919 case AMDGPUISD::BUFFER_LOAD_UBYTE:
5921 case AMDGPUISD::BUFFER_LOAD_USHORT:
5923 case AMDGPUISD::FP_TO_FP16:
5925 case AMDGPUISD::SMIN3:
5926 case AMDGPUISD::SMAX3:
5927 case AMDGPUISD::SMED3:
5928 case AMDGPUISD::UMIN3:
5929 case AMDGPUISD::UMAX3:
5930 case AMDGPUISD::UMED3: {
5943 return std::min({Tmp0, Tmp1, Tmp2});
5958 switch (
MI->getOpcode()) {
5959 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
5961 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
5963 case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
5965 case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
5967 case AMDGPU::G_AMDGPU_SMED3:
5968 case AMDGPU::G_AMDGPU_UMED3: {
5969 auto [Dst, Src0, Src1, Src2] =
MI->getFirst4Regs();
5970 unsigned Tmp2 =
Analysis.computeNumSignBits(Src2, DemandedElts,
Depth + 1);
5973 unsigned Tmp1 =
Analysis.computeNumSignBits(Src1, DemandedElts,
Depth + 1);
5976 unsigned Tmp0 =
Analysis.computeNumSignBits(Src0, DemandedElts,
Depth + 1);
5979 return std::min({Tmp0, Tmp1, Tmp2});
5989 unsigned Opcode =
Op.getOpcode();
5991 case AMDGPUISD::BFE_I32:
5992 case AMDGPUISD::BFE_U32:
6001 unsigned Depth)
const {
6002 unsigned Opcode =
Op.getOpcode();
6004 case AMDGPUISD::FMIN_LEGACY:
6005 case AMDGPUISD::FMAX_LEGACY: {
6013 case AMDGPUISD::FMUL_LEGACY:
6014 case AMDGPUISD::CVT_PKRTZ_F16_F32: {
6020 case AMDGPUISD::FMED3:
6021 case AMDGPUISD::FMIN3:
6022 case AMDGPUISD::FMAX3:
6023 case AMDGPUISD::FMINIMUM3:
6024 case AMDGPUISD::FMAXIMUM3:
6025 case AMDGPUISD::FMAD_FTZ: {
6032 case AMDGPUISD::CVT_F32_UBYTE0:
6033 case AMDGPUISD::CVT_F32_UBYTE1:
6034 case AMDGPUISD::CVT_F32_UBYTE2:
6035 case AMDGPUISD::CVT_F32_UBYTE3:
6038 case AMDGPUISD::RCP:
6039 case AMDGPUISD::RSQ:
6040 case AMDGPUISD::RCP_LEGACY:
6041 case AMDGPUISD::RSQ_CLAMP: {
6049 case AMDGPUISD::FRACT: {
6054 case AMDGPUISD::DIV_SCALE:
6055 case AMDGPUISD::DIV_FMAS:
6056 case AMDGPUISD::DIV_FIXUP:
6059 case AMDGPUISD::SIN_HW:
6060 case AMDGPUISD::COS_HW: {
6065 unsigned IntrinsicID =
Op.getConstantOperandVal(0);
6067 switch (IntrinsicID) {
6068 case Intrinsic::amdgcn_cubeid:
6069 case Intrinsic::amdgcn_cvt_off_f32_i4:
6072 case Intrinsic::amdgcn_frexp_mant: {
6077 case Intrinsic::amdgcn_cvt_pkrtz: {
6083 case Intrinsic::amdgcn_rcp:
6084 case Intrinsic::amdgcn_rsq:
6085 case Intrinsic::amdgcn_rcp_legacy:
6086 case Intrinsic::amdgcn_rsq_legacy:
6087 case Intrinsic::amdgcn_rsq_clamp:
6088 case Intrinsic::amdgcn_tanh: {
6095 case Intrinsic::amdgcn_trig_preop:
6096 case Intrinsic::amdgcn_fdot2:
6099 case Intrinsic::amdgcn_fma_legacy:
6116 return MRI.hasOneNonDBGUse(N0);
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static LLVM_READONLY bool hasSourceMods(const MachineInstr &MI)
static bool isInv2Pi(const APFloat &APF)
static LLVM_READONLY bool opMustUseVOP3Encoding(const MachineInstr &MI, const MachineRegisterInfo &MRI)
returns true if the operation will definitely need to use a 64-bit encoding, and thus will use a VOP3...
static unsigned inverseMinMax(unsigned Opc)
static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL, SelectionDAG &DAG)
static unsigned workitemIntrinsicDim(unsigned ID)
static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size, int64_t Offset)
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, uint32_t Width, const SDLoc &DL)
static SDValue getMad(SelectionDAG &DAG, const SDLoc &SL, EVT VT, SDValue X, SDValue Y, SDValue C, SDNodeFlags Flags=SDNodeFlags())
static SDValue getAddOneOp(const SDNode *V)
If V is an add of a constant 1, returns the other operand.
static LLVM_READONLY bool selectSupportsSourceMods(const SDNode *N)
Return true if v_cndmask_b32 will support fabs/fneg source modifiers for the type for ISD::SELECT.
static cl::opt< bool > AMDGPUBypassSlowDiv("amdgpu-bypass-slow-div", cl::desc("Skip 64-bit divide for dynamic 32-bit values"), cl::init(true))
static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL, SDValue N0, SDValue N1, unsigned Size, bool Signed)
static bool fnegFoldsIntoOp(const SDNode *N)
static bool isI24(SDValue Op, SelectionDAG &DAG)
static bool isCttzOpc(unsigned Opc)
static bool isU24(SDValue Op, SelectionDAG &DAG)
static SDValue peekFPSignOps(SDValue Val)
static bool valueIsKnownNeverF32Denorm(SDValue Src)
Return true if it's known that Src can never be an f32 denormal value.
static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI, unsigned Op, const SDLoc &SL, SDValue Cond, SDValue N1, SDValue N2)
static SDValue peekFNeg(SDValue Val)
static SDValue simplifyMul24(SDNode *Node24, TargetLowering::DAGCombinerInfo &DCI)
static bool isCtlzOpc(unsigned Opc)
static LLVM_READNONE bool fnegFoldsIntoOpcode(unsigned Opc)
static bool hasVolatileUser(SDNode *Val)
Interface definition of the TargetLowering class that is common to all AMD GPUs.
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Provides analysis for querying information about KnownBits during GISel passes.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
const SmallVectorImpl< MachineOperand > & Cond
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg)
static CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg)
uint64_t getExplicitKernArgSize() const
static std::optional< uint32_t > getLDSAbsoluteAddress(const GlobalValue &GV)
void recordNumNamedBarriers(uint32_t GVAddr, unsigned BarCnt)
unsigned allocateLDSGlobal(const DataLayout &DL, const GlobalVariable &GV)
bool isModuleEntryFunction() const
bool has16BitInsts() const
static const AMDGPUSubtarget & get(const MachineFunction &MF)
static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG)
SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, SDValue CC, DAGCombinerInfo &DCI) const
Generate Min/Max node.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType ExtendKind) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const
Split a vector load into 2 loads of half the vector.
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const
void analyzeFormalArgumentsCompute(CCState &State, const SmallVectorImpl< ISD::InputArg > &Ins) const
The SelectionDAGBuilder will automatically promote function arguments with illegal types.
SDValue LowerF64ToF16Safe(SDValue Src, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const
SDValue storeStackInputValue(SelectionDAG &DAG, const SDLoc &SL, SDValue Chain, SDValue ArgVal, int64_t Offset) const
bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AS) const override
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool shouldCombineMemoryType(EVT VT) const
SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL, unsigned Opc, SDValue LHS, uint32_t ValLo, uint32_t ValHi) const
Split the 64-bit value LHS into two 32-bit components, and perform the binary operation Opc to it wit...
SDValue lowerUnhandledCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals, StringRef Reason) const
SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isTruncateFree(EVT Src, EVT Dest) const override
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const
TargetLowering::NegatibleCost getConstantNegateCost(const ConstantFPSDNode *C) const
SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, bool IsLog10, SDNodeFlags Flags) const
bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerFEXPUnsafeImpl(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags, bool IsExp10) const
bool isSDNodeAlwaysUniform(const SDNode *N) const override
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
SDValue LowerFLOG10(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const
unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG, MachineFrameInfo &MFI, int ClobberedFI) const
bool isConstantCheaperToNegate(SDValue N) const
bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, Register N1) const override
bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const override
If SNaN is false,.
static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src, SDNodeFlags Flags)
uint32_t getImplicitParameterOffset(const MachineFunction &MF, const ImplicitParameter Param) const
Helper function that returns the byte offset of the given type of implicit parameter.
SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const
SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const
virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op, SelectionDAG &DAG) const
bool isConstantCostlierToNegate(SDValue N) const
SDValue loadInputValue(SelectionDAG &DAG, const TargetRegisterClass *RC, EVT VT, const SDLoc &SL, const ArgDescriptor &Arg) const
SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const
SDValue lowerFEXP10Unsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags) const
Emit approx-funcs appropriate lowering for exp10.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtType, EVT ExtVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS, SDValue RHS, DAGCombinerInfo &DCI) const
SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isSelectSupported(SelectSupportKind) const override
bool isZExtFree(Type *Src, Type *Dest) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const
SDValue getIsLtSmallestNormal(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const
bool mayIgnoreSignedZero(SDValue Op) const
SDValue getIsFinite(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const
bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const final
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
std::pair< SDValue, SDValue > splitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HighVT, SelectionDAG &DAG) const
Split a vector value into two parts of types LoVT and HiVT.
AMDGPUTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI, const AMDGPUSubtarget &AMDGPUSTI)
SDValue LowerFLOGCommon(SDValue Op, SelectionDAG &DAG) const
SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI, SDValue N) const
SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const
bool isFAbsFree(EVT VT) const override
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
SDValue loadStackInputValue(SelectionDAG &DAG, EVT VT, const SDLoc &SL, int64_t Offset) const
Similar to CreateLiveInRegister, except value maybe loaded from a stack slot rather than passed in a ...
SDValue LowerFLOG2(SDValue Op, SelectionDAG &DAG) const
static EVT getEquivalentMemType(LLVMContext &Context, EVT VT)
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
static SDValue stripBitcast(SDValue Val)
SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC, Register Reg, EVT VT, const SDLoc &SL, bool RawReg=false) const
Helper function that adds Reg to the LiveIn list of the DAG's MachineFunction.
SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const
Split a vector store into 2 stores of half the vector.
SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize, NegatibleCost &Cost, unsigned Depth) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
std::pair< SDValue, SDValue > split64BitValue(SDValue Op, SelectionDAG &DAG) const
Return 64-bit value Op as two 32-bit integers.
SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const
static CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg)
std::pair< SDValue, SDValue > getScaledLogInput(SelectionDAG &DAG, const SDLoc SL, SDValue Op, SDNodeFlags Flags) const
If denormal handling is required return the scaled input to FLOG2, and the check for denormal range.
static CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg)
Selects the correct CCAssignFn for a given CallingConvention value.
static bool allUsesHaveSourceMods(const SDNode *N, unsigned CostThreshold=4)
SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG)
SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags) const
SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags)
bool ShouldShrinkFPConstant(EVT VT) const override
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const
SDValue lowerCTLZResults(SDValue Op, SelectionDAG &DAG) const
SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const
static bool shouldFoldFNegIntoSrc(SDNode *FNeg, SDValue FNegSrc)
bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const
SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const
SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results) const
SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const
Widen a suitably aligned v3 load.
std::pair< EVT, EVT > getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const
Split a vector type into two parts.
SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const
SDValue combineFMinMaxLegacyImpl(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, SDValue CC, DAGCombinerInfo &DCI) const
unsigned getVectorIdxWidth(const DataLayout &) const override
Returns the type to be used for the index operand vector operations.
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & IEEEhalf()
bool bitwiseIsEqual(const APFloat &RHS) const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
This class represents an incoming formal argument to a Function.
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
const APFloat & getValueAPF() const
bool isNegative() const
Return true if the value is negative.
uint64_t getZExtValue() const
A parsed version of the target data layout string in and methods for querying it.
Diagnostic information for unsupported feature in backend.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
iterator_range< arg_iterator > args()
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Type * getValueType() const
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOInvariant
The memory access always returns the same value (or traps).
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This is an abstract virtual class for memory operations.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
unsigned getNumOperands() const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
SIModeRegisterDefaults getMode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool isConstantValueOfAnyType(SDValue N) const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum fp to/from int conversion the backend supports.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
SelectSupportKind
Enum that describes what type of support for selects the target has.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more preferable chain.
NegatibleCost
Enum that specifies when a float negation is beneficial.
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
TargetLowering(const TargetLowering &)=delete
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
TargetSubtargetInfo - Generic base class for all target subtargets.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVM Value Representation.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
bool isIntrinsicAlwaysUniform(unsigned IntrID)
TargetExtType * isNamedBarrier(const GlobalVariable &GV)
bool isUniformMMO(const MachineMemOperand *MMO)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tess...
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is not commonly executed.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual underlying non-aggregate types that comprise it.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
To bit_cast(const From &from) noexcept
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
static cl::opt< unsigned > CostThreshold("dfa-cost-threshold", cl::desc("Maximum cost accepted for the transformation"), cl::Hidden, cl::init(50))
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
MCRegister getRegister() const
unsigned getStackOffset() const
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
static constexpr DenormalMode getPreserveSign()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
EVT getPow2VectorType(LLVMContext &Context) const
Widens the length of the given vector EVT up to the nearest power of 2 and returns that type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), and returns the integer EVT with that number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxSignificantBits() const
Returns the maximum number of bits needed to represent all possible signed values with these known bi...
This class contains a discriminated union of information about pointers in memory operands,...
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
These are IR-level optimization flags that may be propagated to SDNodes.
void setAllowContract(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
DenormalMode FP32Denormals
If this is set, neither input or output denormals are flushed for most f32 instructions.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
bool isBeforeLegalizeOps() const
CombineLevel getDAGCombineLevel()
LLVM_ABI void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.