#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-lower"
    cl::desc("Give the maximum size (in number of nodes) of the web of "
             "instructions that we will consider for VW expansion"),

    cl::desc("Allow the formation of VW_W operations (e.g., "
             "VWADD_W) with splat constants"),

    cl::desc("Set the minimum number of repetitions of a divisor to allow "
             "transformation to multiplications by the reciprocal"),

    cl::desc("Give the maximum number of instructions that we will "
             "use for creating a floating-point immediate value"),

    cl::desc("Make i32 a legal type for SelectionDAG on RV64."));
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
               "doesn't support the F instruction set extension (ignoring "
               "target-abi)\n";

      !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
               "doesn't support the D instruction set extension (ignoring "
               "target-abi)\n";
  if (Subtarget.hasStdExtZfhmin())

  if (Subtarget.hasStdExtZfbfmin())

  if (Subtarget.hasStdExtF())

  if (Subtarget.hasStdExtD())

  if (Subtarget.hasStdExtZhinxmin())

  if (Subtarget.hasStdExtZfinx())

  if (Subtarget.hasStdExtZdinx()) {
  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1,  MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType BF16VecVTs[] = {
      MVT::nxv1bf16, MVT::nxv2bf16,  MVT::nxv4bf16,
      MVT::nxv8bf16, MVT::nxv16bf16, MVT::nxv32bf16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
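  // Each scalable vector type is assigned to one of the RVV register classes
  // according to how many vector registers (LMUL) its known minimum size
  // occupies: VR for LMUL <= 1, and VRM2/VRM4/VRM8 for register groups of 2,
  // 4, and 8. For example, with RVVBitsPerBlock = 64, nxv8i32 has a known
  // minimum size of 256 bits and is therefore placed in VRM4 by the lambda
  // below.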
  auto addRegClassForRVV = [this](MVT VT) {
    if (VT.getVectorMinNumElements() < MinElts)
      return;

    unsigned Size = VT.getSizeInBits().getKnownMinValue();
    const TargetRegisterClass *RC;
    if (Size <= RISCV::RVVBitsPerBlock)
      RC = &RISCV::VRRegClass;
    else if (Size == 2 * RISCV::RVVBitsPerBlock)
      RC = &RISCV::VRM2RegClass;
    else if (Size == 4 * RISCV::RVVBitsPerBlock)
      RC = &RISCV::VRM4RegClass;
    else if (Size == 8 * RISCV::RVVBitsPerBlock)
      RC = &RISCV::VRM8RegClass;
    else
      llvm_unreachable("Unexpected size");

    addRegisterClass(VT, RC);
  };
  for (MVT VT : BoolVecVTs)
    addRegClassForRVV(VT);

  for (MVT VT : IntVecVTs) {
    if (VT.getVectorElementType() == MVT::i64 &&
        !Subtarget.hasVInstructionsI64())
      continue;
    addRegClassForRVV(VT);
  }

  for (MVT VT : F16VecVTs)
    addRegClassForRVV(VT);

  for (MVT VT : BF16VecVTs)
    addRegClassForRVV(VT);

  for (MVT VT : F32VecVTs)
    addRegClassForRVV(VT);

  for (MVT VT : F64VecVTs)
    addRegClassForRVV(VT);
  auto addRegClassForFixedVectors = [this](MVT VT) {
    MVT ContainerVT = getContainerForFixedLengthVector(VT);
    unsigned RCID = getRegClassIDForVecVT(ContainerVT);
    const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
    addRegisterClass(VT, TRI.getRegClass(RCID));
  };

  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
    if (useRVVForFixedLengthVectorVT(VT))
      addRegClassForFixedVectors(VT);

  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
    if (useRVVForFixedLengthVectorVT(VT))
      addRegClassForFixedVectors(VT);
  if (!Subtarget.hasStdExtZbb() && !Subtarget.hasVendorXTHeadBb())

  if (!Subtarget.hasStdExtZbb())

  if (Subtarget.hasStdExtZbb()) {

      {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},

  if (!Subtarget.hasStdExtM() && !Subtarget.hasStdExtZmmul()) {
  } else if (Subtarget.is64Bit()) {

  if (!Subtarget.hasStdExtM()) {
  } else if (Subtarget.is64Bit()) {
                       {MVT::i8, MVT::i16, MVT::i32}, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
  } else if (Subtarget.hasVendorXTHeadBb()) {
  } else if (Subtarget.hasVendorXCVbitmanip()) {

      (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
       Subtarget.hasVendorXTHeadBb())

      (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
       Subtarget.hasVendorXTHeadBb())

  if (Subtarget.hasVendorXCVbitmanip()) {

  if (Subtarget.hasStdExtZbb()) {
  } else if (!Subtarget.hasVendorXCVbitmanip()) {

  if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
      Subtarget.hasVendorXCVbitmanip()) {

  if (!Subtarget.hasStdExtZbb())

      !Subtarget.hasShortForwardBranchOpt())

  if (Subtarget.hasShortForwardBranchOpt())

  if (!Subtarget.hasVendorXTHeadCondMov()) {
  static const unsigned FPLegalNodeTypes[] = {

  static const unsigned FPOpToExpand[] = {

  static const unsigned FPRndMode[] = {

  static const unsigned ZfhminZfbfminPromoteOps[] = {

  if (Subtarget.hasStdExtZfbfmin()) {

  if (!Subtarget.hasStdExtZfa())

  if (Subtarget.hasStdExtZfa())

  if (Subtarget.hasStdExtZfa()) {

  if (Subtarget.hasStdExtZicbop()) {

  if (Subtarget.hasStdExtA()) {
    if (Subtarget.hasStdExtZabha() && Subtarget.hasStdExtZacas())
  } else if (Subtarget.hasForcedAtomics()) {

                     {MVT::i8, MVT::i16}, Custom);
  static const unsigned IntegerVPOps[] = {
      ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
      ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
      ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
      ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
      ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
      ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
      ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
      ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FP_TO_SINT,
      ISD::VP_FP_TO_UINT,  ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
      ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE,    ISD::VP_SMIN,
      ISD::VP_SMAX,        ISD::VP_UMIN,        ISD::VP_UMAX,
      ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE,
      ISD::VP_SADDSAT,     ISD::VP_UADDSAT,     ISD::VP_SSUBSAT,
      ISD::VP_USUBSAT};

  static const unsigned FloatingPointVPOps[] = {
      ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
      ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FABS,
      ISD::VP_FMA,         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
      ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
      ISD::VP_SELECT,      ISD::VP_SINT_TO_FP,  ISD::VP_UINT_TO_FP,
      ISD::VP_SETCC,       ISD::VP_FP_ROUND,    ISD::VP_FP_EXTEND,
      ISD::VP_SQRT,        ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,
      ISD::VP_FCEIL,       ISD::VP_FFLOOR,      ISD::VP_FROUND,
      ISD::VP_FROUNDEVEN,  ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO,
      ISD::VP_FRINT,       ISD::VP_FNEARBYINT,  ISD::VP_IS_FPCLASS,
      ISD::VP_FMINIMUM,    ISD::VP_FMAXIMUM,    ISD::VP_LRINT,
      ISD::VP_LLRINT,      ISD::EXPERIMENTAL_VP_REVERSE,
      ISD::EXPERIMENTAL_VP_SPLICE};
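  // These tables are just lists of opcodes: the constructor code below walks
  // them with setOperationAction so that every listed VP opcode gets the same
  // legalization action (typically Custom lowering onto the RISCVISD *_VL
  // nodes) for each vector type, instead of spelling each call out one by one.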
  static const unsigned IntegerVecReduceOps[] = {

  static const unsigned FloatingPointVecReduceOps[] = {

                      ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,
                      ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
                      ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
  for (MVT VT : BoolVecVTs) {

        {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,

         ISD::VP_TRUNCATE, ISD::VP_SETCC},

  for (MVT VT : IntVecVTs) {
    if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())

        {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},

    if (Subtarget.hasStdExtZvkb()) {

    if (Subtarget.hasStdExtZvbb()) {

         ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},

         ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},

         ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ_ZERO_UNDEF},
  static const unsigned ZvfhminPromoteOps[] = {

  static const unsigned ZvfhminPromoteVPOps[] = {
      ISD::VP_FADD,        ISD::VP_FSUB,         ISD::VP_FMUL,
      ISD::VP_FDIV,        ISD::VP_FNEG,         ISD::VP_FABS,
      ISD::VP_FMA,         ISD::VP_REDUCE_FADD,  ISD::VP_REDUCE_SEQ_FADD,
      ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,  ISD::VP_SQRT,
      ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,      ISD::VP_FCEIL,
      ISD::VP_FFLOOR,      ISD::VP_FROUND,       ISD::VP_FROUNDEVEN,
      ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO, ISD::VP_FRINT,
      ISD::VP_FNEARBYINT,  ISD::VP_SETCC,        ISD::VP_FMINIMUM,
      ISD::VP_FMAXIMUM};
  const auto SetCommonVFPActions = [&](MVT VT) {

        {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},

  const auto SetCommonVFPExtLoadTruncStoreActions =
      [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
        for (auto SmallVT : SmallerVTs) {

  for (MVT VT : F16VecVTs) {
    SetCommonVFPActions(VT);
  }

  for (MVT VT : F16VecVTs) {

         ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},

    if (VT == MVT::nxv32f16) {

  for (MVT VT : F32VecVTs) {
    SetCommonVFPActions(VT);
    SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
  }

  for (MVT VT : F64VecVTs) {
    SetCommonVFPActions(VT);
    SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
  }
    if (!useRVVForFixedLengthVectorVT(VT))

        {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,

         ISD::VP_SETCC, ISD::VP_TRUNCATE},

         ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,

    if (Subtarget.hasStdExtZvkb())

    if (Subtarget.hasStdExtZvbb()) {

    if (!useRVVForFixedLengthVectorVT(VT))

         ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},

         ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
  if (Subtarget.hasStdExtA()) {

  if (Subtarget.hasForcedAtomics()) {

  if (Subtarget.hasVendorXTHeadMemIdx()) {

  if (Subtarget.hasStdExtZbb())

  if (Subtarget.hasStdExtZbs() && Subtarget.is64Bit())

  if (Subtarget.hasStdExtZbkb())

                       ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL,

  if (Subtarget.hasVendorXTHeadMemPair())
MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {

bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
                                                      unsigned VF,
                                                      bool IsScalable) const {

  if (TripCountVT != MVT::i32 && TripCountVT != Subtarget.getXLenVT())

bool RISCVTargetLowering::getTgtMemIntrinsicInfo(IntrinsicInfo &Info,
                                                 const CallInst &I,
                                                 MachineFunction &MF,
                                                 unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  auto SetRVVLoadStoreInfo = [&](unsigned PtrOp, bool IsStore,
                                 bool IsUnitStrided, bool UsePtrVal = false) {

    if (UsePtrVal)
      Info.ptrVal = I.getArgOperand(PtrOp);
    else
      Info.fallbackAddressSpace =
          I.getArgOperand(PtrOp)->getType()->getPointerAddressSpace();

    Type *MemTy;
    if (IsStore)
      MemTy = I.getArgOperand(0)->getType();
    else
      MemTy = I.getType();

    if (I.hasMetadata(LLVMContext::MD_nontemporal))
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);

  case Intrinsic::riscv_masked_strided_load:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_masked_strided_store:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_seg2_store:
  case Intrinsic::riscv_seg3_store:
  case Intrinsic::riscv_seg4_store:
  case Intrinsic::riscv_seg5_store:
  case Intrinsic::riscv_seg6_store:
  case Intrinsic::riscv_seg7_store:
  case Intrinsic::riscv_seg8_store:
    // Operands are (vec, ..., vec, ptr, vl).
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
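  // For the segment intrinsics the pointer is not at a fixed position, so it
  // is located relative to the end of the argument list. E.g. riscv_seg2_store
  // has operands (vec0, vec1, ptr, vl): arg_size() == 4, so the pointer is
  // operand I.arg_size() - 2 == 2. Masked/strided variants append extra
  // trailing operands (mask, vl, policy, ...), which the larger offsets in
  // the cases below account for.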
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vleff:
  case Intrinsic::riscv_vleff_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
                               /*IsUnitStrided*/ true, /*UsePtrVal*/ true);
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
                               /*IsUnitStrided*/ true, /*UsePtrVal*/ true);
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask:
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask:
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vlseg2:
  case Intrinsic::riscv_vlseg3:
  case Intrinsic::riscv_vlseg4:
  case Intrinsic::riscv_vlseg5:
  case Intrinsic::riscv_vlseg6:
  case Intrinsic::riscv_vlseg7:
  case Intrinsic::riscv_vlseg8:
  case Intrinsic::riscv_vlseg2ff:
  case Intrinsic::riscv_vlseg3ff:
  case Intrinsic::riscv_vlseg4ff:
  case Intrinsic::riscv_vlseg5ff:
  case Intrinsic::riscv_vlseg6ff:
  case Intrinsic::riscv_vlseg7ff:
  case Intrinsic::riscv_vlseg8ff:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vlseg2_mask:
  case Intrinsic::riscv_vlseg3_mask:
  case Intrinsic::riscv_vlseg4_mask:
  case Intrinsic::riscv_vlseg5_mask:
  case Intrinsic::riscv_vlseg6_mask:
  case Intrinsic::riscv_vlseg7_mask:
  case Intrinsic::riscv_vlseg8_mask:
  case Intrinsic::riscv_vlseg2ff_mask:
  case Intrinsic::riscv_vlseg3ff_mask:
  case Intrinsic::riscv_vlseg4ff_mask:
  case Intrinsic::riscv_vlseg5ff_mask:
  case Intrinsic::riscv_vlseg6ff_mask:
  case Intrinsic::riscv_vlseg7ff_mask:
  case Intrinsic::riscv_vlseg8ff_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vlsseg2:
  case Intrinsic::riscv_vlsseg3:
  case Intrinsic::riscv_vlsseg4:
  case Intrinsic::riscv_vlsseg5:
  case Intrinsic::riscv_vlsseg6:
  case Intrinsic::riscv_vlsseg7:
  case Intrinsic::riscv_vlsseg8:
  case Intrinsic::riscv_vloxseg2:
  case Intrinsic::riscv_vloxseg3:
  case Intrinsic::riscv_vloxseg4:
  case Intrinsic::riscv_vloxseg5:
  case Intrinsic::riscv_vloxseg6:
  case Intrinsic::riscv_vloxseg7:
  case Intrinsic::riscv_vloxseg8:
  case Intrinsic::riscv_vluxseg2:
  case Intrinsic::riscv_vluxseg3:
  case Intrinsic::riscv_vluxseg4:
  case Intrinsic::riscv_vluxseg5:
  case Intrinsic::riscv_vluxseg6:
  case Intrinsic::riscv_vluxseg7:
  case Intrinsic::riscv_vluxseg8:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vlsseg2_mask:
  case Intrinsic::riscv_vlsseg3_mask:
  case Intrinsic::riscv_vlsseg4_mask:
  case Intrinsic::riscv_vlsseg5_mask:
  case Intrinsic::riscv_vlsseg6_mask:
  case Intrinsic::riscv_vlsseg7_mask:
  case Intrinsic::riscv_vlsseg8_mask:
  case Intrinsic::riscv_vloxseg2_mask:
  case Intrinsic::riscv_vloxseg3_mask:
  case Intrinsic::riscv_vloxseg4_mask:
  case Intrinsic::riscv_vloxseg5_mask:
  case Intrinsic::riscv_vloxseg6_mask:
  case Intrinsic::riscv_vloxseg7_mask:
  case Intrinsic::riscv_vloxseg8_mask:
  case Intrinsic::riscv_vluxseg2_mask:
  case Intrinsic::riscv_vluxseg3_mask:
  case Intrinsic::riscv_vluxseg4_mask:
  case Intrinsic::riscv_vluxseg5_mask:
  case Intrinsic::riscv_vluxseg6_mask:
  case Intrinsic::riscv_vluxseg7_mask:
  case Intrinsic::riscv_vluxseg8_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5, /*IsStore*/ false,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vsseg2:
  case Intrinsic::riscv_vsseg3:
  case Intrinsic::riscv_vsseg4:
  case Intrinsic::riscv_vsseg5:
  case Intrinsic::riscv_vsseg6:
  case Intrinsic::riscv_vsseg7:
  case Intrinsic::riscv_vsseg8:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vsseg2_mask:
  case Intrinsic::riscv_vsseg3_mask:
  case Intrinsic::riscv_vsseg4_mask:
  case Intrinsic::riscv_vsseg5_mask:
  case Intrinsic::riscv_vsseg6_mask:
  case Intrinsic::riscv_vsseg7_mask:
  case Intrinsic::riscv_vsseg8_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vssseg2:
  case Intrinsic::riscv_vssseg3:
  case Intrinsic::riscv_vssseg4:
  case Intrinsic::riscv_vssseg5:
  case Intrinsic::riscv_vssseg6:
  case Intrinsic::riscv_vssseg7:
  case Intrinsic::riscv_vssseg8:
  case Intrinsic::riscv_vsoxseg2:
  case Intrinsic::riscv_vsoxseg3:
  case Intrinsic::riscv_vsoxseg4:
  case Intrinsic::riscv_vsoxseg5:
  case Intrinsic::riscv_vsoxseg6:
  case Intrinsic::riscv_vsoxseg7:
  case Intrinsic::riscv_vsoxseg8:
  case Intrinsic::riscv_vsuxseg2:
  case Intrinsic::riscv_vsuxseg3:
  case Intrinsic::riscv_vsuxseg4:
  case Intrinsic::riscv_vsuxseg5:
  case Intrinsic::riscv_vsuxseg6:
  case Intrinsic::riscv_vsuxseg7:
  case Intrinsic::riscv_vsuxseg8:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
  case Intrinsic::riscv_vssseg2_mask:
  case Intrinsic::riscv_vssseg3_mask:
  case Intrinsic::riscv_vssseg4_mask:
  case Intrinsic::riscv_vssseg5_mask:
  case Intrinsic::riscv_vssseg6_mask:
  case Intrinsic::riscv_vssseg7_mask:
  case Intrinsic::riscv_vssseg8_mask:
  case Intrinsic::riscv_vsoxseg2_mask:
  case Intrinsic::riscv_vsoxseg3_mask:
  case Intrinsic::riscv_vsoxseg4_mask:
  case Intrinsic::riscv_vsoxseg5_mask:
  case Intrinsic::riscv_vsoxseg6_mask:
  case Intrinsic::riscv_vsoxseg7_mask:
  case Intrinsic::riscv_vsoxseg8_mask:
  case Intrinsic::riscv_vsuxseg2_mask:
  case Intrinsic::riscv_vsuxseg3_mask:
  case Intrinsic::riscv_vsuxseg4_mask:
  case Intrinsic::riscv_vsuxseg5_mask:
  case Intrinsic::riscv_vsuxseg6_mask:
  case Intrinsic::riscv_vsuxseg7_mask:
  case Intrinsic::riscv_vsuxseg8_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4, /*IsStore*/ true,
                               /*IsUnitStrided*/ false);
  return isInt<12>(Imm);

  return isInt<12>(Imm);
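// RISC-V I-type instructions (e.g. addi) carry a sign-extended 12-bit
// immediate, so an add/compare immediate is only directly encodable when it
// lies in [-2048, 2047]; isInt<12>(Imm) checks exactly that range.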
  return (SrcBits == 64 && DestBits == 32);

  return (SrcBits == 64 && DestBits == 32);

  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&

  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;

  return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXCVbitmanip();

  return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
         Subtarget.hasVendorXCVbitmanip();

  if (!Subtarget.hasStdExtZbs() && !Subtarget.hasVendorXTHeadBs())
    return false;

  return !Mask->getValue().isSignedIntN(12) && Mask->getValue().isPowerOf2();
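// A single-bit test is only worth forming when the mask is a power of two
// that an andi cannot reach: masks that fit a signed 12-bit immediate are
// already one cheap instruction, while higher bits need Zbs (bexti) or
// XTHeadBs to test directly.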
  EVT VT = Y.getValueType();

  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
         !isa<ConstantSDNode>(Y);

  if (Subtarget.hasStdExtZbs())
    return X.getValueType().isScalarInteger();
  auto *C = dyn_cast<ConstantSDNode>(Y);

  if (Subtarget.hasVendorXTHeadBs())
    return C != nullptr;

  return C && C->getAPIntValue().ule(10);
  if (BitSize > Subtarget.getXLen())

  int64_t Val = Imm.getSExtValue();

  if (!Subtarget.hasFastUnalignedAccess())

    unsigned OldShiftOpcode, unsigned NewShiftOpcode,

  if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())

  if (NewShiftOpcode == ISD::SRL && CC->isOne())
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::ICmp:
  case Instruction::FCmp:
    return true;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return Operand == 1;
  auto *II = dyn_cast<IntrinsicInst>(I);

  switch (II->getIntrinsicID()) {
  case Intrinsic::fma:
  case Intrinsic::vp_fma:
    return Operand == 0 || Operand == 1;
  case Intrinsic::vp_shl:
  case Intrinsic::vp_lshr:
  case Intrinsic::vp_ashr:
  case Intrinsic::vp_udiv:
  case Intrinsic::vp_sdiv:
  case Intrinsic::vp_urem:
  case Intrinsic::vp_srem:
  case Intrinsic::ssub_sat:
  case Intrinsic::vp_ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::vp_usub_sat:
    return Operand == 1;
  // These intrinsics are commutative.
  case Intrinsic::vp_add:
  case Intrinsic::vp_mul:
  case Intrinsic::vp_and:
  case Intrinsic::vp_or:
  case Intrinsic::vp_xor:
  case Intrinsic::vp_fadd:
  case Intrinsic::vp_fmul:
  case Intrinsic::vp_icmp:
  case Intrinsic::vp_fcmp:
  case Intrinsic::smin:
  case Intrinsic::vp_smin:
  case Intrinsic::umin:
  case Intrinsic::vp_umin:
  case Intrinsic::smax:
  case Intrinsic::vp_smax:
  case Intrinsic::umax:
  case Intrinsic::vp_umax:
  case Intrinsic::sadd_sat:
  case Intrinsic::vp_sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::vp_uadd_sat:
  // These intrinsics have 'vr' versions.
  case Intrinsic::vp_sub:
  case Intrinsic::vp_fsub:
  case Intrinsic::vp_fdiv:
    return Operand == 0 || Operand == 1;
  if (!Subtarget.sinkSplatOperands())
    return false;

  for (auto OpIdx : enumerate(I->operands())) {

    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
    // Make sure we are not already sinking this operand.
    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
      continue;

    // We can't sink i1 splats.
    if (cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(1))
      continue;

    for (Use &U : Op->uses()) {
  if (!Subtarget.hasStdExtZfa())
    return std::make_pair(-1, false);
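  // With Zfa, certain FP constants can be materialized with a single fli
  // instruction, which loads one of 32 table-defined immediates. The code
  // below reports which table entry (an index, or -1 if none) matches Imm for
  // the types the subtarget supports.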
  bool IsSupportedVT = false;
  if (VT == MVT::f16) {
    IsSupportedVT = Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZvfh();
  } else if (VT == MVT::f32) {
    IsSupportedVT = true;
  } else if (VT == MVT::f64) {
    assert(Subtarget.hasStdExtD() && "Expect D extension");
    IsSupportedVT = true;
  }

  if (!IsSupportedVT)
    return std::make_pair(-1, false);

  if (Index < 0 && Imm.isNegative())

  return std::make_pair(Index, false);
                                       bool ForCodeSize) const {
  bool IsLegalVT = false;

  else if (VT == MVT::f32)

  else if (VT == MVT::f64)

  else if (VT == MVT::bf16)
    IsLegalVT = Subtarget.hasStdExtZfbfmin();

  return Imm.isZero();

  if (Imm.isNegZero())
    return false;
                                            unsigned Index) const {

  if (EltVT == MVT::i1)

  if (Index + ResElts <= MinVLMAX && Index < 31)

  if ((ResElts * 2) != SrcElts)

    unsigned &NumIntermediates, MVT &RegisterVT) const {

      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);

    IntermediateVT = MVT::i64;

    RegisterVT = MVT::i64;
      isa<ConstantSDNode>(LHS.getOperand(1))) {

    ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);

  if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
    int64_t C = RHSC->getSExtValue();

  switch (KnownSize) {

    return RISCV::VRRegClassID;

    return RISCV::VRM2RegClassID;

    return RISCV::VRM4RegClassID;

    return RISCV::VRM8RegClassID;

    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;

    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;

    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;

  return RISCV::VRRegClassID;
std::pair<unsigned, unsigned>

    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,

  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");

  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {

      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,

  return {SubRegIdx, InsertExtractIdx};
bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {

unsigned RISCVTargetLowering::combineRepeatedFPDivisors() const {

         "Unexpected opcode");

  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);

      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);

  return Op.getOperand(II->VLOperand + 1 + HasChain);
bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

         "Expected legal fixed length vector!");

  unsigned MaxELen = Subtarget.getELen();

  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());

         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");

         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");

  const auto [MinVLMAX, MaxVLMAX] =

  if (MinVLMAX == MaxVLMAX && NumElts == MinVLMAX)

static std::pair<SDValue, SDValue>

static std::pair<SDValue, SDValue>

static std::pair<SDValue, SDValue>

std::pair<unsigned, unsigned>

  return std::make_pair(MinVLMAX, MaxVLMAX);
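// VLMAX for a vector type is (VLEN / SEW) * LMUL, so the bounds above follow
// from the subtarget's min/max VLEN. E.g. for nxv4i32 (SEW = 32, LMUL = 2)
// with Zvl128b..Zvl512b, MinVLMAX = 128 / 32 * 2 = 8 and
// MaxVLMAX = 512 / 32 * 2 = 32; when MinVLMAX == MaxVLMAX the exact element
// count is known at compile time.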
                                                EVT VT,
                                                unsigned DefinedValues) const {

  std::tie(LMul, Fractional) =

  if (Fractional)
    Cost = LMul <= DLenFactor ? (DLenFactor / LMul) : 1;
  else
    Cost = (LMul * DLenFactor);

  MVT DstVT = Op.getSimpleValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

      Src.getValueType() == MVT::bf16) {

  else if (DstVT == MVT::i64 && SatVT == MVT::i32)

  return DAG.getNode(Opc, DL, DstVT, Src,

  MVT SrcVT = Src.getSimpleValueType();

  if (SatVT != DstEltVT)
    return SDValue();

  if (SrcEltSize > (2 * DstEltSize))

  MVT DstContainerVT = DstVT;
  MVT SrcContainerVT = SrcVT;

         "Expected same element count");

      {Src, Src, DAG.getCondCode(ISD::SETNE),
       DAG.getUNDEF(Mask.getValueType()), Mask, VL});

  if (DstEltSize > (2 * SrcEltSize)) {

      Res, DAG.getUNDEF(DstContainerVT), VL);
  case ISD::VP_FROUNDEVEN:
    return RISCVFPRndMode::RNE;
  case ISD::VP_FROUNDTOZERO:
    return RISCVFPRndMode::RTZ;
  case ISD::VP_FFLOOR:
    return RISCVFPRndMode::RDN;
  case ISD::VP_FROUND:
    return RISCVFPRndMode::RMM;

  MVT VT = Op.getSimpleValueType();

  MVT ContainerVT = VT;

  if (Op->isVPOpcode()) {
    Mask = Op.getOperand(1);

    VL = Op.getOperand(2);
  }

      DAG.getUNDEF(ContainerVT), MaxValNode, VL);

  switch (Op.getOpcode()) {

  case ISD::VP_FFLOOR:

  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO: {

  case ISD::VP_FNEARBYINT:

      Src, Src, Mask, VL);
  MVT VT = Op.getSimpleValueType();

  MVT ContainerVT = VT;

  MVT MaskVT = Mask.getSimpleValueType();

      {Chain, Src, Src, DAG.getCondCode(ISD::SETUNE),
       DAG.getUNDEF(MaskVT), Mask, VL});

      {Chain, Src, Src, DAG.getUNDEF(ContainerVT), Unorder, VL});
  Chain = Src.getValue(1);

      DAG.getUNDEF(ContainerVT), MaxValNode, VL);

  switch (Op.getOpcode()) {

      {Chain, Src, Mask, DAG.getTargetConstant(FRM, DL, XLenVT), VL});

      DAG.getVTList(IntVT, MVT::Other), Chain, Src, Mask, VL);

      DAG.getVTList(ContainerVT, MVT::Other), Chain, Src,

      DAG.getVTList(ContainerVT, MVT::Other), Chain,
      Truncated, Mask, VL);

      Src, Src, Mask, VL);

  MVT VT = Op.getSimpleValueType();

  MVT VT = Op.getSimpleValueType();

  MVT ContainerVT = VT;

  if (Merge.isUndef())

  if (Merge.isUndef())

         "Unexpected vector MVT");
    return std::nullopt;

                                                      unsigned EltSizeInBits) {

    return std::nullopt;
  bool IsInteger = Op.getValueType().isInteger();

  std::optional<unsigned> SeqStepDenom;
  std::optional<int64_t> SeqStepNum, SeqAddend;
  std::optional<std::pair<uint64_t, unsigned>> PrevElt;
  assert(EltSizeInBits >= Op.getValueType().getScalarSizeInBits());

    const unsigned OpSize = Op.getScalarValueSizeInBits();

    if (Elt.isUndef()) {
      Elts[Idx] = std::nullopt;
      continue;
    }

    Elts[Idx] = Elt->getAsZExtVal() & maskTrailingOnes<uint64_t>(OpSize);

      return std::nullopt;
    Elts[Idx] = *ExactInteger;

    unsigned IdxDiff = Idx - PrevElt->second;
    int64_t ValDiff = SignExtend64(*Elt - PrevElt->first, EltSizeInBits);

    int64_t Remainder = ValDiff % IdxDiff;

    if (Remainder != ValDiff) {

      return std::nullopt;
    }

    if (!SeqStepNum)
      SeqStepNum = ValDiff;
    else if (ValDiff != SeqStepNum)
      return std::nullopt;

    if (!SeqStepDenom)
      SeqStepDenom = IdxDiff;
    else if (IdxDiff != *SeqStepDenom)
      return std::nullopt;

    if (!PrevElt || PrevElt->first != *Elt)
      PrevElt = std::make_pair(*Elt, Idx);

  if (!SeqStepNum || !SeqStepDenom)
    return std::nullopt;

        (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
    int64_t Addend = SignExtend64(*Elt - ExpectedVal, EltSizeInBits);

    if (!SeqAddend)
      SeqAddend = Addend;
    else if (Addend != SeqAddend)
      return std::nullopt;

  assert(SeqAddend && "Must have an addend if we have a step");

  return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
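// A "VID sequence" is an affine build_vector:
//   Elt[i] = (StepNumerator * i) / StepDenominator + Addend.
// For example <1, 3, 5, 7> matches with StepNumerator = 2,
// StepDenominator = 1, Addend = 1, and can then be synthesized from vid.v
// plus one shift/multiply and one add instead of a constant-pool load.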
  MVT ContainerVT = VT;

  MVT VT = Op.getSimpleValueType();

  unsigned MostCommonCount = 0;

  unsigned NumUndefElts =

  unsigned NumScalarLoads = 0;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
      NumScalarLoads += !CFP->isExactlyValue(+0.0);

    if (++Count >= MostCommonCount) {
      DominantValue = V;
      MostCommonCount = Count;
    }

  assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
  unsigned NumDefElts = NumElts - NumUndefElts;
  unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;

      ((MostCommonCount > DominantValueCountThreshold) ||

      !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
      LastOp != DominantValue) {

    Processed.insert(LastOp);

    const SDValue &V = OpIdx.value();
    if (V.isUndef() || !Processed.insert(V).second)
      continue;
    if (ValueCounts[V] == 1) {

    return DAG.getConstant(V == V1, DL, XLenVT);
  MVT VT = Op.getSimpleValueType();

    unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
    NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());

      unsigned IntegerViaVecElts = divideCeil(NumElts, NumViaIntegerBits);
      MVT IntegerViaVecVT =

      unsigned BitPos = 0, IntegerEltIdx = 0;

      for (unsigned I = 0; I < NumElts;) {

        bool BitValue = !V.isUndef() && V->getAsZExtVal();
        Bits |= ((uint64_t)BitValue << BitPos);

        if (I % NumViaIntegerBits == 0 || I == NumElts) {
          if (NumViaIntegerBits <= 32)
            Bits = SignExtend64<32>(Bits);

          Elts[IntegerEltIdx] = Elt;

      if (NumElts < NumViaIntegerBits) {

        assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
    int64_t StepNumerator = SimpleVID->StepNumerator;
    unsigned StepDenominator = SimpleVID->StepDenominator;
    int64_t Addend = SimpleVID->Addend;

    assert(StepNumerator != 0 && "Invalid step");
    bool Negate = false;
    int64_t SplatStepVal = StepNumerator;

    if (StepNumerator != 1 && StepNumerator != INT64_MIN &&

      Negate = StepNumerator < 0;

      SplatStepVal = Log2_64(std::abs(StepNumerator));

    if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
         (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&

        (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {

      MVT VIDContainerVT =

      if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
          (StepOpcode == ISD::SHL && SplatStepVal != 0)) {

        VID = DAG.getNode(StepOpcode, DL, VIDVT, VID, SplatStep);
      }
      if (StepDenominator != 1) {

      if (Addend != 0 || Negate) {

    assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32) &&
           "Unexpected sequence type");
    unsigned ViaVecLen =

    uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);

    for (const auto &OpIdx : enumerate(Op->op_values())) {
      const auto &SeqV = OpIdx.value();
      if (!SeqV.isUndef())
        SplatValue |=
            ((SeqV->getAsZExtVal() & EltMask) << (OpIdx.index() * EltBitSize));
    }

    if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
      SplatValue = SignExtend64<32>(SplatValue);

    const auto *BV = cast<BuildVectorSDNode>(Op);

        BV->getRepeatedSequence(Sequence) &&
        (Sequence.size() * EltBitSize) <= Subtarget.getELen()) {
      unsigned SeqLen = Sequence.size();
      assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
              ViaIntVT == MVT::i64) &&
             "Unexpected sequence type");

      const unsigned RequiredVL = NumElts / SeqLen;
      const unsigned ViaVecLen =
          (Subtarget.getRealMinVLen() >= ViaIntVT.getSizeInBits() * NumElts)
              ? NumElts
              : RequiredVL;

      unsigned EltIdx = 0;
      uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);

      for (const auto &SeqV : Sequence) {
        if (!SeqV.isUndef())
          SplatValue |=
              ((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));

      if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
        SplatValue = SignExtend64<32>(SplatValue);

              (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
             "Unexpected bitcast sequence");
      if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {

        MVT ViaContainerVT =

        if (ViaVecLen != RequiredVL)

  if (EltBitSize - SignBits < 8) {

        Source, DAG, Subtarget);

  MVT VT = Op.getSimpleValueType();
    auto OneVRegOfOps = ArrayRef(BuildVectorOps).slice(i, ElemsPerVReg);

    unsigned InsertIdx = (i / ElemsPerVReg) * NumOpElts;

  unsigned NumUndefElts =

  unsigned NumDefElts = NumElts - NumUndefElts;
  if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&

    for (unsigned i = 0; i < NumElts; i++) {

      if (i < NumElts / 2) {

      bool SelectMaskVal = (i < NumElts / 2);

    assert(SubVecAOps.size() == NumElts && SubVecBOps.size() == NumElts &&
           MaskVals.size() == NumElts);

  unsigned UndefCount = 0;

    LinearBudget -= PerSlideCost;

    LinearBudget -= PerSlideCost;

    LinearBudget -= PerSlideCost;

  if (LinearBudget < 0)

         "Illegal type which will result in reserved encoding");

      Vec, Offset, Mask, VL, Policy);

      Vec, Offset, Mask, VL, Policy);
  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();

    if ((LoC >> 31) == HiC)

      (isa<RegisterSDNode>(VL) &&
       cast<RegisterSDNode>(VL)->getReg() == RISCV::X0))

  else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->getAsZExtVal()))

      isa<ConstantSDNode>(Hi.getOperand(1)) &&
      Hi.getConstantOperandVal(1) == 31)
  assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");

  bool HasPassthru = Passthru && !Passthru.isUndef();
  if (!HasPassthru && !Passthru)

  if (Scalar.getValueType().bitsLE(XLenVT)) {

    Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);

  assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
         "Unexpected scalar for splat lowering!");

  SDValue ExtractedVal = Scalar.getOperand(0);

    MVT ExtractedContainerVT = ExtractedVT;

        DAG, ExtractedContainerVT, Subtarget);

        ExtractedVal, DAG, Subtarget);

    if (ExtractedContainerVT.bitsLE(VT))

  if (!Scalar.getValueType().bitsLE(XLenVT))

      VT, DL, DAG, Subtarget);

  Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
  if (Src != V2.getOperand(0))
    return false;

  if (Src.getValueType().getVectorNumElements() != (Mask.size() * 2))
    return false;

      V2.getConstantOperandVal(1) != Mask.size())
    return false;

  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  for (unsigned i = 1; i != Mask.size(); ++i)
    if (Mask[i] != Mask[i - 1] + 2)
      return false;

  int Size = Mask.size();

  assert(Size == (int)NumElts && "Unexpected mask size");

  EvenSrc = StartIndexes[0];
  OddSrc = StartIndexes[1];

  if (EvenSrc != 0 && OddSrc != 0)
    return false;

  int HalfNumElts = NumElts / 2;
  return ((EvenSrc % HalfNumElts) == 0) && ((OddSrc % HalfNumElts) == 0);
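// Example: with NumElts = 8, the mask <0, 8, 1, 9, 2, 10, 3, 11> interleaves
// the low halves of the two sources (EvenSrc = 0, OddSrc = 8 = NumElts, i.e.
// the start of V2), which maps onto the widening-arithmetic interleave
// lowering used further below.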
  int Size = Mask.size();

  for (int i = 0; i != Size; ++i) {

    int StartIdx = i - (M % Size);

    int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;

    if (!Rotation)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      return -1;

    int MaskSrc = M < Size ? 0 : 1;

    int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;

    if (TargetSrc < 0)
      TargetSrc = MaskSrc;
    else if (TargetSrc != MaskSrc)
      return -1;
  }

  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((LoSrc >= 0 || HiSrc >= 0) &&
         "Failed to find a rotated input vector!");
  MVT ContainerVT = VT;

  assert(Src.getSimpleValueType().isFixedLengthVector());

  MVT SrcContainerVT =

  Src = DAG.getBitcast(WideSrcContainerVT, Src);

  unsigned Shift = EvenElts ? 0 : EltBits;

      DAG.getUNDEF(IntContainerVT), TrueMask, VL);

  auto findNonEXTRACT_SUBVECTORParent =
      [](SDValue Parent) -> std::pair<SDValue, uint64_t> {
    uint64_t Offset = 0;
    while (Parent.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
           Parent.getOperand(0).getSimpleValueType().isFixedLengthVector()) {
      Offset += Parent.getConstantOperandVal(1);
      Parent = Parent.getOperand(0);
    }
    return std::make_pair(Parent, Offset);
  };

  auto [V1Src, V1IndexOffset] = findNonEXTRACT_SUBVECTORParent(V1);
  auto [V2Src, V2IndexOffset] = findNonEXTRACT_SUBVECTORParent(V2);
  for (size_t i = 0; i != NewMask.size(); ++i) {
    if (NewMask[i] == -1)
      continue;

    if (static_cast<size_t>(NewMask[i]) < NewMask.size()) {
      NewMask[i] = NewMask[i] + V1IndexOffset;
    } else {

      NewMask[i] = NewMask[i] - NewMask.size() + V2IndexOffset;
    }
  }

  if (NewMask[0] <= 0)
    return SDValue();

  for (unsigned i = 1; i != NewMask.size(); ++i)
    if (NewMask[i - 1] + 1 != NewMask[i])
      return SDValue();

  MVT SrcVT = Src.getSimpleValueType();
  int NumSubElts, Index;

  bool OpsSwapped = Mask[Index] < (int)NumElts;
  SDValue InPlace = OpsSwapped ? V2 : V1;
  SDValue ToInsert = OpsSwapped ? V1 : V2;

  if (NumSubElts + Index >= (int)NumElts)
    return SDValue();

  Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, InPlace, ToInsert,

  bool OpsSwapped = false;
  if (!isa<BuildVectorSDNode>(V1)) {
    if (!isa<BuildVectorSDNode>(V2))
      return SDValue();

  SDValue Splat = cast<BuildVectorSDNode>(V1)->getSplatValue();

    const unsigned E = Mask.size() - ((Offset > 0) ? Offset : 0);
    for (unsigned i = S; i != E; ++i)
      if (Mask[i] >= 0 && (unsigned)Mask[i] != Base + i + Offset)
        return false;

  bool IsVSlidedown = isSlideMask(Mask, OpsSwapped ? 0 : NumElts, 1);
  if (!IsVSlidedown && !isSlideMask(Mask, OpsSwapped ? 0 : NumElts, -1))
    return SDValue();

  const int InsertIdx = Mask[IsVSlidedown ? (NumElts - 1) : 0];

  if (InsertIdx < 0 || InsertIdx / NumElts != (unsigned)OpsSwapped)
    return SDValue();

  auto OpCode = IsVSlidedown ?

  auto Vec = DAG.getNode(OpCode, DL, ContainerVT,

                         Splat, TrueMask, VL);
  MVT VecContainerVT = VecVT;

  MVT WideContainerVT = WideVT;

  EvenV = DAG.getBitcast(VecContainerVT, EvenV);

  if (Subtarget.hasStdExtZvbb()) {

        OffsetVec, Passthru, Mask, VL);

        Interleaved, EvenV, Passthru, Mask, VL);

        OddV, Passthru, Mask, VL);

        OddV, AllOnesVec, Passthru, Mask, VL);

        Interleaved, OddsMul, Passthru, Mask, VL);

  Interleaved = DAG.getBitcast(ResultContainerVT, Interleaved);

  if (ViaEltSize > NumElts)

  if (ViaEltSize > NumElts)

  if (ViaEltSize > NumElts)

                            MVT &RotateVT, unsigned &RotateAmt) {

  unsigned NumSubElts;
  if (!ShuffleVectorInst::isBitRotateMask(Mask, EltSizeInBits, 2, NumElts,
                                          NumSubElts, RotateAmt))
    return false;

      NumElts / NumSubElts);
  unsigned VRegsPerSrc = NumElts / ElemsPerVReg;

      OutMasks(VRegsPerSrc, {-1, {}});

  for (unsigned DstIdx = 0; DstIdx < Mask.size(); DstIdx++) {
    int DstVecIdx = DstIdx / ElemsPerVReg;
    int DstSubIdx = DstIdx % ElemsPerVReg;
    int SrcIdx = Mask[DstIdx];
    if (SrcIdx < 0 || (unsigned)SrcIdx >= 2 * NumElts)
      continue;
    int SrcVecIdx = SrcIdx / ElemsPerVReg;
    int SrcSubIdx = SrcIdx % ElemsPerVReg;
    if (OutMasks[DstVecIdx].first == -1)
      OutMasks[DstVecIdx].first = SrcVecIdx;
    if (OutMasks[DstVecIdx].first != SrcVecIdx)
      return SDValue();

    OutMasks[DstVecIdx].second.resize(ElemsPerVReg, -1);
    OutMasks[DstVecIdx].second[DstSubIdx] = SrcSubIdx;
  }

  for (unsigned DstVecIdx = 0; DstVecIdx < OutMasks.size(); DstVecIdx++) {
    auto &[SrcVecIdx, SrcSubMask] = OutMasks[DstVecIdx];
    if (SrcVecIdx == -1)
      continue;
    unsigned ExtractIdx = (SrcVecIdx % VRegsPerSrc) * NumOpElts;

    unsigned InsertIdx = DstVecIdx * NumOpElts;
  MVT VT = Op.getSimpleValueType();

    V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)

        V.getOperand(0).getSimpleValueType().getVectorNumElements();
    V = V.getOperand(Offset / OpElements);

    auto *Ld = cast<LoadSDNode>(V);

      SDValue Ops[] = {Ld->getChain(),

      V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                      Ld->getPointerInfo().getWithOffset(Offset),
                      Ld->getOriginalAlign(),

          Ld->getPointerInfo().getWithOffset(Offset), SVT,
          Ld->getOriginalAlign(),
          Ld->getMemOperand()->getFlags());

    assert(Lane < (int)NumElts && "Unexpected lane!");

        DAG.getUNDEF(ContainerVT), TrueMask, VL);
  if (Subtarget.hasStdExtZvkb())

    LoV = LoSrc == 0 ? V1 : V2;

    HiV = HiSrc == 0 ? V1 : V2;

    unsigned InvRotate = NumElts - Rotation;

    Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,

  int EvenSrc, OddSrc;

    int Size = Mask.size();

    assert(EvenSrc >= 0 && "Undef source?");
    EvenV = (EvenSrc / Size) == 0 ? V1 : V2;

    assert(OddSrc >= 0 && "Undef source?");
    OddV = (OddSrc / Size) == 0 ? V1 : V2;

  assert(!V1.isUndef() && "Unexpected shuffle canonicalization");

      any_of(Mask, [&](const auto &Idx) { return Idx > 255; })) {

    MVT IndexContainerVT =

    for (int MaskIndex : Mask) {
      bool IsLHSIndex = MaskIndex < (int)NumElts && MaskIndex >= 0;

        DAG.getUNDEF(ContainerVT), TrueMask, VL);

    int MaskIndex = MaskIdx.value();
    return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;

  for (int MaskIndex : Mask) {
    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ SwapOps;

  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");

  for (int MaskIndex : Mask) {
    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ !SwapOps;

    bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
    ShuffleMaskLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
                                 ? MaskIndex : -1);
    ShuffleMaskRHS.push_back(IsLHSOrUndefIndex ? -1 : (MaskIndex - NumElts));

    std::swap(ShuffleMaskLHS, ShuffleMaskRHS);

  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
SDValue
RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
                                               SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();

  MVT ContainerVT = VT;

  if (Op->isVPOpcode()) {
    Mask = Op.getOperand(1);

    VL = Op.getOperand(2);
  }

  MVT FloatEltVT = (EltSize >= 32) ? MVT::f64 : MVT::f32;

    FloatEltVT = MVT::f32;

         "Expected legal float type!");
  } else if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF) {

    Src = DAG.getNode(ISD::VP_AND, DL, VT, Src, Neg, Mask, VL);
  }

  if (FloatVT.bitsGT(VT)) {
    if (Op->isVPOpcode())
      FloatVal = DAG.getNode(ISD::VP_UINT_TO_FP, DL, FloatVT, Src, Mask, VL);

    if (!Op->isVPOpcode())

      MVT ContainerFloatVT =

          Src, Mask, RTZRM, VL);

  unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;

  if (Op->isVPOpcode()) {

  else if (IntVT.bitsGT(VT))

  unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;

  if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF)
    return DAG.getNode(ISD::VP_SUB, DL, VT, Exp,

  unsigned Adjust = ExponentBias + (EltSize - 1);

  if (Op->isVPOpcode())

  else if (Op.getOpcode() == ISD::VP_CTLZ)
    Res = DAG.getNode(ISD::VP_UMIN, DL, VT, Res,
  auto *Load = cast<LoadSDNode>(Op);
  assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");

          Load->getMemoryVT(),
          *Load->getMemOperand()))

  MVT VT = Op.getSimpleValueType();

  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");

         "Expecting equally-sized RVV vector types to be legal");
                  Load->getPointerInfo(), Load->getOriginalAlign(),
                  Load->getMemOperand()->getFlags());

  auto *Store = cast<StoreSDNode>(Op);
  assert(Store && Store->getValue().getValueType().isVector() &&
         "Expected vector store");

          Store->getMemoryVT(),
          *Store->getMemOperand()))

  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV store type");

         "Expecting equally-sized RVV vector types to be legal");
  StoredVal = DAG.getBitcast(NewVT, StoredVal);

                   Store->getPointerInfo(), Store->getOriginalAlign(),
                   Store->getMemOperand()->getFlags());
  assert(Op.getValueType() == MVT::i64 && "Unexpected VT");

  int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();

  unsigned ShiftAmt, AddOpc;

  if (Subtarget.hasStdExtZtso()) {

         "Unexpected custom legalisation");

         "Unexpected custom legalisation");

         "Unexpected custom legalisation");
  if (isa<ConstantSDNode>(Op.getOperand(1)))

         "Unexpected custom legalisation");

  MVT VT = Op.getSimpleValueType();

  unsigned Check = Op.getConstantOperandVal(1);
  unsigned TDCMask = 0;
  MVT VT0 = Op.getOperand(0).getSimpleValueType();

  if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {

    VL = Op.getOperand(3);

      VL, Op->getFlags());

  if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {

    MVT MaskContainerVT =

    VL = Op.getOperand(3);

      Mask, VL, Op->getFlags());

      DAG.getUNDEF(ContainerDstVT), TDCMaskV, VL);

      DAG.getUNDEF(ContainerVT), Mask, VL});

      TDCMaskV, DAG.getUNDEF(ContainerDstVT), Mask, VL);

      DAG.getUNDEF(ContainerDstVT), SplatZero, VL);

      DAG.getUNDEF(ContainerVT), Mask, VL});
  MVT VT = Op.getSimpleValueType();

    return DAG.getNode(Opc, DL, VT, NewX, NewY);

  MVT ContainerVT = VT;

  if (Op->isVPOpcode()) {
    Mask = Op.getOperand(2);

    VL = Op.getOperand(3);
  }

      {X, X, DAG.getCondCode(ISD::SETOEQ),
       DAG.getUNDEF(ContainerVT), Mask, VL});

      {Y, Y, DAG.getCondCode(ISD::SETOEQ),
       DAG.getUNDEF(ContainerVT), Mask, VL});

      DAG.getUNDEF(ContainerVT), Mask, VL);
#define OP_CASE(NODE)                                                          \
  case ISD::NODE:                                                              \
    return RISCVISD::NODE##_VL;
#define VP_CASE(NODE)                                                          \
  case ISD::VP_##NODE:                                                         \
    return RISCVISD::NODE##_VL;

  switch (Op.getOpcode()) {

  case ISD::VP_CTLZ_ZERO_UNDEF:

  case ISD::VP_CTTZ_ZERO_UNDEF:

    if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)

    if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)

    if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)

  case ISD::VP_SELECT:

  case ISD::VP_SIGN_EXTEND:

  case ISD::VP_ZERO_EXTEND:

  case ISD::VP_FP_TO_SINT:

  case ISD::VP_FP_TO_UINT:

  case ISD::VP_FMINNUM:

  case ISD::VP_FMAXNUM:

  case ISD::VP_LLRINT:
5852 "not a RISC-V target specific op");
5858 "adding target specific op should update this function");
5878 "not a RISC-V target specific op");
5884 "adding target specific op should update this function");
    if (!Op.getOperand(j).getValueType().isVector()) {
      LoOperands[j] = Op.getOperand(j);
      HiOperands[j] = Op.getOperand(j);
      continue;
    }
    std::tie(LoOperands[j], HiOperands[j]) =

      DAG.getNode(Op.getOpcode(), DL, LoVT, LoOperands, Op->getFlags());

      DAG.getNode(Op.getOpcode(), DL, HiVT, HiOperands, Op->getFlags());

    std::tie(LoOperands[j], HiOperands[j]) =

    if (!Op.getOperand(j).getValueType().isVector()) {
      LoOperands[j] = Op.getOperand(j);
      HiOperands[j] = Op.getOperand(j);
      continue;
    }
    std::tie(LoOperands[j], HiOperands[j]) =

      DAG.getNode(Op.getOpcode(), DL, LoVT, LoOperands, Op->getFlags());

      DAG.getNode(Op.getOpcode(), DL, HiVT, HiOperands, Op->getFlags());
  auto [EVLLo, EVLHi] =
      DAG.SplitEVL(Op.getOperand(3), Op.getOperand(1).getValueType(), DL);

                  {Op.getOperand(0), Lo, MaskLo, EVLLo}, Op->getFlags());

                  {ResLo, Hi, MaskHi, EVLHi}, Op->getFlags());
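  // A VP operation on a too-wide type is split in half: the mask and the
  // vector operands are split, and DAG.SplitEVL divides the explicit vector
  // length between the halves (conceptually EVLLo = min(EVL, NumElts/2) and
  // EVLHi = EVL - EVLLo), after which two half-width ops are emitted and
  // their results recombined.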
    if (!Op.getOperand(j).getValueType().isVector()) {
      LoOperands[j] = Op.getOperand(j);
      HiOperands[j] = Op.getOperand(j);
      continue;
    }
    std::tie(LoOperands[j], HiOperands[j]) =

      DAG.getNode(Op.getOpcode(), DL, LoVTs, LoOperands, Op->getFlags());

      DAG.getNode(Op.getOpcode(), DL, HiVTs, HiOperands, Op->getFlags());
  switch (Op.getOpcode()) {

    return lowerGlobalAddress(Op, DAG);

    return lowerBlockAddress(Op, DAG);

    return lowerConstantPool(Op, DAG);

    return lowerJumpTable(Op, DAG);

    return lowerGlobalTLSAddress(Op, DAG);

    return lowerSELECT(Op, DAG);

    return lowerBRCOND(Op, DAG);

    return lowerVASTART(Op, DAG);

    return lowerFRAMEADDR(Op, DAG);

    return lowerRETURNADDR(Op, DAG);

    return lowerShiftLeftParts(Op, DAG);

    return lowerShiftRightParts(Op, DAG, true);

    return lowerShiftRightParts(Op, DAG, false);

    if (Op.getValueType().isFixedLengthVector()) {
      assert(Subtarget.hasStdExtZvkb());
      return lowerToScalableOp(Op, DAG);
    }
    assert(Subtarget.hasVendorXTHeadBb() &&
           !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
           "Unexpected custom legalization");

    if (!isa<ConstantSDNode>(Op.getOperand(1)))
    EVT VT = Op.getValueType();

    if (VT == MVT::f16 && Op0VT == MVT::i16 &&

    if (VT == MVT::bf16 && Op0VT == MVT::i16 &&
        Subtarget.hasStdExtZfbfmin()) {

    if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&

    if (VT == MVT::f64 && Op0VT == MVT::i64 && XLenVT == MVT::i32) {

           "Unexpected types");

    return LowerINTRINSIC_WO_CHAIN(Op, DAG);

    return LowerINTRINSIC_W_CHAIN(Op, DAG);

    return LowerINTRINSIC_VOID(Op, DAG);

    return LowerIS_FPCLASS(Op, DAG);

    MVT VT = Op.getSimpleValueType();

    assert(Subtarget.hasStdExtZvbb());
    return lowerToScalableOp(Op, DAG);

    assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");

    if (!Op.getSimpleValueType().isVector())

    return lowerVectorTruncLike(Op, DAG);

    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, 1);

    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, -1);

    return lowerSPLAT_VECTOR_PARTS(Op, DAG);

    return lowerINSERT_VECTOR_ELT(Op, DAG);

    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    MVT VT = Op.getSimpleValueType();

    MVT ContainerVT = VT;

        DAG.getUNDEF(ContainerVT), Scalar, VL);

    MVT VT = Op.getSimpleValueType();

    } else if ((Val % 8) == 0) {

    if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
        Op.getOperand(1).getValueType() == MVT::i32) {

    if (Op.getValueType() == MVT::nxv32f16 &&

    EVT VT = Op.getValueType();

    if (VT == MVT::f32 && Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())

    if (VT == MVT::f64 && Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) {

    if (!Op.getValueType().isVector())

    return lowerVectorFPExtendOrRoundLike(Op, DAG);

    EVT VT = Op.getValueType();

    if (VT == MVT::bf16 && Op0VT == MVT::f32 && Subtarget.hasStdExtZfbfmin())

    if (VT == MVT::bf16 && Op0VT == MVT::f64 && Subtarget.hasStdExtZfbfmin() &&

    if (!Op.getValueType().isVector())

    return lowerVectorFPExtendOrRoundLike(Op, DAG);

    return lowerStrictFPExtendOrRoundLike(Op, DAG);

    if (Op.getValueType().isVector() &&
        Op.getValueType().getScalarType() == MVT::f16 &&

    if (Op.getValueType() == MVT::nxv32f16)

        Op1.getValueType().isVector() &&
        Op1.getValueType().getScalarType() == MVT::f16 &&

    if (Op1.getValueType() == MVT::nxv32f16)

        Op1.getValueType().getVectorElementCount());

    return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), WidenVec);
    MVT VT = Op.getSimpleValueType();

    bool IsStrict = Op->isStrictFPOpcode();
    SDValue Src = Op.getOperand(0 + IsStrict);

    MVT SrcVT = Src.getSimpleValueType();

           "Unexpected vector element types");

    if (EltSize > (2 * SrcEltSize)) {

          Op.getOperand(0), Ext);

    assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");

    auto [FExt, Chain] =

    return DAG.getNode(Op.getOpcode(), DL, Op->getVTList(), Chain, FExt);

    if (SrcEltSize > (2 * EltSize)) {

      assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");

          Op.getOperand(0), Src);

          Op.getOperand(0), Src);

    unsigned RVVOpc = 0;
    switch (Op.getOpcode()) {

           "Expected same element count");

        Op.getOperand(0), Src, Mask, VL);

    Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);

        makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;

    MVT VT = Op.getSimpleValueType();

        makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;

        makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg, CallOptions, DL)

    return lowerVECREDUCE(Op, DAG);

    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskVecReduction(Op, DAG, false);
    return lowerVECREDUCE(Op, DAG);

    return lowerFPVECREDUCE(Op, DAG);
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAX:
    if (Op.getOperand(1).getValueType() == MVT::nxv32f16 &&

    return lowerVPREDUCE(Op, DAG);
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
    if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskVecReduction(Op, DAG, true);
    return lowerVPREDUCE(Op, DAG);

        DAG.getUNDEF(ContainerVT), DAG, Subtarget);

    return lowerINSERT_SUBVECTOR(Op, DAG);

    return lowerEXTRACT_SUBVECTOR(Op, DAG);

    return lowerVECTOR_DEINTERLEAVE(Op, DAG);

    return lowerVECTOR_INTERLEAVE(Op, DAG);

    return lowerSTEP_VECTOR(Op, DAG);

    return lowerVECTOR_REVERSE(Op, DAG);

    return lowerVECTOR_SPLICE(Op, DAG);
    if (Op.getValueType().getScalarType() == MVT::f16 &&

    if (Op.getValueType() == MVT::nxv32f16)

    if (Op.getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskSplat(Op, DAG);

    MVT VT = Op.getSimpleValueType();
    MVT ContainerVT = VT;

        Op->ops().take_front(HalfNumOps));

        Op->ops().drop_front(HalfNumOps));

    unsigned NumOpElts =
        Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();

      SDValue SubVec = OpIdx.value();

    if (auto V = expandUnalignedRVVLoad(Op, DAG))
      return V;
    if (Op.getValueType().isFixedLengthVector())
      return lowerFixedLengthVectorLoadToRVV(Op, DAG);

    if (auto V = expandUnalignedRVVStore(Op, DAG))
      return V;
    if (Op.getOperand(1).getValueType().isFixedLengthVector())
      return lowerFixedLengthVectorStoreToRVV(Op, DAG);

    return lowerMaskedLoad(Op, DAG);

    return lowerMaskedStore(Op, DAG);

    EVT VT = Op.getValueType();

    MVT OpVT = Op.getOperand(0).getSimpleValueType();

    MVT VT = Op.getSimpleValueType();

           "Unexpected CondCode");
    if (isa<ConstantSDNode>(RHS)) {
      int64_t Imm = cast<ConstantSDNode>(RHS)->getSExtValue();
      if (Imm != 0 && isInt<12>((uint64_t)Imm + 1)) {

    if (Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&

    return lowerFixedLengthVectorSetccToRVV(Op, DAG);

    return lowerToScalableOp(Op, DAG);

    if (Op.getSimpleValueType().isFixedLengthVector())
      return lowerToScalableOp(Op, DAG);

    assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    if (Op.getValueType() == MVT::nxv32f16 &&

    return lowerToScalableOp(Op, DAG);

    if (!Op.getValueType().isVector())

    return lowerToScalableOp(Op, DAG);

    if (!Op.getValueType().isVector())

    return lowerToScalableOp(Op, DAG);

    EVT VT = Op->getValueType(0);

    return lowerABS(Op, DAG);

    if (Subtarget.hasStdExtZvbb())
      return lowerToScalableOp(Op, DAG);

    return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);

    return lowerFixedLengthVectorSelectToRVV(Op, DAG);

    if (Op.getValueType() == MVT::nxv32f16 &&

    return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);

    if (Op.getValueType() == MVT::nxv32f16 &&

    return lowerToScalableOp(Op, DAG);

    return lowerVectorStrictFSetcc(Op, DAG);
6850 case ISD::VP_GATHER:
6851 return lowerMaskedGather(
Op, DAG);
6853 case ISD::VP_SCATTER:
6854 return lowerMaskedScatter(
Op, DAG);
6856 return lowerGET_ROUNDING(
Op, DAG);
6858 return lowerSET_ROUNDING(
Op, DAG);
6860 return lowerEH_DWARF_CFA(
Op, DAG);
6861 case ISD::VP_SELECT:
6870 case ISD::VP_UADDSAT:
6871 case ISD::VP_USUBSAT:
6872 case ISD::VP_SADDSAT:
6873 case ISD::VP_SSUBSAT:
6875 case ISD::VP_LLRINT:
6876 return lowerVPOp(
Op, DAG);
6880 return lowerLogicVPOp(
Op, DAG);
6889 case ISD::VP_FMINNUM:
6890 case ISD::VP_FMAXNUM:
6891 case ISD::VP_FCOPYSIGN:
6892 if (
Op.getValueType() == MVT::nxv32f16 &&
6900 return lowerVPOp(
Op, DAG);
6901 case ISD::VP_IS_FPCLASS:
6902 return LowerIS_FPCLASS(
Op, DAG);
6903 case ISD::VP_SIGN_EXTEND:
6904 case ISD::VP_ZERO_EXTEND:
6905 if (
Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
6906 return lowerVPExtMaskOp(
Op, DAG);
6907 return lowerVPOp(
Op, DAG);
6908 case ISD::VP_TRUNCATE:
6909 return lowerVectorTruncLike(
Op, DAG);
6910 case ISD::VP_FP_EXTEND:
6911 case ISD::VP_FP_ROUND:
6912 return lowerVectorFPExtendOrRoundLike(
Op, DAG);
6913 case ISD::VP_SINT_TO_FP:
6914 case ISD::VP_UINT_TO_FP:
6915 if (
Op.getValueType().isVector() &&
6916 Op.getValueType().getScalarType() == MVT::f16 &&
6919 if (
Op.getValueType() == MVT::nxv32f16)
6931 case ISD::VP_FP_TO_SINT:
6932 case ISD::VP_FP_TO_UINT:
6934 Op1.getValueType().isVector() &&
6935 Op1.getValueType().getScalarType() == MVT::f16 &&
6938 if (Op1.getValueType() == MVT::nxv32f16)
6943 Op1.getValueType().getVectorElementCount());
6947 {WidenVec, Op.getOperand(1), Op.getOperand(2)});
6949 return lowerVPFPIntConvOp(
Op, DAG);
6951 if (
Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&
6955 if (
Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
6956 return lowerVPSetCCMaskOp(
Op, DAG);
6962 case ISD::VP_BITREVERSE:
6964 return lowerVPOp(
Op, DAG);
6966 case ISD::VP_CTLZ_ZERO_UNDEF:
6967 if (Subtarget.hasStdExtZvbb())
6968 return lowerVPOp(
Op, DAG);
6969 return lowerCTLZ_CTTZ_ZERO_UNDEF(
Op, DAG);
6971 case ISD::VP_CTTZ_ZERO_UNDEF:
6972 if (Subtarget.hasStdExtZvbb())
6973 return lowerVPOp(
Op, DAG);
6974 return lowerCTLZ_CTTZ_ZERO_UNDEF(
Op, DAG);
6976 return lowerVPOp(
Op, DAG);
6977 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
6978 return lowerVPStridedLoad(
Op, DAG);
6979 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6980 return lowerVPStridedStore(
Op, DAG);
6982 case ISD::VP_FFLOOR:
6984 case ISD::VP_FNEARBYINT:
6985 case ISD::VP_FROUND:
6986 case ISD::VP_FROUNDEVEN:
6987 case ISD::VP_FROUNDTOZERO:
6988 if (
Op.getValueType() == MVT::nxv32f16 &&
6993 case ISD::VP_FMAXIMUM:
6994 case ISD::VP_FMINIMUM:
6995 if (
Op.getValueType() == MVT::nxv32f16 &&
7000 case ISD::EXPERIMENTAL_VP_SPLICE:
7001 return lowerVPSpliceExperimental(
Op, DAG);
7002 case ISD::EXPERIMENTAL_VP_REVERSE:
7003 return lowerVPReverseExperimental(
Op, DAG);
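// Note on the recurring lowering pattern above: operations on fixed-length
// vectors are generally lowered by wrapping the operands into a scalable
// "container" type, performing the operation under an explicit VL, and
// unwrapping the result. A minimal sketch of that pattern, assuming the
// helpers used elsewhere in this file:
//
//   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
//   SDValue V = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
//   auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
//   SDValue R = DAG.getNode(RISCVISDOpc, DL, ContainerVT, V, Mask, VL);
//   return convertFromScalableVector(VT, R, DAG, Subtarget);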
                                 N->getOffset(), Flags);
template <class NodeTy>
                                     bool IsLocal, bool IsExternWeak) const {
  if (IsLocal && !Subtarget.allowTaggedGlobals())
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  return getAddr(N, DAG);
  return getAddr(N, DAG);
  return getAddr(N, DAG);
                                            bool UseGOT) const {
  Args.push_back(Entry);
  assert(N->getOffset() == 0 && "unexpected offset in global node");
    Addr = getStaticTLSAddr(N, DAG, false);
    Addr = getStaticTLSAddr(N, DAG, true);
               : getDynamicTLSAddr(N, DAG);
  if (LHS == LHS2 && RHS == RHS2) {
  } else if (LHS == RHS2 && RHS == LHS2) {
  return std::nullopt;
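// The two arms matched above are the same comparison up to operand order:
// (setcc X, Y, CC) also covers (setcc Y, X, CC'), e.g. (X < Y) is equivalent
// to (Y > X), so only the condition code needs to be adjusted in the second
// arm before the match succeeds.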
  MVT VT = N->getSimpleValueType(0);
  if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
    if (~TrueVal == FalseVal) {
  if (Subtarget.hasShortForwardBranchOpt())
  unsigned SelOpNo = 0;
  unsigned ConstSelOpNo = 1;
  unsigned OtherSelOpNo = 2;
  if (!dyn_cast<ConstantSDNode>(Sel->getOperand(ConstSelOpNo))) {
  ConstantSDNode *ConstSelOpNode = dyn_cast<ConstantSDNode>(ConstSelOp);
  if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
  ConstantSDNode *ConstBinOpNode = dyn_cast<ConstantSDNode>(ConstBinOp);
  if (!ConstBinOpNode || ConstBinOpNode->isOpaque())
  SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
    std::swap(NewConstOps[0], NewConstOps[1]);
  SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
    std::swap(NewNonConstOps[0], NewNonConstOps[1]);
  SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
  SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
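// The fold above pushes a binary op through a select when one select arm is
// a constant, so the constant arm folds away entirely. Roughly:
//   (binop (select c, K, X), C2) -> (select c, fold(K, C2), (binop X, C2))
// e.g. (add (select c, 4, X), 2) becomes (select c, 6, (add X, 2)).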
  MVT VT = Op.getSimpleValueType();
  if ((Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps()) &&
    if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
          TrueVal, Subtarget.getXLen(), Subtarget, true);
          FalseVal, Subtarget.getXLen(), Subtarget, true);
      bool IsCZERO_NEZ = TrueValCost <= FalseValCost;
          IsCZERO_NEZ ? FalseVal - TrueVal : TrueVal - FalseVal, DL, VT);
                         DL, VT, LHSVal, CondV);
  if (Op.hasOneUse()) {
    unsigned UseOpc = Op->use_begin()->getOpcode();
        return lowerSELECT(NewSel, DAG);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
  if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
    if (TrueVal - 1 == FalseVal)
    if (TrueVal + 1 == FalseVal)
      RHS == TrueV && LHS == FalseV) {
  if (isa<ConstantSDNode>(TrueV) && !isa<ConstantSDNode>(FalseV)) {
                    LHS, RHS, TargetCC, Op.getOperand(2));
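// With Zicond/XVentanaCondOps, (select c, t, f) can be materialized without
// a branch when both arms are constants: only the difference is conditionally
// zeroed. Roughly:
//   (select c, t, f) -> (add f, (czero.eqz (t - f), c))
// with czero.eqz/czero.nez chosen (via IsCZERO_NEZ above) so the cheaper
// constant is the one that gets materialized directly.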
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  int XLenInBytes = Subtarget.getXLen() / 8;
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
    int Offset = -(XLenInBytes * 2);
  int XLenInBytes = Subtarget.getXLen() / 8;
  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
  EVT VT = Lo.getValueType();
  EVT VT = Lo.getValueType();
  MVT VT = Op.getSimpleValueType();
  MVT VecVT = Op.getSimpleValueType();
         "Unexpected SPLAT_VECTOR_PARTS lowering");
  MVT ContainerVT = VecVT;
                                        int64_t ExtTrueVal) const {
  MVT VecVT = Op.getSimpleValueType();
  assert(Src.getValueType().isVector() &&
         Src.getValueType().getVectorElementType() == MVT::i1);
                          DAG.getUNDEF(ContainerVT), SplatZero, VL);
                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
                             SplatZero, DAG.getUNDEF(ContainerVT), VL);
SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
  MVT ExtVT = Op.getSimpleValueType();
  MVT VT = Op.getOperand(0).getSimpleValueType();
  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
  EVT MaskVT = Op.getValueType();
         "Unexpected type for vector mask lowering");
  MVT VecVT = Src.getSimpleValueType();
    VL = Op.getOperand(2);
  MVT ContainerVT = VecVT;
    MVT MaskContainerVT =
    std::tie(Mask, VL) =
                         DAG.getUNDEF(ContainerVT), SplatOne, VL);
                          DAG.getUNDEF(ContainerVT), SplatZero, VL);
                      DAG.getUNDEF(ContainerVT), Mask, VL);
                       DAG.getUNDEF(MaskContainerVT), Mask, VL});
  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type for vector truncate lowering");
    return lowerVectorMaskTruncLike(Op, DAG);
  MVT SrcVT = Src.getSimpleValueType();
         "Unexpected vector truncate lowering");
  MVT ContainerVT = SrcVT;
    VL = Op.getOperand(2);
    std::tie(Mask, VL) =
  } while (SrcEltVT != DstEltVT);
RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  MVT SrcVT = Src.getSimpleValueType();
  MVT ContainerVT = VT;
                      Chain, Src, Mask, VL);
    Chain = Src.getValue(1);
                    Chain, Src, Mask, VL);
RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
      Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type for vector truncate lowering");
  MVT SrcVT = Src.getSimpleValueType();
  bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
  MVT ContainerVT = VT;
    VL = Op.getOperand(2);
    std::tie(Mask, VL) =
    Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
    unsigned InterConvOpc =
        DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
        DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
static std::optional<MVT>
  const unsigned MinVLMAX = VectorBitsMin / EltSize;
  if (MaxIdx < MinVLMAX)
  else if (MaxIdx < MinVLMAX * 2)
  else if (MaxIdx < MinVLMAX * 4)
  return std::nullopt;
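// Worked example for the VLMAX-based shrinking above: with a minimum VLEN of
// 128 and 32-bit elements, MinVLMAX is 128 / 32 = 4. A constant index below 4
// is therefore known to land in the first vector register, one below 8 within
// an LMUL=2 group, one below 16 within LMUL=4; anything larger gives up and
// returns std::nullopt so the original container type is kept.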
  MVT VecVT = Op.getSimpleValueType();
  MVT ContainerVT = VecVT;
  MVT OrigContainerVT = ContainerVT;
  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx)) {
    const unsigned OrigIdx = IdxC->getZExtValue();
                                   DL, DAG, Subtarget)) {
      ContainerVT = *ShrunkVT;
      VLEN && ContainerVT.bitsGT(M1VT)) {
    unsigned RemIdx = OrigIdx % ElemsPerVReg;
    unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
    unsigned ExtractIdx =
  if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
    const auto *CVal = cast<ConstantSDNode>(Val);
    if (isInt<32>(CVal->getSExtValue())) {
      IsLegalInsert = true;
  if (IsLegalInsert) {
      Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
    std::tie(ValLo, ValHi) = DAG.SplitScalar(Val, DL, MVT::i32, MVT::i32);
    MVT I32ContainerVT =
                             Vec, Vec, ValLo, I32Mask, InsertI64VL);
                             Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
      ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
                               ValInVec, AlignedIdx);
                             DAG.getUNDEF(I32ContainerVT), ValLo,
                             I32Mask, InsertI64VL);
                             DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
                             I32Mask, InsertI64VL);
    ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
                                 Idx, Mask, InsertVL, Policy);
                           Slideup, AlignedIdx);
  EVT EltVT = Op.getValueType();
  MVT ContainerVT = VecVT;
    unsigned WidenVecLen;
    unsigned MaxEEW = Subtarget.getELen();
           "the number of elements should be power of 2");
      ExtractBitIdx = Idx;
      WideEltVT = LargestEltVT;
    ExtractElementIdx = DAG.getNode(
                      Vec, ExtractElementIdx);
  MVT ContainerVT = VecVT;
  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
      IdxC && VLen && VecVT.getSizeInBits().getKnownMinValue() > *VLen) {
    unsigned OrigIdx = IdxC->getZExtValue();
    unsigned RemIdx = OrigIdx % ElemsPerVReg;
    unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
    unsigned ExtractIdx =
  std::optional<uint64_t> MaxIdx;
  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx))
    MaxIdx = IdxC->getZExtValue();
  if (auto SmallerVT =
    ContainerVT = *SmallerVT;
         "Unexpected opcode");
  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
  if (OpVT.bitsLT(XLenVT)) {
    ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
  MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
  assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
  case Intrinsic::riscv_vslide1up:
  case Intrinsic::riscv_vslide1down:
  case Intrinsic::riscv_vslide1up_mask:
  case Intrinsic::riscv_vslide1down_mask: {
    bool IsMasked = NumOps == 7;
    std::tie(ScalarLo, ScalarHi) =
    if (isa<ConstantSDNode>(AVL)) {
      const auto [MinVLMAX, MaxVLMAX] =
      if (AVLInt <= MinVLMAX) {
      } else if (AVLInt >= 2 * MaxVLMAX) {
            Intrinsic::riscv_vsetvlimax, DL, MVT::i32);
    if (IntNo == Intrinsic::riscv_vslide1up ||
        IntNo == Intrinsic::riscv_vslide1up_mask) {
                        ScalarHi, I32Mask, I32VL);
                        ScalarLo, I32Mask, I32VL);
                        ScalarLo, I32Mask, I32VL);
                        ScalarHi, I32Mask, I32VL);
    const unsigned ElementWidth = 8;
  [[maybe_unused]] unsigned MinVF =
  [[maybe_unused]] unsigned VF = N->getConstantOperandVal(2);
  bool Fractional = VF < LMul1VF;
  unsigned LMulVal = Fractional ? LMul1VF / VF : VF / LMul1VF;
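// On RV32, the i64 scalar operand of vslide1up/vslide1down cannot be passed
// in a single GPR, so the lowering above splits it into two i32 halves and
// performs two SEW=32 slides at doubled VL instead; for vslide1up:
//   slide in ScalarHi first, then ScalarLo,
// which leaves the pair ordered lo/hi when the result is viewed back at
// SEW=64 (vslide1down uses the opposite order).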
  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
  if (OpVT.bitsLT(XLenVT)) {
    ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
  EVT ValType = V.getValueType();
  if (ValType.isVector() && ValType.isFloatingPoint()) {
                     ValType.getVectorElementCount());
  if (ValType.isFixedLengthVector()) {
        DAG, V.getSimpleValueType(), Subtarget);
  unsigned IntNo = Op.getConstantOperandVal(0);
  case Intrinsic::thread_pointer: {
  case Intrinsic::riscv_orc_b:
  case Intrinsic::riscv_brev8:
  case Intrinsic::riscv_sha256sig0:
  case Intrinsic::riscv_sha256sig1:
  case Intrinsic::riscv_sha256sum0:
  case Intrinsic::riscv_sha256sum1:
  case Intrinsic::riscv_sm3p0:
  case Intrinsic::riscv_sm3p1: {
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
  case Intrinsic::riscv_sm4ks:
  case Intrinsic::riscv_sm4ed: {
        DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, Op.getOperand(3));
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
  case Intrinsic::riscv_zip:
  case Intrinsic::riscv_unzip: {
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
  case Intrinsic::riscv_mopr: {
  case Intrinsic::riscv_moprr: {
                       Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::riscv_clmul:
  case Intrinsic::riscv_clmulh:
  case Intrinsic::riscv_clmulr: {
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::experimental_get_vector_length:
  case Intrinsic::riscv_vmv_x_s: {
  case Intrinsic::riscv_vfmv_f_s:
  case Intrinsic::riscv_vmv_v_x:
                            Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
  case Intrinsic::riscv_vfmv_v_f:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::riscv_vmv_s_x: {
    if (Scalar.getValueType().bitsLE(XLenVT)) {
                         Op.getOperand(1), Scalar, Op.getOperand(3));
    assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
    MVT VT = Op.getSimpleValueType();
    if (Op.getOperand(1).isUndef())
  case Intrinsic::riscv_vfmv_s_f:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::riscv_vaesdf_vv:
  case Intrinsic::riscv_vaesdf_vs:
  case Intrinsic::riscv_vaesdm_vv:
  case Intrinsic::riscv_vaesdm_vs:
  case Intrinsic::riscv_vaesef_vv:
  case Intrinsic::riscv_vaesef_vs:
  case Intrinsic::riscv_vaesem_vv:
  case Intrinsic::riscv_vaesem_vs:
  case Intrinsic::riscv_vaeskf1:
  case Intrinsic::riscv_vaeskf2:
  case Intrinsic::riscv_vaesz_vs:
  case Intrinsic::riscv_vsm4k:
  case Intrinsic::riscv_vsm4r_vv:
  case Intrinsic::riscv_vsm4r_vs: {
    if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
  case Intrinsic::riscv_vsm3c:
  case Intrinsic::riscv_vsm3me: {
    if (!isValidEGW(8, Op.getSimpleValueType(), Subtarget) ||
        !isValidEGW(8, Op->getOperand(1).getSimpleValueType(), Subtarget))
  case Intrinsic::riscv_vsha2ch:
  case Intrinsic::riscv_vsha2cl:
  case Intrinsic::riscv_vsha2ms: {
    if (Op->getSimpleValueType(0).getScalarSizeInBits() == 64 &&
        !Subtarget.hasStdExtZvknhb())
    if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
  case Intrinsic::riscv_sf_vc_v_x:
  case Intrinsic::riscv_sf_vc_v_i:
  case Intrinsic::riscv_sf_vc_v_xv:
  case Intrinsic::riscv_sf_vc_v_iv:
  case Intrinsic::riscv_sf_vc_v_vv:
  case Intrinsic::riscv_sf_vc_v_fv:
  case Intrinsic::riscv_sf_vc_v_xvv:
  case Intrinsic::riscv_sf_vc_v_ivv:
  case Intrinsic::riscv_sf_vc_v_vvv:
  case Intrinsic::riscv_sf_vc_v_fvv:
  case Intrinsic::riscv_sf_vc_v_xvw:
  case Intrinsic::riscv_sf_vc_v_ivw:
  case Intrinsic::riscv_sf_vc_v_vvw:
  case Intrinsic::riscv_sf_vc_v_fvw: {
    MVT VT = Op.getSimpleValueType();
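// The isValidEGW checks above enforce the Zvk element-group rules: the vector
// crypto instructions operate on element groups of 4 or 8 elements, so the
// operand types must be able to hold at least one full group, i.e. roughly
// LMUL * VLEN >= EGS * SEW for the type in question.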
  MVT VT = Op.getSimpleValueType();
  if (VT.isFloatingPoint()) {
  if (VT.isFixedLengthVector())
  if (VT.isFixedLengthVector())
  if (VT.isFloatingPoint())
  unsigned IntNo = Op.getConstantOperandVal(1);
  case Intrinsic::riscv_masked_strided_load: {
    MVT VT = Op->getSimpleValueType(0);
    MVT ContainerVT = VT;
    auto *Load = cast<MemIntrinsicSDNode>(Op);
                       ScalarVT, Load->getMemOperand());
                       Load->getMemOperand());
        IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
      Ops.push_back(DAG.getUNDEF(ContainerVT));
      Ops.push_back(PassThru);
    Ops.push_back(Stride);
      Ops.push_back(Mask);
      Ops.push_back(Policy);
        Load->getMemoryVT(), Load->getMemOperand());
    Chain = Result.getValue(1);
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load: {
        Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
        Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
        Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
        Intrinsic::riscv_vlseg8};
    unsigned NF = Op->getNumValues() - 1;
    assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
    MVT VT = Op->getSimpleValueType(0);
    auto *Load = cast<MemIntrinsicSDNode>(Op);
    ContainerVTs.push_back(MVT::Other);
        Load->getMemoryVT(), Load->getMemOperand());
    for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
  case Intrinsic::riscv_sf_vc_v_x_se:
  case Intrinsic::riscv_sf_vc_v_i_se:
  case Intrinsic::riscv_sf_vc_v_xv_se:
  case Intrinsic::riscv_sf_vc_v_iv_se:
  case Intrinsic::riscv_sf_vc_v_vv_se:
  case Intrinsic::riscv_sf_vc_v_fv_se:
  case Intrinsic::riscv_sf_vc_v_xvv_se:
  case Intrinsic::riscv_sf_vc_v_ivv_se:
  case Intrinsic::riscv_sf_vc_v_vvv_se:
  case Intrinsic::riscv_sf_vc_v_fvv_se:
  case Intrinsic::riscv_sf_vc_v_xvw_se:
  case Intrinsic::riscv_sf_vc_v_ivw_se:
  case Intrinsic::riscv_sf_vc_v_vvw_se:
  case Intrinsic::riscv_sf_vc_v_fvw_se:
  unsigned IntNo = Op.getConstantOperandVal(1);
  case Intrinsic::riscv_masked_strided_store: {
    MVT ContainerVT = VT;
        IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
    auto *Store = cast<MemIntrinsicSDNode>(Op);
                                   Ops, Store->getMemoryVT(),
                                   Store->getMemOperand());
  case Intrinsic::riscv_seg2_store:
  case Intrinsic::riscv_seg3_store:
  case Intrinsic::riscv_seg4_store:
  case Intrinsic::riscv_seg5_store:
  case Intrinsic::riscv_seg6_store:
  case Intrinsic::riscv_seg7_store:
  case Intrinsic::riscv_seg8_store: {
        Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
        Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
        Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
        Intrinsic::riscv_vsseg8};
    assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
    MVT VT = Op->getOperand(2).getSimpleValueType();
    auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Op);
    for (unsigned i = 0; i < NF; i++)
          ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget));
        FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
  case Intrinsic::riscv_sf_vc_xv_se:
  case Intrinsic::riscv_sf_vc_iv_se:
  case Intrinsic::riscv_sf_vc_vv_se:
  case Intrinsic::riscv_sf_vc_fv_se:
  case Intrinsic::riscv_sf_vc_xvv_se:
  case Intrinsic::riscv_sf_vc_ivv_se:
  case Intrinsic::riscv_sf_vc_vvv_se:
  case Intrinsic::riscv_sf_vc_fvv_se:
  case Intrinsic::riscv_sf_vc_xvw_se:
  case Intrinsic::riscv_sf_vc_ivw_se:
  case Intrinsic::riscv_sf_vc_vvw_se:
  case Intrinsic::riscv_sf_vc_fvw_se:
  switch (ISDOpcode) {
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMAX:
  case ISD::VP_REDUCE_FMIN:
  SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
          Op.getOpcode() == ISD::VP_REDUCE_AND ||
          Op.getOpcode() == ISD::VP_REDUCE_OR ||
          Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
         "Unexpected reduction lowering");
  MVT ContainerVT = VecVT;
    VL = Op.getOperand(3);
    std::tie(Mask, VL) =
  switch (Op.getOpcode()) {
  case ISD::VP_REDUCE_AND: {
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR: {
  return DAG.getNode(BaseOpc, DL, Op.getValueType(), SetCC, Op.getOperand(0));
  auto *RegisterAVL = dyn_cast<RegisterSDNode>(AVL);
  auto *ImmAVL = dyn_cast<ConstantSDNode>(AVL);
  return (RegisterAVL && RegisterAVL->getReg() == RISCV::X0) ||
         (ImmAVL && ImmAVL->getZExtValue() >= 1);
  auto InnerVT = VecVT.bitsLE(M1VT) ? VecVT : M1VT;
  auto InnerVL = NonZeroAVL ? VL : DAG.getConstant(1, DL, XLenVT);
  if (M1VT != InnerVT)
  SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
    VecEVT = Lo.getValueType();
  MVT ContainerVT = VecVT;
                           Mask, VL, DL, DAG, Subtarget);
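// Mask-vector reductions above are implemented with vcpop.m rather than a
// real reduction instruction: AND of all bits is "vcpop of the complemented
// mask == 0", OR is "vcpop != 0", and XOR is "vcpop & 1", each materialized
// as a setcc/and on the scalar popcount result.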
static std::tuple<unsigned, SDValue, SDValue>
  auto Flags = Op->getFlags();
  unsigned Opcode = Op.getOpcode();
    return std::make_tuple(RVVOpc, Op.getOperand(0), Front);
  MVT VecEltVT = Op.getSimpleValueType();
  std::tie(RVVOpcode, VectorVal, ScalarVal) =
  MVT ContainerVT = VecVT;
  MVT ResVT = Op.getSimpleValueType();
                           VL, DL, DAG, Subtarget);
  if (Op->getFlags().hasNoNaNs())
        {VectorVal, VectorVal, DAG.getCondCode(ISD::SETNE),
         DAG.getUNDEF(Mask.getValueType()), Mask, VL});
                      DL, ResVT, NoNaNs, Res,
                                   Vec, Mask, VL, DL, DAG, Subtarget);
  unsigned OrigIdx = Op.getConstantOperandVal(2);
      (OrigIdx != 0 || !Vec.isUndef())) {
    assert(OrigIdx % 8 == 0 && "Invalid index");
           "Unexpected mask vector lowering");
  MVT ContainerVT = VecVT;
    SubVec = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, SubVec,
                         SlideupAmt, Mask, VL, Policy);
  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =
          VecVT, SubVecVT, OrigIdx, TRI);
  if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
  MVT InterSubVT = VecVT;
    unsigned AlignedIdx = OrigIdx - RemIdx;
  SubVec = getVSlideup(DAG, Subtarget, DL, InterSubVT, AlignedExtract, SubVec,
                       SlideupAmt, Mask, VL, Policy);
  if (VecVT.bitsGT(InterSubVT))
  return DAG.getBitcast(Op.getSimpleValueType(), SubVec);
  MVT SubVecVT = Op.getSimpleValueType();
  unsigned OrigIdx = Op.getConstantOperandVal(1);
    assert(OrigIdx % 8 == 0 && "Invalid index");
           "Unexpected mask vector lowering");
    MVT ContainerVT = VecVT;
      ContainerVT = *ShrunkVT;
        DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
  MVT ContainerSubVecVT = SubVecVT;
            VecVT, ContainerSubVecVT, OrigIdx / Vscale, TRI);
    SubRegIdx = Decompose.first;
                              (OrigIdx % Vscale));
            VecVT, ContainerSubVecVT, OrigIdx, TRI);
    SubRegIdx = Decompose.first;
  MVT InterSubVT = VecVT;
    assert(SubRegIdx != RISCV::NoSubRegister);
                    Vec, SlidedownAmt, Mask, VL);
  return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
  MVT VT = N.getSimpleValueType();
    assert(Op.getSimpleValueType() == VT &&
           "Operands and result must be same type");
  unsigned NumVals = N->getNumValues();
      NumVals, N.getValueType().changeVectorElementType(MVT::i8)));
  for (unsigned I = 0; I < NumVals; I++) {
  if (TruncVals.size() > 1)
  return TruncVals.front();
  MVT VecVT = Op.getSimpleValueType();
         "vector_interleave on non-scalable vector!");
    EVT SplitVT = Op0Lo.getValueType();
                                 DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op0Hi);
                                 DAG.getVTList(SplitVT, SplitVT), Op1Lo, Op1Hi);
                             Op.getOperand(0), Op.getOperand(1));
                       Concat, EvenIdx, Passthru, Mask, VL);
                      Concat, OddIdx, Passthru, Mask, VL);
  MVT VecVT = Op.getSimpleValueType();
         "vector_interleave on non-scalable vector!");
    EVT SplitVT = Op0Lo.getValueType();
                                DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op1Lo);
                                DAG.getVTList(SplitVT, SplitVT), Op0Hi, Op1Hi);
                             Op.getOperand(0), Op.getOperand(1));
  MVT VT = Op.getSimpleValueType();
  uint64_t StepValImm = Op.getConstantOperandVal(0);
  if (StepValImm != 1) {
                                        VL, VT, DL, DAG, Subtarget);
  MVT VecVT = Op.getSimpleValueType();
  unsigned MaxVLMAX =
  if (MaxVLMAX > 256 && EltSize == 8) {
  return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices,
  MVT VecVT = Op.getSimpleValueType();
  int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
  SDValue DownOffset, UpOffset;
  if (ImmValue >= 0) {
                                  DownOffset, TrueMask, UpOffset);
  return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
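// VECTOR_SPLICE above is a slidedown of the first source followed by a
// slideup of the second into the vacated tail. For example, with 4 elements:
//   vector_splice(<a0,a1,a2,a3>, <b0,b1,b2,b3>, 2) == <a2, a3, b0, b1>
// i.e. vslidedown by 2 on the first operand, then vslideup by VLMAX - 2
// merging in the second.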
RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
  auto *Load = cast<LoadSDNode>(Op);
                                        Load->getMemoryVT(),
                                        *Load->getMemOperand()) &&
         "Expecting a correctly-aligned load");
  MVT VT = Op.getSimpleValueType();
  const auto [MinVLMAX, MaxVLMAX] =
      getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
      IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
      Load->getMemoryVT(), Load->getMemOperand());
RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
  auto *Store = cast<StoreSDNode>(Op);
                                        Store->getMemoryVT(),
                                        *Store->getMemOperand()) &&
         "Expecting a correctly-aligned store");
  const auto [MinVLMAX, MaxVLMAX] =
      getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
      IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
      {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
      Store->getMemoryVT(), Store->getMemOperand());
  MVT VT = Op.getSimpleValueType();
  const auto *MemSD = cast<MemSDNode>(Op);
  EVT MemVT = MemSD->getMemoryVT();
  SDValue Chain = MemSD->getChain();
  if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
    Mask = VPLoad->getMask();
    VL = VPLoad->getVectorLength();
    const auto *MLoad = cast<MaskedLoadSDNode>(Op);
    Mask = MLoad->getMask();
    PassThru = MLoad->getPassThru();
  MVT ContainerVT = VT;
      IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
  Chain = Result.getValue(1);
  const auto *MemSD = cast<MemSDNode>(Op);
  EVT MemVT = MemSD->getMemoryVT();
  SDValue Chain = MemSD->getChain();
  bool IsCompressingStore = false;
  if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
    Val = VPStore->getValue();
    Mask = VPStore->getMask();
    VL = VPStore->getVectorLength();
    const auto *MStore = cast<MaskedStoreSDNode>(Op);
    Val = MStore->getValue();
    Mask = MStore->getMask();
    IsCompressingStore = MStore->isCompressingStore();
  MVT ContainerVT = VT;
  if (!IsUnmasked || IsCompressingStore) {
  if (IsCompressingStore) {
                      DAG.getUNDEF(ContainerVT), Val, Mask, VL);
      IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
  MVT InVT = Op.getOperand(0).getSimpleValueType();
  MVT VT = Op.getSimpleValueType();
       {Op1, Op2, Op.getOperand(2), DAG.getUNDEF(MaskVT), Mask, VL});
  unsigned Opc = Op.getOpcode();
  MVT VT = Op.getSimpleValueType();
  MVT ContainerInVT = InVT;
         {Chain, Op1, Op1, DAG.getCondCode(ISD::SETOEQ), DAG.getUNDEF(MaskVT),
         {Chain, Op2, Op2, DAG.getCondCode(ISD::SETOEQ), DAG.getUNDEF(MaskVT),
                      {Chain, Op1, Op2, CC, Mask, Mask, VL});
                      {Chain, Op1, Op2, CC, DAG.getUNDEF(MaskVT), Mask, VL});
  MVT VT = Op.getSimpleValueType();
         "Unexpected type for ISD::ABS");
  MVT ContainerVT = VT;
  if (Op->getOpcode() == ISD::VP_ABS) {
    Mask = Op->getOperand(1);
    VL = Op->getOperand(2);
                             DAG.getUNDEF(ContainerVT), Mask, VL);
                            DAG.getUNDEF(ContainerVT), Mask, VL);
SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
  MVT VT = Op.getSimpleValueType();
         "Can only handle COPYSIGN with matching types.");
                                 Sign, DAG.getUNDEF(ContainerVT), Mask, VL);
SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
  MVT VT = Op.getSimpleValueType();
  MVT I1ContainerVT =
                             Op2, DAG.getUNDEF(ContainerVT), VL);
  MVT VT = Op.getSimpleValueType();
  for (const SDValue &V : Op->op_values()) {
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
    if (!V.getValueType().isVector()) {
    assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
           "Only fixed length vectors are supported!");
  if (Op->isStrictFPOpcode()) {
        DAG.getNode(NewOpc, DL, ContainerVT, Ops, Op->getFlags());
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
      if (*MaskIdx == OpIdx.index())
    if (Op.getOpcode() == ISD::VP_MERGE) {
      assert(Op.getOpcode() == ISD::VP_SELECT);
    if (!V.getValueType().isFixedLengthVector()) {
    MVT OpVT = V.getSimpleValueType();
    assert(useRVVForFixedLengthVectorVT(OpVT) &&
           "Only fixed length vectors are supported!");
  return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
                         DAG.getUNDEF(ContainerVT), Zero, VL);
      Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
                          DAG.getUNDEF(ContainerVT), SplatValue, VL);
                          ZeroSplat, DAG.getUNDEF(ContainerVT), VL);
  MVT VT = Op.getSimpleValueType();
  ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  MVT ContainerVT = VT;
  switch (Condition) {
  MVT DstVT = Op.getSimpleValueType();
  MVT SrcVT = Src.getSimpleValueType();
  if (DstEltSize >= SrcEltSize) {
    if (SrcEltSize == 1) {
                        ZeroSplat, DAG.getUNDEF(IntVT), VL);
    } else if (DstEltSize > (2 * SrcEltSize)) {
      Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
           "Wrong input/output vector types");
    if (DstEltSize > (2 * SrcEltSize)) {
    MVT InterimFVT = DstVT;
    if (SrcEltSize > (2 * DstEltSize)) {
      assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
    if (InterimFVT != DstVT) {
           "Wrong input/output vector types");
    if (DstEltSize == 1) {
      assert(SrcEltSize >= 16 && "Unexpected FP type!");
                              DAG.getUNDEF(InterimIVT), SplatZero, VL);
      while (InterimIVT != DstVT) {
  MVT VT = Op.getSimpleValueType();
RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  if (IsMaskVector) {
                      SplatZeroOp1, DAG.getUNDEF(ContainerVT), EVL1);
                      SplatZeroOp2, DAG.getUNDEF(ContainerVT), EVL2);
  int64_t ImmValue = cast<ConstantSDNode>(Offset)->getSExtValue();
  SDValue DownOffset, UpOffset;
  if (ImmValue >= 0) {
                 Op1, DownOffset, Mask, UpOffset);
  if (IsMaskVector) {
        {Result, DAG.getConstant(0, DL, ContainerVT),
         DAG.getCondCode(ISD::SETNE), DAG.getUNDEF(getMaskTypeFor(ContainerVT)),
RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  MVT GatherVT = ContainerVT;
  if (IsMaskVector) {
                          SplatZero, DAG.getUNDEF(IndicesVT), EVL);
  unsigned MaxVLMAX =
  if (MaxVLMAX > 256 && EltSize == 8) {
          DAG.getUNDEF(GatherVT), Result, Diff, Mask, EVL);
      if (IsMaskVector) {
                            DAG.getUNDEF(IndicesVT), VecLen, EVL);
                               DAG.getUNDEF(IndicesVT), Mask, EVL);
                       DAG.getUNDEF(GatherVT), Mask, EVL);
  if (IsMaskVector) {
  MVT VT = Op.getSimpleValueType();
    return lowerVPOp(Op, DAG);
  MVT ContainerVT = VT;
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  auto *VPNode = cast<VPStridedLoadSDNode>(Op);
                                    : Intrinsic::riscv_vlse_mask,
                  DAG.getUNDEF(ContainerVT), VPNode->getBasePtr(),
                  VPNode->getStride()};
    Ops.push_back(VPNode->getVectorLength());
      VPNode->getMemoryVT(), VPNode->getMemOperand());
  auto *VPNode = cast<VPStridedStoreSDNode>(Op);
  SDValue StoreVal = VPNode->getValue();
  MVT ContainerVT = VT;
                                    : Intrinsic::riscv_vsse_mask,
                  VPNode->getBasePtr(), VPNode->getStride()};
    Ops.push_back(VPNode->getVectorLength());
      Ops, VPNode->getMemoryVT(),
      VPNode->getMemOperand());
  MVT VT = Op.getSimpleValueType();
  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  SDValue Chain = MemSD->getChain();
  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
    Index = VPGN->getIndex();
    Mask = VPGN->getMask();
    VL = VPGN->getVectorLength();
    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
    Index = MGN->getIndex();
    Mask = MGN->getMask();
    PassThru = MGN->getPassThru();
  MVT IndexVT = Index.getSimpleValueType();
         "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
         "Unexpected extending MGATHER/VP_GATHER");
  MVT ContainerVT = VT;
      IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
  Chain = Result.getValue(1);
  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  SDValue Chain = MemSD->getChain();
  [[maybe_unused]] bool IsTruncatingStore = false;
  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
    Index = VPSN->getIndex();
    Mask = VPSN->getMask();
    Val = VPSN->getValue();
    VL = VPSN->getVectorLength();
    IsTruncatingStore = false;
    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
    Index = MSN->getIndex();
    Mask = MSN->getMask();
    Val = MSN->getValue();
    IsTruncatingStore = MSN->isTruncatingStore();
  MVT IndexVT = Index.getSimpleValueType();
         "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
  MVT ContainerVT = VT;
      IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  static const int Table =
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  static const unsigned Table =
  bool isRISCV64 = Subtarget.is64Bit();
  switch (N->getOpcode()) {
    llvm_unreachable("Don't know how to custom type legalize this operation!");
           "Unexpected custom legalisation");
    bool IsStrict = N->isStrictFPOpcode();
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
          Opc, DL, VTs, Chain, Op0,
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
        Op0.getValueType() == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;
    assert(!Subtarget.is64Bit() && "READCYCLECOUNTER/READSTEADYCOUNTER only "
                                   "has custom type legalization on riscv32");
    SDValue LoCounter, HiCounter;
          RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding, DL, XLenVT);
          RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding, DL, XLenVT);
          RISCVSysReg::lookupSysRegByName("TIME")->Encoding, DL, XLenVT);
          RISCVSysReg::lookupSysRegByName("TIMEH")->Encoding, DL, XLenVT);
                    N->getOperand(0), LoCounter, HiCounter);
    unsigned Size = N->getSimpleValueType(0).getSizeInBits();
    unsigned XLen = Subtarget.getXLen();
    assert(Size == (XLen * 2) && "Unexpected custom legalisation");
    if (LHSIsU == RHSIsU)
    if (RHSIsU && LHSIsS && !RHSIsS)
    else if (LHSIsU && RHSIsS && !LHSIsS)
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
    if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
           "Unexpected custom legalisation");
    assert((Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
            Subtarget.hasVendorXTHeadBb()) &&
           "Unexpected custom legalization");
    if (!isa<ConstantSDNode>(N->getOperand(1)) &&
        !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()))
           "Unexpected custom legalisation");
    MVT VT = N->getSimpleValueType(0);
    assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
           Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
           "Unexpected custom legalisation");
    if (VT != MVT::i32)
           "Unexpected custom legalisation");
    if (!isa<ConstantSDNode>(N->getOperand(1)))
    EVT OType = N->getValueType(1);
           "Unexpected custom legalisation");
      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
      Overflow = DAG.getSetCC(DL, N->getValueType(1), N->getOperand(0),
           "Unexpected custom legalisation");
    if (Subtarget.hasStdExtZbb()) {
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
    if (Subtarget.hasStdExtZbb()) {
    EVT VT = N->getValueType(0);
    if (VT == MVT::i16 && Op0VT == MVT::f16 &&
    } else if (VT == MVT::i16 && Op0VT == MVT::bf16 &&
               Subtarget.hasStdExtZfbfmin()) {
    } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
    } else if (VT == MVT::i64 && Op0VT == MVT::f64 && XLenVT == MVT::i32) {
                                DAG.getVTList(MVT::i32, MVT::i32), Op0);
    MVT VT = N->getSimpleValueType(0);
    assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalisation");
    assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
    assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
           "Unexpected EXTRACT_VECTOR_ELT legalization");
    MVT ContainerVT = VecVT;
                                   DAG.getUNDEF(ContainerVT), Mask, VL);
    unsigned IntNo = N->getConstantOperandVal(0);
          "Don't know how to custom type legalize this intrinsic!");
    case Intrinsic::experimental_get_vector_length: {
    case Intrinsic::riscv_orc_b:
    case Intrinsic::riscv_brev8:
    case Intrinsic::riscv_sha256sig0:
    case Intrinsic::riscv_sha256sig1:
    case Intrinsic::riscv_sha256sum0:
    case Intrinsic::riscv_sha256sum1:
    case Intrinsic::riscv_sm3p0:
    case Intrinsic::riscv_sm3p1: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
    case Intrinsic::riscv_sm4ks:
    case Intrinsic::riscv_sm4ed: {
          DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, N->getOperand(3));
    case Intrinsic::riscv_mopr: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
    case Intrinsic::riscv_moprr: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
    case Intrinsic::riscv_clmul: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
    case Intrinsic::riscv_clmulh:
    case Intrinsic::riscv_clmulr: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
    case Intrinsic::riscv_vmv_x_s: {
      EVT VT = N->getValueType(0);
      if (VT.bitsLT(XLenVT)) {
             "Unexpected custom legalization");
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMIN:
  const EVT VT = N->getValueType(0);
  const unsigned Opc = N->getOpcode();
      (Opc != ISD::FADD || !N->getFlags().hasAllowReassociation()))
         "Inconsistent mappings");
      !isa<ConstantSDNode>(RHS.getOperand(1)))
  uint64_t RHSIdx = cast<ConstantSDNode>(RHS.getOperand(1))->getLimitedValue();
      LHS.getOperand(0) == SrcVec && isa<ConstantSDNode>(LHS.getOperand(1))) {
        cast<ConstantSDNode>(LHS.getOperand(1))->getLimitedValue();
    if (0 == std::min(LHSIdx, RHSIdx) && 1 == std::max(LHSIdx, RHSIdx)) {
      return DAG.getNode(ReduceOpc, DL, VT, Vec, N->getFlags());
  if (LHS.getOpcode() != ReduceOpc)
    auto Flags = ReduceVec->getFlags();
    Flags.intersectWith(N->getFlags());
    return DAG.getNode(ReduceOpc, DL, VT, Vec, Flags);
  auto BinOpToRVVReduce = [](unsigned Opc) {
  auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
           V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
  unsigned Opc = N->getOpcode();
  unsigned ReduceIdx;
  if (IsReduction(N->getOperand(0), Opc))
  else if (IsReduction(N->getOperand(1), Opc))
  if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
  SDValue Extract = N->getOperand(ReduceIdx);
  SDValue NewStart = N->getOperand(1 - ReduceIdx);
  if (!Subtarget.hasStdExtZba())
  EVT VT = N->getValueType(0);
  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
  int64_t C0 = N0C->getSExtValue();
  int64_t C1 = N1C->getSExtValue();
  if (C0 <= 0 || C1 <= 0)
  int64_t Bits = std::min(C0, C1);
  int64_t Diff = std::abs(C0 - C1);
  if (Diff != 1 && Diff != 2 && Diff != 3)
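// Example of the shXadd rewrite above: (add (shl x, 5), (shl y, 6)) has
// Diff == 1, so it can become (shl (add x, (shl y, 1)), 5), i.e. roughly:
//   sh1add t, y, x   ; t = (y << 1) + x
//   slli   a, t, 5
// trading two shifts plus an add for one shXadd and one shift.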
  EVT VT = N->getValueType(0);
  if ((!Subtarget.hasStdExtZicond() &&
       !Subtarget.hasVendorXVentanaCondOps()) ||
  bool SwapSelectOps;
    SwapSelectOps = false;
    NonConstantVal = FalseVal;
    SwapSelectOps = true;
    NonConstantVal = TrueVal;
  FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
  EVT VT = N->getValueType(0);
  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!N0C->hasOneUse())
  int64_t C0 = N0C->getSExtValue();
  int64_t C1 = N1C->getSExtValue();
  if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
  if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
      !isInt<12>(C0 * (C1 / C0))) {
  } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
             isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
  } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
             isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
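// Example of the reassociation above: (add (mul x, 100), 4100) has C1 = 4100,
// which does not fit in a 12-bit immediate, but C1 / C0 = 41 and C1 % C0 = 0
// both do, so the expression can be rewritten as (mul (add x, 41), 100),
// where both the addi immediate and the multiplier are cheap to materialize.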
  EVT VT = N->getValueType(0);
  unsigned OuterExtend =
      OuterExtend, SDLoc(N), VT,
  EVT VT = N->getValueType(0);
  EVT VT = N->getValueType(0);
  auto *N0C = dyn_cast<ConstantSDNode>(N0);
  APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
    if (!isIntEqualitySetCC(CCVal) || !SetCCOpVT.isInteger())
  EVT VT = N->getValueType(0);
  bool IsAnd = N->getOpcode() == ISD::AND;
  EVT VT = N->getValueType(0);
  EVT VT = N->getValueType(0);
  EVT VT = N->getValueType(0);
  auto *ConstN00 = dyn_cast<ConstantSDNode>(N0.getOperand(0));
  const APInt &Imm = ConstN00->getAPIntValue();
  if ((Imm + 1).isSignedIntN(12))
                       LHS.getValueType());
  EVT VT = N->getValueType(0);
  unsigned AddSubOpc;
  auto IsAddSubWith1 = [&](SDValue V) -> bool {
    AddSubOpc = V->getOpcode();
    SDValue Opnd = V->getOperand(1);
    MulOper = V->getOperand(0);
  if (IsAddSubWith1(N0)) {
    return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
  if (IsAddSubWith1(N1)) {
    return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
  if (isIndexTypeSigned(IndexType))
  if (!N->hasOneUse())
  EVT VT = N.getValueType();
  EVT SrcVT = Src.getValueType();
  NewElen = std::max(NewElen, 8U);
  EVT VT = N->getValueType(0);
  if (OpVT != MVT::i64 || !Subtarget.is64Bit())
  auto *N1C = dyn_cast<ConstantSDNode>(N1);
  if (!isIntEqualitySetCC(Cond))
  const APInt &C1 = N1C->getAPIntValue();
  EVT VT = N->getValueType(0);
      cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
                     Src.getOperand(0));
struct CombineResult;

enum ExtKind : uint8_t { ZExt = 1 << 0, SExt = 1 << 1, FPExt = 1 << 2 };

struct NodeExtensionHelper {
  bool SupportsFPExt;
  bool EnforceOneUse;
    return OrigOperand;
  unsigned getExtOpc(ExtKind SupportsExt) const {
    switch (SupportsExt) {
    case ExtKind::SExt:
    case ExtKind::ZExt:
    case ExtKind::FPExt:
                                std::optional<ExtKind> SupportsExt) const {
    if (!SupportsExt.has_value())
      return OrigOperand;
    MVT NarrowVT = getNarrowType(Root, *SupportsExt);
    if (Source.getValueType() == NarrowVT)
    unsigned ExtOpc = getExtOpc(*SupportsExt);
      auto [Mask, VL] = getMaskAndVL(Root, DAG, Subtarget);
      return DAG.getNode(ExtOpc, DL, NarrowVT, Source, Mask, VL);
  static MVT getNarrowType(const SDNode *Root, ExtKind SupportsExt) {
    MVT EltVT = SupportsExt == ExtKind::FPExt
                    : MVT::getIntegerVT(NarrowSize);
    assert((int)NarrowSize >= (SupportsExt == ExtKind::FPExt ? 16 : 8) &&
           "Trying to extend something we can't represent");
  static unsigned getSExtOpcode(unsigned Opcode) {
  static unsigned getZExtOpcode(unsigned Opcode) {
  static unsigned getFPExtOpcode(unsigned Opcode) {
  static unsigned getSUOpcode(unsigned Opcode) {
           "SU is only supported for MUL");
  static unsigned getWOpcode(unsigned Opcode, ExtKind SupportsExt) {
  using CombineToTry = std::function<std::optional<CombineResult>(
      SDNode *, const NodeExtensionHelper &,
  bool needToPromoteOtherUsers() const { return EnforceOneUse; }
    SupportsZExt = false;
    SupportsSExt = false;
    SupportsFPExt = false;
    EnforceOneUse = true;
    unsigned Opc = OrigOperand.getOpcode();
      SupportsZExt = true;
      SupportsSExt = true;
      SupportsFPExt = true;
      EnforceOneUse = false;
      unsigned ScalarBits = Op.getValueSizeInBits();
      if (ScalarBits < EltBits)
      if (NarrowSize < 8)
        SupportsSExt = true;
        SupportsZExt = true;
  static bool isSupportedRoot(const SDNode *Root) {
    assert(isSupportedRoot(Root) && "Trying to build a helper with an "
                                    "unsupported root");
    assert(OperandIdx < 2 && "Requesting something else than LHS or RHS");
    if (OperandIdx == 1) {
      std::tie(Mask, VL) = getMaskAndVL(Root, DAG, Subtarget);
      EnforceOneUse = false;
    fillUpExtensionSupport(Root, DAG, Subtarget);
  bool isVLCompatible(SDValue VL) const {
    return this->VL != SDValue() && this->VL == VL;
  bool isMaskCompatible(SDValue Mask) const {
    return !CheckMask || (this->Mask != SDValue() && this->Mask == Mask);
  static std::pair<SDValue, SDValue>
    assert(isSupportedRoot(Root) && "Unexpected root");
    auto [Mask, VL] = getMaskAndVL(Root, DAG, Subtarget);
    return isMaskCompatible(Mask) && isVLCompatible(VL);
  switch (N->getOpcode()) {
struct CombineResult {
  unsigned TargetOpcode;
  std::optional<ExtKind> LHSExt;
  std::optional<ExtKind> RHSExt;
  NodeExtensionHelper LHS;
  NodeExtensionHelper RHS;
  CombineResult(unsigned TargetOpcode, SDNode *Root,
                const NodeExtensionHelper &LHS, std::optional<ExtKind> LHSExt,
                const NodeExtensionHelper &RHS, std::optional<ExtKind> RHSExt)
      : TargetOpcode(TargetOpcode), LHSExt(LHSExt), RHSExt(RHSExt), Root(Root),
    std::tie(Mask, VL) =
        NodeExtensionHelper::getMaskAndVL(Root, DAG, Subtarget);
        LHS.getOrCreateExtendedOp(Root, DAG, Subtarget, LHSExt),
        RHS.getOrCreateExtendedOp(Root, DAG, Subtarget, RHSExt),
static std::optional<CombineResult>
canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
                                 const NodeExtensionHelper &RHS,
  if (!LHS.areVLAndMaskCompatible(Root, DAG, Subtarget) ||
      !RHS.areVLAndMaskCompatible(Root, DAG, Subtarget))
    return std::nullopt;
  if ((AllowExtMask & ExtKind::ZExt) && LHS.SupportsZExt && RHS.SupportsZExt)
    return CombineResult(NodeExtensionHelper::getZExtOpcode(Root->getOpcode()),
                         Root, LHS, {ExtKind::ZExt}, RHS,
  if ((AllowExtMask & ExtKind::SExt) && LHS.SupportsSExt && RHS.SupportsSExt)
    return CombineResult(NodeExtensionHelper::getSExtOpcode(Root->getOpcode()),
                         Root, LHS, {ExtKind::SExt}, RHS,
  if ((AllowExtMask & ExtKind::FPExt) && RHS.SupportsFPExt)
    return CombineResult(NodeExtensionHelper::getFPExtOpcode(Root->getOpcode()),
                         Root, LHS, {ExtKind::FPExt}, RHS,
  return std::nullopt;
static std::optional<CombineResult>
canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
  return canFoldToVWWithSameExtensionImpl(
      Root, LHS, RHS, ExtKind::ZExt | ExtKind::SExt | ExtKind::FPExt, DAG,
static std::optional<CombineResult>
canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
  if (!RHS.areVLAndMaskCompatible(Root, DAG, Subtarget))
    return std::nullopt;
  if (RHS.SupportsFPExt)
    return CombineResult(
        NodeExtensionHelper::getWOpcode(Root->getOpcode(), ExtKind::FPExt),
        Root, LHS, std::nullopt, RHS, {ExtKind::FPExt});
    return CombineResult(
        NodeExtensionHelper::getWOpcode(Root->getOpcode(), ExtKind::ZExt), Root,
        LHS, std::nullopt, RHS, {ExtKind::ZExt});
    return CombineResult(
        NodeExtensionHelper::getWOpcode(Root->getOpcode(), ExtKind::SExt), Root,
        LHS, std::nullopt, RHS, {ExtKind::SExt});
  return std::nullopt;
static std::optional<CombineResult>
canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, ExtKind::SExt, DAG,
static std::optional<CombineResult>
canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, ExtKind::ZExt, DAG,
static std::optional<CombineResult>
canFoldToVWWithFPEXT(SDNode *Root, const NodeExtensionHelper &LHS,
  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, ExtKind::FPExt, DAG,
static std::optional<CombineResult>
canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
  if (!LHS.SupportsSExt || !RHS.SupportsZExt)
    return std::nullopt;
  if (!LHS.areVLAndMaskCompatible(Root, DAG, Subtarget) ||
      !RHS.areVLAndMaskCompatible(Root, DAG, Subtarget))
    return std::nullopt;
  return CombineResult(NodeExtensionHelper::getSUOpcode(Root->getOpcode()),
                       Root, LHS, {ExtKind::SExt}, RHS,
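// Taken together, these helpers drive widening folds such as (sketched):
//   add(sext(a), sext(b)) -> vwadd.vv  a, b
//   add(zext(a), zext(b)) -> vwaddu.vv a, b
//   add(wide_a, sext(b))  -> vwadd.wv  wide_a, b
//   mul(sext(a), zext(b)) -> vwmulsu.vv a, b
// and only fire when every operand agrees on the governing mask and VL.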
NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {
    Strategies.push_back(canFoldToVWWithSameExtension);
    Strategies.push_back(canFoldToVWWithSameExtension);
    Strategies.push_back(canFoldToVWWithSameExtension);
    Strategies.push_back(canFoldToVWWithSEXT);
    Strategies.push_back(canFoldToVWWithZEXT);
    Strategies.push_back(canFoldToVWWithFPEXT);
  if (!NodeExtensionHelper::isSupportedRoot(N))
  Inserted.insert(N);
  while (!Worklist.empty()) {
    if (!NodeExtensionHelper::isSupportedRoot(Root))
    NodeExtensionHelper LHS(N, 0, DAG, Subtarget);
    NodeExtensionHelper RHS(N, 1, DAG, Subtarget);
    auto AppendUsersIfNeeded = [&Worklist,
                                &Inserted](const NodeExtensionHelper &Op) {
      if (Op.needToPromoteOtherUsers()) {
        for (SDNode *TheUse : Op.OrigOperand->uses()) {
          if (Inserted.insert(TheUse).second)
        NodeExtensionHelper::getSupportedFoldings(N);
    assert(!FoldingStrategies.empty() && "Nothing to be folded");
    bool Matched = false;
    for (int Attempt = 0;
         (Attempt != 1 + NodeExtensionHelper::isCommutative(N)) && !Matched;
      for (NodeExtensionHelper::CombineToTry FoldingStrategy :
           FoldingStrategies) {
        std::optional<CombineResult> Res =
            FoldingStrategy(N, LHS, RHS, DAG, Subtarget);
          if (Res->LHSExt.has_value())
            AppendUsersIfNeeded(LHS);
          if (Res->RHSExt.has_value())
            AppendUsersIfNeeded(RHS);
  SDValue InputRootReplacement;
  for (CombineResult Res : CombinesToApply) {
    SDValue NewValue = Res.materialize(DAG, Subtarget);
    if (!InputRootReplacement) {
             "First element is expected to be the current node");
      InputRootReplacement = NewValue;
  for (std::pair<SDValue, SDValue> OldNewValues : ValuesToReplace) {
  return InputRootReplacement;
  unsigned Opc = N->getOpcode();
  SDValue MergeOp = N->getOperand(1);
  unsigned MergeOpc = MergeOp.getOpcode();
  SDValue Passthru = N->getOperand(2);
    Z = Z.getOperand(1);
        {Y, X, Y, MergeOp->getOperand(0), N->getOperand(4)},
  [[maybe_unused]] unsigned Opc = N->getOpcode();
  EVT NewMemVT = (MemVT == MVT::i32) ? MVT::i64 : MVT::i128;
    auto Ext = cast<LoadSDNode>(LSNode1)->getExtensionType();
    if (MemVT == MVT::i32)
      Opcode, SDLoc(LSNode1), DAG.getVTList({XLenVT, XLenVT, MVT::Other}),
  if (!Subtarget.hasVendorXTHeadMemPair())
  auto ExtractBaseAndOffset = [](SDValue Ptr) -> std::pair<SDValue, uint64_t> {
      if (auto *C1 = dyn_cast<ConstantSDNode>(Ptr->getOperand(1)))
        return {Ptr->getOperand(0), C1->getZExtValue()};
  auto [Base1, Offset1] = ExtractBaseAndOffset(LSNode1->getOperand(OpNum));
  auto [Base2, Offset2] = ExtractBaseAndOffset(LSNode2->getOperand(OpNum));
  if (Base1 != Base2)
  bool Valid = false;
  if (MemVT == MVT::i32) {
    if ((Offset1 + 4 == Offset2) && isShiftedUInt<2, 3>(Offset1))
  } else if (MemVT == MVT::i64) {
    if ((Offset1 + 8 == Offset2) && isShiftedUInt<2, 4>(Offset1))
  if (Src->isStrictFPOpcode() || Src->isTargetStrictFPOpcode())
  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
  EVT VT = N->getValueType(0);
  MVT SrcVT = Src.getSimpleValueType();
  MVT SrcContainerVT = SrcVT;
  SDValue XVal = Src.getOperand(0);
    FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask, VL);
    FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask, VL);
    FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask,
  if (VT != MVT::i32 && VT != XLenVT)
  EVT DstVT = N->getValueType(0);
  if (DstVT != XLenVT)
  if (Src->isStrictFPOpcode() || Src->isTargetStrictFPOpcode())
  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
  EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  if (SatVT == DstVT)
  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
    Src = Src.getOperand(0);
  assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
  EVT VT = N->getValueType(0);
  unsigned Offset = N->isTargetStrictFPOpcode();
  auto invertIfNegative = [&Mask, &VL](SDValue &V) {
        V.getOperand(2) == VL) {
      V = V.getOperand(0);
  bool NegA = invertIfNegative(A);
  bool NegB = invertIfNegative(B);
  bool NegC = invertIfNegative(C);
  if (!NegA && !NegB && !NegC)
  if (N->isTargetStrictFPOpcode())
                       {N->getOperand(0), A, B, C, Mask, VL});
  if (N->getValueType(0).isScalableVector() &&
      N->getValueType(0).getVectorElementType() == MVT::f32 &&
  if (N->isTargetStrictFPOpcode())
  switch (N->getOpcode()) {
  return DAG.getNode(NewOpc, SDLoc(N), N->getValueType(0), Op0, Op1,
                     N->getOperand(2), Mask, VL);
  if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit())
  if (!isa<ConstantSDNode>(N->getOperand(1)))
  uint64_t ShAmt = N->getConstantOperandVal(1);
      cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
    AddC = dyn_cast<ConstantSDNode>(N0.getOperand(IsAdd ? 1 : 0));
        !isa<ConstantSDNode>(U->getOperand(1)) ||
        U->getConstantOperandVal(1) > 32)
  if (!Cond.hasOneUse())
  EVT VT = Cond.getValueType();
    LHS = LHS.getOperand(0);
      LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
    CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    RHS = LHS.getOperand(1);
    LHS = LHS.getOperand(0);
    RHS = LHS.getOperand(1);
    LHS = LHS.getOperand(0);
      ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
15128 bool Commutative =
true;
15129 unsigned Opc = TrueVal.getOpcode();
15137 Commutative =
false;
15145 if (!TrueVal.hasOneUse() || isa<ConstantSDNode>(FalseVal))
15149 if (FalseVal == TrueVal.getOperand(0))
15151 else if (Commutative && FalseVal == TrueVal.getOperand(1))
15156 EVT VT =
N->getValueType(0);
15158 SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
15164 assert(IdentityOperand &&
"No identity operand!");
15169 DAG.
getSelect(
DL, OtherOpVT,
N->getOperand(0), OtherOp, IdentityOperand);
15170 return DAG.
getNode(TrueVal.getOpcode(),
DL, VT, FalseVal, NewSel);
15191 CountZeroes =
N->getOperand(2);
15192 ValOnZero =
N->getOperand(1);
15194 CountZeroes =
N->getOperand(1);
15195 ValOnZero =
N->getOperand(2);
15214 if (
Cond->getOperand(0) != CountZeroesArgument)
15230 CountZeroes, BitWidthMinusOne);
15240 EVT VT =
N->getValueType(0);
15241 EVT CondVT =
Cond.getValueType();
15249 (Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps())) {
15255 const APInt &MaskVal =
LHS.getConstantOperandAPInt(1);
15276 SDValue TrueVal =
N->getOperand(1);
15277 SDValue FalseVal =
N->getOperand(2);
15292 EVT VT =
N->getValueType(0);
15299 const unsigned Opcode =
N->op_begin()->getNode()->getOpcode();
15314 if (Op.isUndef()) {
15327 if (Op.getOpcode() != Opcode || !Op.hasOneUse())
15331 if (!isa<ConstantSDNode>(Op.getOperand(1)) &&
15332 !isa<ConstantFPSDNode>(Op.getOperand(1)))
15336 if (Op.getOperand(0).getValueType() != Op.getOperand(1).getValueType())
15364 const unsigned InVecOpcode = InVec->getOpcode();
15374 if (!isa<ConstantSDNode>(InValRHS) && !isa<ConstantFPSDNode>(InValRHS))
15381 InVecLHS, InValLHS, EltNo);
15383 InVecRHS, InValRHS, EltNo);
15392 auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
15395 unsigned Elt = IndexC->getZExtValue();
15403 unsigned ConcatOpIdx = Elt / ConcatNumElts;
15406 ConcatOp, InVal, NewIdx);
15410 ConcatOps[ConcatOpIdx] = ConcatOp;
15422 EVT VT = N->getValueType(0);
15432 auto *BaseLd = dyn_cast<LoadSDNode>(N->getOperand(0));
15434 !SDValue(BaseLd, 0).hasOneUse())
15437 EVT BaseLdVT = BaseLd->getValueType(0);
15444 auto *Ld = dyn_cast<LoadSDNode>(Op);
15445 if (!Ld || !Ld->isSimple() || !Op.hasOneUse() ||
15447 Ld->getValueType(0) != BaseLdVT)
15456 using PtrDiff = std::pair<std::variant<int64_t, SDValue>, bool>;
15458 LoadSDNode *Ld2) -> std::optional<PtrDiff> {
15463 if (BIO1.equalBaseIndex(BIO2, DAG))
15464 return {{BIO2.getOffset() - BIO1.getOffset(), false}};
15468 SDValue P2 = Ld2->getBasePtr();
15474 return std::nullopt;
15478 auto BaseDiff = GetPtrDiff(Lds[0], Lds[1]);
15483 for (auto *It = Lds.begin() + 1; It != Lds.end() - 1; It++)
15484 if (GetPtrDiff(*It, *std::next(It)) != BaseDiff)
15492 unsigned WideScalarBitWidth =
15505 auto [StrideVariant, MustNegateStride] = *BaseDiff;
15506 SDValue Stride = std::holds_alternative<SDValue>(StrideVariant)
15507 ? std::get<SDValue>(StrideVariant)
15510 if (MustNegateStride)
15523 BaseLd->getBasePtr(), Stride, AllOneMask};
15526 if (auto *ConstStride = dyn_cast<ConstantSDNode>(Stride);
15527 ConstStride && ConstStride->getSExtValue() >= 0)
15531 ConstStride->getSExtValue() * (N->getNumOperands() - 1);
15537 BaseLd->getPointerInfo(), BaseLd->getMemOperand()->getFlags(), MemSize,
15541 Ops, WideVecVT, MMO);
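// Editor's note: this combine recognizes a concat_vectors of small loads
// whose base pointers differ by a common constant (or a shared SDValue)
// stride and replaces the whole tree with one vlse-style strided access.
// Sketch of the memory view, assuming N loads of width W starting at P
// with stride S:
//   lane i  <-  [P + i*S, P + i*S + W)
// GetPtrDiff returns the (stride, mustNegate) pair; the loop only accepts
// the pattern when every adjacent pair of loads reports the same difference.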
15553 if (N->getValueType(0).isFixedLengthVector())
15556 SDValue Addend = N->getOperand(0);
15560 SDValue AddMergeOp = N->getOperand(2);
15565 auto IsVWMulOpc = [](unsigned Opc) {
15594 return std::make_pair(N->getOperand(3), N->getOperand(4));
15595 }(N, DAG, Subtarget);
15600 if (AddMask != MulMask || AddVL != MulVL)
15605 "Unexpected opcode after VWMACC_VL");
15607 "Unexpected opcode after VWMACC_VL!");
15609 "Unexpected opcode after VWMUL_VL!");
15611 "Unexpected opcode after VWMUL_VL!");
15614 EVT VT = N->getValueType(0);
15630 const EVT IndexVT = Index.getValueType();
15634 if (!isIndexTypeSigned(IndexType))
15666 for (unsigned i = 0; i < Index->getNumOperands(); i++) {
15669 if (Index->getOperand(i)->isUndef())
15672 if (C % ElementSize != 0)
15674 C = C / ElementSize;
15678 ActiveLanes.set(C);
15680 return ActiveLanes.all();
15698 if (NumElems % 2 != 0)
15702 const unsigned WiderElementSize = ElementSize * 2;
15703 if (WiderElementSize > ST.getELen() / 8)
15706 if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
15709 for (unsigned i = 0; i < Index->getNumOperands(); i++) {
15712 if (Index->getOperand(i)->isUndef())
15718 if (C % WiderElementSize != 0)
15723 if (C != Last + ElementSize)
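// Editor's note: both loops above scan a build_vector of gather/scatter
// indices. The first proves every index is a multiple of the element size,
// so the indices can be divided down and the index type narrowed. The
// second checks that indices arrive in consecutive pairs (C, C +
// ElementSize), so two adjacent elements can be merged into one access of
// twice the width, subject to the ELen and alignment guards at 15703/15706.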
15739 auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
15750 switch (N->getOpcode()) {
15770 APInt V = C->getValueAPF().bitcastToAPInt();
15805 if (SimplifyDemandedLowBitsHelper(0, 32) ||
15806 SimplifyDemandedLowBitsHelper(1, 5))
15814 if (SimplifyDemandedLowBitsHelper(0, 32))
15831 MVT VT = N->getSimpleValueType(0);
15840 "Unexpected value type!");
15863 EVT VT = N->getValueType(0);
15918 if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
15923 Src.getOperand(0));
15928 Src.getOperand(0), Src.getOperand(1));
15943 auto IsTruncNode = [](SDValue V) {
15946 SDValue VL = V.getOperand(2);
15947 auto *C = dyn_cast<ConstantSDNode>(VL);
15949 bool IsVLMAXForVMSET = (C && C->isAllOnes()) ||
15950 (isa<RegisterSDNode>(VL) &&
15951 cast<RegisterSDNode>(VL)->getReg() == RISCV::X0);
15960 while (IsTruncNode(Op)) {
15961 if (!Op.hasOneUse())
15963 Op = Op.getOperand(0);
15994 if (N->getOperand(1).getOpcode() == ISD::XOR &&
16003 N->getOperand(0), Cond);
16015 SDValue FalseV = N->getOperand(4);
16017 EVT VT = N->getValueType(0);
16020 if (TrueV == FalseV)
16025 if (!Subtarget.hasShortForwardBranchOpt() && isa<ConstantSDNode>(TrueV) &&
16031 int64_t TrueSImm = cast<ConstantSDNode>(TrueV)->getSExtValue();
16032 int64_t FalseSImm = cast<ConstantSDNode>(FalseV)->getSExtValue();
16035 if (isInt<12>(TrueSImm) && isInt<12>(FalseSImm) &&
16036 isInt<12>(TrueSImm - FalseSImm)) {
16052 {LHS, RHS, CC, TrueV, FalseV});
16119 N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
16132 EVT VT = N->getValueType(0);
16156 const auto *MGN = dyn_cast<MaskedGatherSDNode>(N);
16157 const EVT VT = N->getValueType(0);
16159 SDValue ScaleOp = MGN->getScale();
16161 assert(!MGN->isIndexScaled() &&
16162 "Scaled gather/scatter should not be formed");
16167 N->getVTList(), MGN->getMemoryVT(), DL,
16168 {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
16169 MGN->getBasePtr(), Index, ScaleOp},
16170 MGN->getMemOperand(), IndexType, MGN->getExtensionType());
16174 N->getVTList(), MGN->getMemoryVT(), DL,
16175 {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
16176 MGN->getBasePtr(), Index, ScaleOp},
16177 MGN->getMemOperand(), IndexType, MGN->getExtensionType());
16183 if (std::optional<VIDSequence> SimpleVID =
16185 SimpleVID && SimpleVID->StepDenominator == 1) {
16186 const int64_t StepNumerator = SimpleVID->StepNumerator;
16187 const int64_t Addend = SimpleVID->Addend;
16194 assert(MGN->getBasePtr()->getValueType(0) == PtrVT);
16204 {MGN->getChain(), IntID, MGN->getPassThru(), BasePtr,
16205 DAG.getConstant(StepNumerator, DL, XLenVT), MGN->getMask()};
16207 Ops, VT, MGN->getMemOperand());
16215 MGN->getBasePtr(), DAG.getUNDEF(XLenVT),
16217 MGN->getMemoryVT(), MGN->getMemOperand(),
16226 MGN->getMemOperand()->getBaseAlign(), Subtarget)) {
16228 for (unsigned i = 0; i < Index->getNumOperands(); i += 2)
16230 EVT IndexVT = Index.getValueType()
16237 assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
16239 EltCnt.divideCoefficientBy(2));
16242 EltCnt.divideCoefficientBy(2));
16247 {MGN->getChain(), Passthru, Mask, MGN->getBasePtr(),
16256 const auto *MSN = dyn_cast<MaskedScatterSDNode>(N);
16258 SDValue ScaleOp = MSN->getScale();
16260 assert(!MSN->isIndexScaled() &&
16261 "Scaled gather/scatter should not be formed");
16266 N->getVTList(), MSN->getMemoryVT(), DL,
16267 {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
16269 MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
16273 N->getVTList(), MSN->getMemoryVT(), DL,
16274 {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
16276 MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
16278 EVT VT = MSN->getValue()->getValueType(0);
16280 if (!MSN->isTruncatingStore() &&
16284 return DAG.getMaskedStore(MSN->getChain(), DL, Shuffle, MSN->getBasePtr(),
16285 DAG.getUNDEF(XLenVT), MSN->getMask(),
16286 MSN->getMemoryVT(), MSN->getMemOperand(),
16291 case ISD::VP_GATHER: {
16292 const auto *VPGN = dyn_cast<VPGatherSDNode>(N);
16294 SDValue ScaleOp = VPGN->getScale();
16296 assert(!VPGN->isIndexScaled() &&
16297 "Scaled gather/scatter should not be formed");
16302 {VPGN->getChain(), VPGN->getBasePtr(), Index,
16303 ScaleOp, VPGN->getMask(),
16304 VPGN->getVectorLength()},
16305 VPGN->getMemOperand(), IndexType);
16309 {VPGN->getChain(), VPGN->getBasePtr(), Index,
16310 ScaleOp, VPGN->getMask(),
16311 VPGN->getVectorLength()},
16312 VPGN->getMemOperand(), IndexType);
16316 case ISD::VP_SCATTER: {
16317 const auto *VPSN = dyn_cast<VPScatterSDNode>(N);
16319 SDValue ScaleOp = VPSN->getScale();
16321 assert(!VPSN->isIndexScaled() &&
16322 "Scaled gather/scatter should not be formed");
16327 {VPSN->getChain(), VPSN->getValue(),
16328 VPSN->getBasePtr(), Index, ScaleOp,
16329 VPSN->getMask(), VPSN->getVectorLength()},
16330 VPSN->getMemOperand(), IndexType);
16334 {VPSN->getChain(), VPSN->getValue(),
16335 VPSN->getBasePtr(), Index, ScaleOp,
16336 VPSN->getMask(), VPSN->getVectorLength()},
16337 VPSN->getMemOperand(), IndexType);
16348 EVT VT = N->getValueType(0);
16351 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
16352 N->getOperand(2), N->getOperand(3), N->getOperand(4));
16366 EVT VT = N->getValueType(0);
16370 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
16400 if (N->getValueType(0).isScalableVector() &&
16401 N->getValueType(0).getVectorElementType() == MVT::f32 &&
16416 auto *Store = cast<StoreSDNode>(N);
16417 SDValue Chain = Store->getChain();
16418 EVT MemVT = Store->getMemoryVT();
16419 SDValue Val = Store->getValue();
16422 bool IsScalarizable =
16424 Store->isSimple() &&
16454 NewVT, *Store->getMemOperand())) {
16456 return DAG.getStore(Chain, DL, NewV, Store->getBasePtr(),
16457 Store->getPointerInfo(), Store->getOriginalAlign(),
16458 Store->getMemOperand()->getFlags());
16466 if (auto *L = dyn_cast<LoadSDNode>(Val);
16468 L->hasNUsesOfValue(1, 0) && L->hasNUsesOfValue(1, 1) &&
16470 L->getMemoryVT() == MemVT) {
16473 NewVT, *Store->getMemOperand()) &&
16475 NewVT, *L->getMemOperand())) {
16477 L->getPointerInfo(), L->getOriginalAlign(),
16478 L->getMemOperand()->getFlags());
16479 return DAG.getStore(Chain, DL, NewL, Store->getBasePtr(),
16480 Store->getPointerInfo(), Store->getOriginalAlign(),
16481 Store->getMemOperand()->getFlags());
16493 MVT VecVT = Src.getSimpleValueType();
16500 Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
16503 Store->getMemOperand(), Store->getAddressingMode(),
16504 Store->isTruncatingStore(), false);
16511 EVT VT = N->getValueType(0);
16533 const MVT VT = N->getSimpleValueType(0);
16534 SDValue Passthru = N->getOperand(0);
16535 SDValue Scalar = N->getOperand(1);
16544 const MVT VT = N->getSimpleValueType(0);
16545 SDValue Passthru = N->getOperand(0);
16546 SDValue Scalar = N->getOperand(1);
16551 unsigned ScalarSize = Scalar.getValueSizeInBits();
16553 if (ScalarSize > EltWidth && Passthru.isUndef())
16554 if (SimplifyDemandedLowBitsHelper(1, EltWidth))
16561 (!Const || Const->isZero() ||
16562 !Const->getAPIntValue().sextOrTrunc(EltWidth).isSignedIntN(5)))
16572 if (N->getOperand(0).isUndef() &&
16575 Src.getOperand(0).getValueType().isScalableVector()) {
16576 EVT VT = N->getValueType(0);
16577 EVT SrcVT = Src.getOperand(0).getValueType();
16581 return Src.getOperand(0);
16587 const MVT VT = N->getSimpleValueType(0);
16588 SDValue Passthru = N->getOperand(0);
16589 SDValue Scalar = N->getOperand(1);
16599 DAG.getNode(N->getOpcode(), DL, M1VT, M1Passthru, Scalar, VL);
16609 Const && !Const->isZero() && isInt<5>(Const->getSExtValue()) &&
16617 MVT VecVT = N->getOperand(0).getSimpleValueType();
16619 if (M1VT.bitsLT(VecVT)) {
16630 unsigned IntNo = N->getConstantOperandVal(IntOpNo);
16635 case Intrinsic::riscv_masked_strided_load: {
16636 MVT VT = N->getSimpleValueType(0);
16637 auto *Load = cast<MemIntrinsicSDNode>(N);
16638 SDValue PassThru = N->getOperand(2);
16640 SDValue Stride = N->getOperand(4);
16646 if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
16647 StrideC && StrideC->getZExtValue() == ElementSize)
16649 DAG.getUNDEF(XLenVT), Mask, PassThru,
16650 Load->getMemoryVT(), Load->getMemOperand(),
16654 case Intrinsic::riscv_masked_strided_store: {
16655 auto *Store = cast<MemIntrinsicSDNode>(N);
16658 SDValue Stride = N->getOperand(4);
16663 const unsigned ElementSize = Value.getValueType().getScalarStoreSize();
16664 if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
16665 StrideC && StrideC->getZExtValue() == ElementSize)
16668 Store->getMemoryVT(), Store->getMemOperand(),
16672 case Intrinsic::riscv_vcpop:
16673 case Intrinsic::riscv_vcpop_mask:
16674 case Intrinsic::riscv_vfirst:
16675 case Intrinsic::riscv_vfirst_mask: {
16677 if (IntNo == Intrinsic::riscv_vcpop_mask ||
16678 IntNo == Intrinsic::riscv_vfirst_mask)
16679 VL = N->getOperand(3);
16684 EVT VT = N->getValueType(0);
16685 if (IntNo == Intrinsic::riscv_vfirst ||
16686 IntNo == Intrinsic::riscv_vfirst_mask)
16695 EVT VT = N->getValueType(0);
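// Editor's note: in the riscv_masked_strided_load/store cases above, a
// constant stride equal to the element size means the access is actually
// contiguous, so the intrinsic is rewritten as an ordinary masked
// load/store (DAG.getMaskedLoad/getMaskedStore with an undef offset),
// letting generic combines and addressing-mode selection take over.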
16699 if ((SrcVT == MVT::v1i1 || SrcVT == MVT::v2i1 || SrcVT == MVT::v4i1) &&
16718 EVT XVT, unsigned KeptBits) const {
16723 if (XVT != MVT::i32 && XVT != MVT::i64)
16727 if (KeptBits == 32 || KeptBits == 64)
16731 return Subtarget.hasStdExtZbb() &&
16732 ((KeptBits == 8 && XVT == MVT::i64 && !Subtarget.is64Bit()) ||
16740 "Expected shift op");
16751 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
16752 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16754 const APInt &C1Int = C1->getAPIntValue();
16755 APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
16781 if (C1Cost < ShiftedC1Cost)
16795 EVT VT = Op.getValueType();
16799 unsigned Opcode = Op.getOpcode();
16807 const APInt &Mask = C->getAPIntValue();
16816 auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
16817 return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
16819 auto UseMask = [Mask, Op, &TLO](const APInt &NewMask) -> bool {
16820 if (NewMask == Mask)
16825 Op.getOperand(0), NewC);
16838 APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
16839 if (IsLegalMask(NewMask))
16840 return UseMask(NewMask);
16843 if (VT == MVT::i64) {
16845 if (IsLegalMask(NewMask))
16846 return UseMask(NewMask);
16861 APInt NewMask = ShrunkMask;
16862 if (MinSignedBits <= 12)
16864 else if (!C->isOpaque() && MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
16870 assert(IsLegalMask(NewMask));
16871 return UseMask(NewMask);
16875 static const uint64_t GREVMasks[] = {
16876 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
16877 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
16879 for (unsigned Stage = 0; Stage != 6; ++Stage) {
16880 unsigned Shift = 1 << Stage;
16881 if (ShAmt & Shift) {
16883 uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
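// Editor's note: this is the classic butterfly network for GREV
// (generalized reverse): stage k swaps bit groups of size 2^k whenever bit
// k of ShAmt is set, using the precomputed alternating masks above. For
// example, ShAmt == 24 (0b11000) swaps bytes, then halfwords, of a 32-bit
// value, i.e. a byte swap:
//   0x11223344 -> stage 3 -> 0x22114433 -> stage 4 -> 0x44332211
// Six stages cover all group sizes up to 32 bits within a 64-bit value.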
16895 const APInt &DemandedElts,
16897 unsigned Depth) const {
16899 unsigned Opc = Op.getOpcode();
16904 "Should use MaskedValueIsZero if you don't know whether Op"
16905 " is a target node!");
16988 assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
16991 if (MaxVLenB == MinVLenB)
17008 case Intrinsic::riscv_vsetvli:
17009 case Intrinsic::riscv_vsetvlimax: {
17010 bool HasAVL = IntNo == Intrinsic::riscv_vsetvli;
17011 unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
17017 MaxVL = (Fractional) ? MaxVL / LMul : MaxVL * LMul;
17020 if (HasAVL && isa<ConstantSDNode>(Op.getOperand(1)))
17021 MaxVL = std::min(MaxVL, Op.getConstantOperandVal(1));
17023 unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
17036 unsigned Depth) const {
17037 switch (Op.getOpcode()) {
17043 if (Tmp == 1) return 1;
17046 return std::min(Tmp, Tmp2);
17058 if (Tmp < 33) return 1;
17083 unsigned XLen = Subtarget.getXLen();
17084 unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
17085 if (EltBits <= XLen)
17086 return XLen - EltBits + 1;
17090 unsigned IntNo = Op.getConstantOperandVal(1);
17094 case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
17095 case Intrinsic::riscv_masked_atomicrmw_add_i64:
17096 case Intrinsic::riscv_masked_atomicrmw_sub_i64:
17097 case Intrinsic::riscv_masked_atomicrmw_nand_i64:
17098 case Intrinsic::riscv_masked_atomicrmw_max_i64:
17099 case Intrinsic::riscv_masked_atomicrmw_min_i64:
17100 case Intrinsic::riscv_masked_atomicrmw_umax_i64:
17101 case Intrinsic::riscv_masked_atomicrmw_umin_i64:
17102 case Intrinsic::riscv_masked_cmpxchg_i64:
17110 assert(Subtarget.hasStdExtA());
17125 switch (Op.getOpcode()) {
17131 return !Op.getValueType().isInteger();
17139 assert(Ld && "Unexpected null LoadSDNode");
17147 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
17148 if (!CNode || CNode->isMachineConstantPoolEntry() ||
17149 CNode->getOffset() != 0)
17157 auto *CNode = GetSupportedConstantPool(Ptr);
17158 if (!CNode || CNode->getTargetFlags() != 0)
17161 return CNode->getConstVal();
17169 auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
17170 auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(0).getOperand(0));
17176 if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
17179 return CNodeLo->getConstVal();
17184 assert(MI.getOpcode() == RISCV::ReadCounterWide && "Unexpected instruction");
17216 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
17219 int64_t LoCounter = MI.getOperand(2).getImm();
17220 int64_t HiCounter = MI.getOperand(3).getImm();
17230 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
17242 MI.eraseFromParent();
17250 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
17258 Register SrcReg = MI.getOperand(2).getReg();
17278 MI.eraseFromParent();
17285 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
17286 "Unexpected instruction");
17292 Register DstReg = MI.getOperand(0).getReg();
17315 MI.eraseFromParent();
17320 switch (MI.getOpcode()) {
17323 case RISCV::Select_GPR_Using_CC_GPR:
17324 case RISCV::Select_FPR16_Using_CC_GPR:
17325 case RISCV::Select_FPR16INX_Using_CC_GPR:
17326 case RISCV::Select_FPR32_Using_CC_GPR:
17327 case RISCV::Select_FPR32INX_Using_CC_GPR:
17328 case RISCV::Select_FPR64_Using_CC_GPR:
17329 case RISCV::Select_FPR64INX_Using_CC_GPR:
17330 case RISCV::Select_FPR64IN32X_Using_CC_GPR:
17336 unsigned RelOpcode, unsigned EqOpcode,
17339 Register DstReg = MI.getOperand(0).getReg();
17340 Register Src1Reg = MI.getOperand(1).getReg();
17341 Register Src2Reg = MI.getOperand(2).getReg();
17343 Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
17367 MI.eraseFromParent();
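// Editor's note: emitQuietFCMP appears to wrap a signaling compare so it
// behaves quietly: fflags are saved to SavedFFlags, the relational compare
// (RelOpcode, e.g. FLT/FLE) runs, fflags are restored, and EqOpcode (FEQ)
// is issued so that only signaling NaNs still raise Invalid. This is a
// reconstruction of intent from the register setup above; the exact BuildMI
// sequence is elided in this listing.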
17418 F->insert(It, FirstMBB);
17419 F->insert(It, SecondMBB);
17420 F->insert(It, SinkMBB);
17469 First.eraseFromParent();
17512 SelectDests.insert(MI.getOperand(0).getReg());
17516 if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR && Next != BB->end() &&
17517 Next->getOpcode() == MI.getOpcode() &&
17518 Next->getOperand(5).getReg() == MI.getOperand(0).getReg() &&
17519 Next->getOperand(5).isKill()) {
17524 SequenceMBBI != E; ++SequenceMBBI) {
17525 if (SequenceMBBI->isDebugInstr())
17528 if (SequenceMBBI->getOperand(1).getReg() != LHS ||
17529 SequenceMBBI->getOperand(2).getReg() != RHS ||
17530 SequenceMBBI->getOperand(3).getImm() != CC ||
17531 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
17532 SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
17534 LastSelectPseudo = &*SequenceMBBI;
17536 SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
17539 if (SequenceMBBI->hasUnmodeledSideEffects() ||
17540 SequenceMBBI->mayLoadOrStore() ||
17541 SequenceMBBI->usesCustomInsertionHook())
17544 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
17559 F->insert(I, IfFalseMBB);
17560 F->insert(I, TailMBB);
17564 TailMBB->push_back(DebugInstr->removeFromParent());
17568 TailMBB->splice(TailMBB->end(), HeadMBB,
17587 auto SelectMBBI = MI.getIterator();
17588 auto SelectEnd = std::next(LastSelectPseudo->getIterator());
17589 auto InsertionPoint = TailMBB->begin();
17590 while (SelectMBBI != SelectEnd) {
17591 auto Next = std::next(SelectMBBI);
17594 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
17595 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
17596 .addReg(SelectMBBI->getOperand(4).getReg())
17598 .addReg(SelectMBBI->getOperand(5).getReg())
17612 unsigned CVTFOpc) {
17618 Register SavedFFLAGS = MRI.createVirtualRegister(&RISCV::GPRRegClass);
17631 .add(MI.getOperand(1))
17632 .add(MI.getOperand(2))
17633 .add(MI.getOperand(3))
17635 .add(MI.getOperand(4))
17636 .add(MI.getOperand(5))
17637 .add(MI.getOperand(6))
17644 .add(MI.getOperand(0))
17645 .add(MI.getOperand(1))
17647 .add(MI.getOperand(3))
17649 .add(MI.getOperand(4))
17650 .add(MI.getOperand(5))
17651 .add(MI.getOperand(6))
17661 MI.eraseFromParent();
17667 unsigned CmpOpc, F2IOpc, I2FOpc, FSGNJOpc, FSGNJXOpc;
17669 switch (MI.getOpcode()) {
17672 case RISCV::PseudoFROUND_H:
17673 CmpOpc = RISCV::FLT_H;
17674 F2IOpc = RISCV::FCVT_W_H;
17675 I2FOpc = RISCV::FCVT_H_W;
17676 FSGNJOpc = RISCV::FSGNJ_H;
17677 FSGNJXOpc = RISCV::FSGNJX_H;
17678 RC = &RISCV::FPR16RegClass;
17680 case RISCV::PseudoFROUND_H_INX:
17681 CmpOpc = RISCV::FLT_H_INX;
17682 F2IOpc = RISCV::FCVT_W_H_INX;
17683 I2FOpc = RISCV::FCVT_H_W_INX;
17684 FSGNJOpc = RISCV::FSGNJ_H_INX;
17685 FSGNJXOpc = RISCV::FSGNJX_H_INX;
17686 RC = &RISCV::GPRF16RegClass;
17688 case RISCV::PseudoFROUND_S:
17689 CmpOpc = RISCV::FLT_S;
17690 F2IOpc = RISCV::FCVT_W_S;
17691 I2FOpc = RISCV::FCVT_S_W;
17692 FSGNJOpc = RISCV::FSGNJ_S;
17693 FSGNJXOpc = RISCV::FSGNJX_S;
17694 RC = &RISCV::FPR32RegClass;
17696 case RISCV::PseudoFROUND_S_INX:
17697 CmpOpc = RISCV::FLT_S_INX;
17698 F2IOpc = RISCV::FCVT_W_S_INX;
17699 I2FOpc = RISCV::FCVT_S_W_INX;
17700 FSGNJOpc = RISCV::FSGNJ_S_INX;
17701 FSGNJXOpc = RISCV::FSGNJX_S_INX;
17702 RC = &RISCV::GPRF32RegClass;
17704 case RISCV::PseudoFROUND_D:
17706 CmpOpc = RISCV::FLT_D;
17707 F2IOpc = RISCV::FCVT_L_D;
17708 I2FOpc = RISCV::FCVT_D_L;
17709 FSGNJOpc = RISCV::FSGNJ_D;
17710 FSGNJXOpc = RISCV::FSGNJX_D;
17711 RC = &RISCV::FPR64RegClass;
17713 case RISCV::PseudoFROUND_D_INX:
17715 CmpOpc = RISCV::FLT_D_INX;
17716 F2IOpc = RISCV::FCVT_L_D_INX;
17717 I2FOpc = RISCV::FCVT_D_L_INX;
17718 FSGNJOpc = RISCV::FSGNJ_D_INX;
17719 FSGNJXOpc = RISCV::FSGNJX_D_INX;
17720 RC = &RISCV::GPRRegClass;
17732 F->insert(I, CvtMBB);
17733 F->insert(I, DoneMBB);
17744 Register DstReg = MI.getOperand(0).getReg();
17745 Register SrcReg = MI.getOperand(1).getReg();
17746 Register MaxReg = MI.getOperand(2).getReg();
17747 int64_t FRM = MI.getOperand(3).getImm();
17752 Register FabsReg = MRI.createVirtualRegister(RC);
17756 Register CmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
17771 Register F2IReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
17793 MI.eraseFromParent();
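// Editor's note: the PseudoFROUND expansion above implements rounding to an
// integral FP value without a dedicated frint instruction: |Src| (FabsReg)
// is compared against MaxReg, the largest magnitude with no fractional part
// (2^52 for double, since larger doubles are already integral), using
// CmpOpc; if smaller, the value is converted to integer and back (F2IOpc
// then I2FOpc) under the requested rounding mode FRM, and FSGNJ restores
// the original sign so -0.0 and negative results survive.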
17800 switch (MI.getOpcode()) {
17803 case RISCV::ReadCounterWide:
17805 "ReadCounterWide is only to be used on riscv32");
17807 case RISCV::Select_GPR_Using_CC_GPR:
17808 case RISCV::Select_FPR16_Using_CC_GPR:
17809 case RISCV::Select_FPR16INX_Using_CC_GPR:
17810 case RISCV::Select_FPR32_Using_CC_GPR:
17811 case RISCV::Select_FPR32INX_Using_CC_GPR:
17812 case RISCV::Select_FPR64_Using_CC_GPR:
17813 case RISCV::Select_FPR64INX_Using_CC_GPR:
17814 case RISCV::Select_FPR64IN32X_Using_CC_GPR:
17816 case RISCV::BuildPairF64Pseudo:
17818 case RISCV::SplitF64Pseudo:
17820 case RISCV::PseudoQuietFLE_H:
17822 case RISCV::PseudoQuietFLE_H_INX:
17823 return emitQuietFCMP(MI, BB, RISCV::FLE_H_INX, RISCV::FEQ_H_INX, Subtarget);
17824 case RISCV::PseudoQuietFLT_H:
17826 case RISCV::PseudoQuietFLT_H_INX:
17827 return emitQuietFCMP(MI, BB, RISCV::FLT_H_INX, RISCV::FEQ_H_INX, Subtarget);
17828 case RISCV::PseudoQuietFLE_S:
17830 case RISCV::PseudoQuietFLE_S_INX:
17831 return emitQuietFCMP(MI, BB, RISCV::FLE_S_INX, RISCV::FEQ_S_INX, Subtarget);
17832 case RISCV::PseudoQuietFLT_S:
17834 case RISCV::PseudoQuietFLT_S_INX:
17835 return emitQuietFCMP(MI, BB, RISCV::FLT_S_INX, RISCV::FEQ_S_INX, Subtarget);
17836 case RISCV::PseudoQuietFLE_D:
17838 case RISCV::PseudoQuietFLE_D_INX:
17839 return emitQuietFCMP(MI, BB, RISCV::FLE_D_INX, RISCV::FEQ_D_INX, Subtarget);
17840 case RISCV::PseudoQuietFLE_D_IN32X:
17843 case RISCV::PseudoQuietFLT_D:
17845 case RISCV::PseudoQuietFLT_D_INX:
17846 return emitQuietFCMP(MI, BB, RISCV::FLT_D_INX, RISCV::FEQ_D_INX, Subtarget);
17847 case RISCV::PseudoQuietFLT_D_IN32X:
17847 case RISCV::PseudoQuietFLT_D_IN32X:
17851 case RISCV::PseudoVFROUND_NOEXCEPT_V_M1_MASK:
17853 RISCV::PseudoVFCVT_F_X_V_M1_MASK);
17854 case RISCV::PseudoVFROUND_NOEXCEPT_V_M2_MASK:
17856 RISCV::PseudoVFCVT_F_X_V_M2_MASK);
17857 case RISCV::PseudoVFROUND_NOEXCEPT_V_M4_MASK:
17859 RISCV::PseudoVFCVT_F_X_V_M4_MASK);
17860 case RISCV::PseudoVFROUND_NOEXCEPT_V_M8_MASK:
17862 RISCV::PseudoVFCVT_F_X_V_M8_MASK);
17863 case RISCV::PseudoVFROUND_NOEXCEPT_V_MF2_MASK:
17865 RISCV::PseudoVFCVT_F_X_V_MF2_MASK);
17866 case RISCV::PseudoVFROUND_NOEXCEPT_V_MF4_MASK:
17868 RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
17869 case RISCV::PseudoFROUND_H:
17870 case RISCV::PseudoFROUND_H_INX:
17871 case RISCV::PseudoFROUND_S:
17872 case RISCV::PseudoFROUND_S_INX:
17873 case RISCV::PseudoFROUND_D:
17874 case RISCV::PseudoFROUND_D_INX:
17875 case RISCV::PseudoFROUND_D_IN32X:
17877 case TargetOpcode::STATEPOINT:
17878 case TargetOpcode::STACKMAP:
17879 case TargetOpcode::PATCHPOINT:
17882 "supported on 64-bit targets");
17900 if (MI.readsRegister(RISCV::FRM))
17932 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
17933 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
17936 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
17937 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
17940 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
17941 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
17945 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
17946 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
17947 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
17949 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
17950 RISCV::V20M2, RISCV::V22M2};
17958 static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
17959 RISCV::X13, RISCV::X14, RISCV::X15,
17960 RISCV::X16, RISCV::X17};
17962 static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
17963 RISCV::X13, RISCV::X14, RISCV::X15};
17974 static const MCPhysReg FastCCIGPRs[] = {
17975 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
17976 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
17977 RISCV::X29, RISCV::X30, RISCV::X31};
17980 static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
17981 RISCV::X13, RISCV::X14, RISCV::X15,
17996 unsigned XLenInBytes = XLen / 8;
18009 Align StackAlign(XLenInBytes);
18010 if (!EABI || XLen != 32)
18037 std::optional<unsigned> FirstMaskArgument,
18040 if (RC == &RISCV::VRRegClass) {
18044 if (FirstMaskArgument && ValNo == *FirstMaskArgument)
18048 if (RC == &RISCV::VRM2RegClass)
18050 if (RC == &RISCV::VRM4RegClass)
18052 if (RC == &RISCV::VRM8RegClass)
18062 std::optional<unsigned> FirstMaskArgument) {
18063 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
18064 assert(XLen == 32 || XLen == 64);
18065 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
18069 if (ArgFlags.isNest()) {
18070 if (unsigned Reg = State.AllocateReg(RISCV::X7)) {
18078 if (!LocVT.isVector() && IsRet && ValNo > 1)
18083 bool UseGPRForF16_F32 = true;
18086 bool UseGPRForF64 = true;
18098 UseGPRForF16_F32 = !IsFixed;
18102 UseGPRForF16_F32 = !IsFixed;
18103 UseGPRForF64 = !IsFixed;
18109 UseGPRForF16_F32 = true;
18110 UseGPRForF64 = true;
18117 if (UseGPRForF16_F32 &&
18118 (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
18121 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
18138 unsigned TwoXLenInBytes = (2 * XLen) / 8;
18140 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
18144 if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
18153 "PendingLocs and PendingArgFlags out of sync");
18157 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
18158 assert(PendingLocs.empty() && "Can't lower f64 if it is split");
18206 PendingLocs.size() <= 2) {
18207 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
18212 PendingLocs.clear();
18213 PendingArgFlags.clear();
18215 XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
18221 unsigned StoreSizeBytes = XLen / 8;
18224 if ((ValVT == MVT::f16 || ValVT == MVT::bf16) && !UseGPRForF16_F32)
18226 else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
18228 else if (ValVT == MVT::f64 && !UseGPRForF64)
18231 Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
18264 if (!PendingLocs.empty()) {
18266 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
18268 for (auto &It : PendingLocs) {
18270 It.convertToReg(Reg);
18275 PendingLocs.clear();
18276 PendingArgFlags.clear();
18280 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
18282 "Expected an XLenVT or vector types at this stage");
18300 template <typename ArgTy>
18302 for (const auto &ArgIdx : enumerate(Args)) {
18303 MVT ArgVT = ArgIdx.value().VT;
18305 return ArgIdx.index();
18307 return std::nullopt;
18310 void RISCVTargetLowering::analyzeInputArgs(
18313 RISCVCCAssignFn Fn) const {
18314 unsigned NumArgs = Ins.size();
18317 std::optional<unsigned> FirstMaskArgument;
18321 for (unsigned i = 0; i != NumArgs; ++i) {
18325 Type *ArgTy = nullptr;
18327 ArgTy = FType->getReturnType();
18328 else if (Ins[i].isOrigArg())
18329 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
18333 ArgFlags, CCInfo, true, IsRet, ArgTy, *this,
18334 FirstMaskArgument)) {
18335 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
18342 void RISCVTargetLowering::analyzeOutputArgs(
18345 CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
18346 unsigned NumArgs = Outs.size();
18348 std::optional<unsigned> FirstMaskArgument;
18352 for (unsigned i = 0; i != NumArgs; i++) {
18353 MVT ArgVT = Outs[i].VT;
18355 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
18359 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
18360 FirstMaskArgument)) {
18361 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
18415 if (In.isOrigArg()) {
18420 if ((BitWidth <= 32 && In.Flags.isSExt()) ||
18421 (BitWidth < 32 && In.Flags.isZExt())) {
18450 } else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32) {
18496 ExtType, DL, LocVT, Chain, FIN,
18513 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
18526 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
18536 unsigned ValNo, MVT ValVT, MVT LocVT,
18539 bool IsFixed, bool IsRet, Type *OrigTy,
18541 std::optional<unsigned> FirstMaskArgument) {
18542 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
18551 if (LocVT == MVT::f16 &&
18552 (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZfhmin())) {
18554 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
18555 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H,
18556 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H,
18557 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
18558 if (unsigned Reg = State.AllocateReg(FPR16List)) {
18564 if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
18566 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
18567 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
18568 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
18569 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
18570 if (unsigned Reg = State.AllocateReg(FPR32List)) {
18576 if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
18578 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
18579 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
18580 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
18581 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
18582 if (unsigned Reg = State.AllocateReg(FPR64List)) {
18589 if ((LocVT == MVT::f16 &&
18590 (Subtarget.hasStdExtZhinx() || Subtarget.hasStdExtZhinxmin())) ||
18591 (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
18592 (LocVT == MVT::f64 && Subtarget.is64Bit() &&
18593 Subtarget.hasStdExtZdinx())) {
18600 if (LocVT == MVT::f16) {
18606 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
18612 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
18654 if (ArgFlags.isNest()) {
18656 "Attribute 'nest' is not supported in GHC calling convention");
18660 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
18661 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
18663 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
18675 if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
18678 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
18679 RISCV::F18_F, RISCV::F19_F,
18680 RISCV::F20_F, RISCV::F21_F};
18681 if (unsigned Reg = State.AllocateReg(FPR32List)) {
18687 if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
18690 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
18691 RISCV::F24_D, RISCV::F25_D,
18692 RISCV::F26_D, RISCV::F27_D};
18693 if (unsigned Reg = State.AllocateReg(FPR64List)) {
18699 if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
18700 (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
18720 switch (CallConv) {
18729 if (Subtarget.isRVE())
18733 "(Zdinx/D) instruction set extensions");
18737 if (Func.hasFnAttribute("interrupt")) {
18738 if (!Func.arg_empty())
18740 "Functions with the interrupt attribute cannot have arguments!");
18745 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
18747 "Function interrupt attribute argument not supported!");
18752 unsigned XLenInBytes = Subtarget.getXLen() / 8;
18754 std::vector<SDValue> OutChains;
18763 analyzeInputArgs(MF, CCInfo, Ins, false,
18767 for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
18788 unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
18789 unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
18791 while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
18793 unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
18822 int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
18827 if (VarArgsSaveSize == 0) {
18831 int VaArgOffset = -VarArgsSaveSize;
18839 XLenInBytes, VaArgOffset - static_cast<int>(XLenInBytes), true);
18840 VarArgsSaveSize += XLenInBytes;
18847 for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
18852 Chain, DL, ArgValue, FIN,
18854 OutChains.push_back(Store);
18868 if (!OutChains.empty()) {
18869 OutChains.push_back(Chain);
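// Editor's note: this block materializes the varargs register-save area.
// Any argument registers left unallocated at Idx are spilled to consecutive
// XLen-sized fixed stack slots (the loop over ArgRegs above), so a later
// va_arg can walk them as if the caller had passed everything on the stack;
// VarArgsSaveSize records how much frame space that costs.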
18879 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
18883 auto CalleeCC = CLI.CallConv;
18884 auto &Outs = CLI.Outs;
18886 auto CallerCC = Caller.getCallingConv();
18893 if (Caller.hasFnAttribute("interrupt"))
18908 for (auto &VA : ArgLocs)
18914 auto IsCallerStructRet = Caller.hasStructRetAttr();
18915 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
18916 if (IsCallerStructRet || IsCalleeStructRet)
18921 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
18922 if (CalleeCC != CallerCC) {
18923 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
18924 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
18931 for (auto &Arg : Outs)
18932 if (Arg.Flags.isByVal())
18967 if (Subtarget.isRVE())
18971 analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI,
18977 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
18983 "site marked musttail");
18990 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
18992 if (!Flags.isByVal())
18996 unsigned Size = Flags.getByValSize();
18997 Align Alignment = Flags.getNonZeroByValAlign();
19004 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
19018 for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
19021 SDValue ArgValue = OutVals[OutIdx];
19041 if (!StackPtr.getNode())
19052 RegsToPass.push_back(std::make_pair(RegHigh, Hi));
19070 unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
19071 unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
19077 while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
19078 SDValue PartValue = OutVals[OutIdx + 1];
19079 unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
19091 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
19093 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
19095 for (const auto &Part : Parts) {
19096 SDValue PartValue = Part.first;
19097 SDValue PartOffset = Part.second;
19104 ArgValue = SpillSlot;
19110 if (Flags.isByVal())
19111 ArgValue = ByValArgs[j++];
19118 assert(!IsTailCall && "Tail call not allowed if stack is used "
19119 "for passing parameters");
19122 if (!StackPtr.getNode())
19135 if (!MemOpChains.empty())
19141 for (auto &Reg : RegsToPass) {
19142 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
19149 validateCCReservedRegs(RegsToPass, MF);
19154 "Return address register required, but has been reserved."});
19173 for (auto &Reg : RegsToPass)
19179 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
19180 assert(Mask && "Missing call preserved mask for calling convention");
19189 "Unexpected CFI type for a direct call");
19219 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
19220 auto &VA = RVLocs[i];
19228 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
19229 assert(VA.needsCustom());
19252 std::optional<unsigned> FirstMaskArgument;
19256 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
19257 MVT VT = Outs[i].VT;
19261 ArgFlags, CCInfo, true, true, nullptr,
19262 *this, FirstMaskArgument))
19294 for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
19295 SDValue Val = OutVals[OutIdx];
19304 DAG.getVTList(MVT::i32, MVT::i32), Val);
19308 Register RegHi = RVLocs[++i].getLocReg();
19314 "Return value register required, but has been reserved."});
19330 "Return value register required, but has been reserved."});
19352 if (Func.hasFnAttribute("interrupt")) {
19353 if (!Func.getReturnType()->isVoidTy())
19355 "Functions with the interrupt attribute must have void return type!");
19361 if (Kind == "supervisor")
19367 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
19370 void RISCVTargetLowering::validateCCReservedRegs(
19371 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
19380 F, "Argument register required, but has been reserved."});
19386 if (N->getNumValues() != 1)
19388 if (!N->hasNUsesOfValue(1, 0))
19391 SDNode *Copy = *N->use_begin();
19405 if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
19409 bool HasRet = false;
19410 for (SDNode *Node : Copy->uses()) {
19418 Chain = Copy->getOperand(0);
19427#define NODE_NAME_CASE(NODE) \
19428 case RISCVISD::NODE: \
19429 return "RISCVISD::" #NODE;
19681#undef NODE_NAME_CASE
19688 if (Constraint.size() == 1) {
19689 switch (Constraint[0]) {
19705 if (Constraint == "vr" || Constraint == "vm")
19711 std::pair<unsigned, const TargetRegisterClass *>
19717 if (Constraint.size() == 1) {
19718 switch (Constraint[0]) {
19723 if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
19724 return std::make_pair(0U, &RISCV::GPRF16RegClass);
19725 if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
19726 return std::make_pair(0U, &RISCV::GPRF32RegClass);
19727 if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
19728 return std::make_pair(0U, &RISCV::GPRPairRegClass);
19729 return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
19731 if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
19732 return std::make_pair(0U, &RISCV::FPR16RegClass);
19733 if (Subtarget.hasStdExtF() && VT == MVT::f32)
19734 return std::make_pair(0U, &RISCV::FPR32RegClass);
19735 if (Subtarget.hasStdExtD() && VT == MVT::f64)
19736 return std::make_pair(0U, &RISCV::FPR64RegClass);
19741 } else if (Constraint == "vr") {
19742 for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
19743 &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
19745 return std::make_pair(0U, RC);
19747 } else if (Constraint == "vm") {
19748 if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
19749 return std::make_pair(0U, &RISCV::VMV0RegClass);
19757 .Case("{zero}", RISCV::X0)
19758 .Case("{ra}", RISCV::X1)
19759 .Case("{sp}", RISCV::X2)
19760 .Case("{gp}", RISCV::X3)
19761 .Case("{tp}", RISCV::X4)
19762 .Case("{t0}", RISCV::X5)
19763 .Case("{t1}", RISCV::X6)
19764 .Case("{t2}", RISCV::X7)
19765 .Cases("{s0}", "{fp}", RISCV::X8)
19766 .Case("{s1}", RISCV::X9)
19767 .Case("{a0}", RISCV::X10)
19768 .Case("{a1}", RISCV::X11)
19769 .Case("{a2}", RISCV::X12)
19770 .Case("{a3}", RISCV::X13)
19771 .Case("{a4}", RISCV::X14)
19772 .Case("{a5}", RISCV::X15)
19773 .Case("{a6}", RISCV::X16)
19774 .Case("{a7}", RISCV::X17)
19775 .Case("{s2}", RISCV::X18)
19776 .Case("{s3}", RISCV::X19)
19777 .Case("{s4}", RISCV::X20)
19778 .Case("{s5}", RISCV::X21)
19779 .Case("{s6}", RISCV::X22)
19780 .Case("{s7}", RISCV::X23)
19781 .Case("{s8}", RISCV::X24)
19782 .Case("{s9}", RISCV::X25)
19783 .Case("{s10}", RISCV::X26)
19784 .Case("{s11}", RISCV::X27)
19785 .Case("{t3}", RISCV::X28)
19786 .Case("{t4}", RISCV::X29)
19787 .Case("{t5}", RISCV::X30)
19788 .Case("{t6}", RISCV::X31)
19790 if (XRegFromAlias != RISCV::NoRegister)
19791 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
19800 if (Subtarget.hasStdExtF()) {
19802 .Cases("{f0}", "{ft0}", RISCV::F0_F)
19803 .Cases("{f1}", "{ft1}", RISCV::F1_F)
19804 .Cases("{f2}", "{ft2}", RISCV::F2_F)
19805 .Cases("{f3}", "{ft3}", RISCV::F3_F)
19806 .Cases("{f4}", "{ft4}", RISCV::F4_F)
19807 .Cases("{f5}", "{ft5}", RISCV::F5_F)
19808 .Cases("{f6}", "{ft6}", RISCV::F6_F)
19809 .Cases("{f7}", "{ft7}", RISCV::F7_F)
19810 .Cases("{f8}", "{fs0}", RISCV::F8_F)
19811 .Cases("{f9}", "{fs1}", RISCV::F9_F)
19812 .Cases("{f10}", "{fa0}", RISCV::F10_F)
19813 .Cases("{f11}", "{fa1}", RISCV::F11_F)
19814 .Cases("{f12}", "{fa2}", RISCV::F12_F)
19815 .Cases("{f13}", "{fa3}", RISCV::F13_F)
19816 .Cases("{f14}", "{fa4}", RISCV::F14_F)
19817 .Cases("{f15}", "{fa5}", RISCV::F15_F)
19818 .Cases("{f16}", "{fa6}", RISCV::F16_F)
19819 .Cases("{f17}", "{fa7}", RISCV::F17_F)
19820 .Cases("{f18}", "{fs2}", RISCV::F18_F)
19821 .Cases("{f19}", "{fs3}", RISCV::F19_F)
19822 .Cases("{f20}", "{fs4}", RISCV::F20_F)
19823 .Cases("{f21}", "{fs5}", RISCV::F21_F)
19824 .Cases("{f22}", "{fs6}", RISCV::F22_F)
19825 .Cases("{f23}", "{fs7}", RISCV::F23_F)
19826 .Cases("{f24}", "{fs8}", RISCV::F24_F)
19827 .Cases("{f25}", "{fs9}", RISCV::F25_F)
19828 .Cases("{f26}", "{fs10}", RISCV::F26_F)
19829 .Cases("{f27}", "{fs11}", RISCV::F27_F)
19830 .Cases("{f28}", "{ft8}", RISCV::F28_F)
19831 .Cases("{f29}", "{ft9}", RISCV::F29_F)
19832 .Cases("{f30}", "{ft10}", RISCV::F30_F)
19833 .Cases("{f31}", "{ft11}", RISCV::F31_F)
19835 if (FReg != RISCV::NoRegister) {
19836 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
19837 if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
19838 unsigned RegNo = FReg - RISCV::F0_F;
19839 unsigned DReg = RISCV::F0_D + RegNo;
19840 return std::make_pair(DReg, &RISCV::FPR64RegClass);
19842 if (VT == MVT::f32 || VT == MVT::Other)
19843 return std::make_pair(FReg, &RISCV::FPR32RegClass);
19844 if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16) {
19845 unsigned RegNo = FReg - RISCV::F0_F;
19846 unsigned HReg = RISCV::F0_H + RegNo;
19847 return std::make_pair(HReg, &RISCV::FPR16RegClass);
19854 .Case("{v0}", RISCV::V0)
19855 .Case("{v1}", RISCV::V1)
19856 .Case("{v2}", RISCV::V2)
19857 .Case("{v3}", RISCV::V3)
19858 .Case("{v4}", RISCV::V4)
19859 .Case("{v5}", RISCV::V5)
19860 .Case("{v6}", RISCV::V6)
19861 .Case("{v7}", RISCV::V7)
19862 .Case("{v8}", RISCV::V8)
19863 .Case("{v9}", RISCV::V9)
19864 .Case("{v10}", RISCV::V10)
19865 .Case("{v11}", RISCV::V11)
19866 .Case("{v12}", RISCV::V12)
19867 .Case("{v13}", RISCV::V13)
19868 .Case("{v14}", RISCV::V14)
19869 .Case("{v15}", RISCV::V15)
19870 .Case("{v16}", RISCV::V16)
19871 .Case("{v17}", RISCV::V17)
19872 .Case("{v18}", RISCV::V18)
19873 .Case("{v19}", RISCV::V19)
19874 .Case("{v20}", RISCV::V20)
19875 .Case("{v21}", RISCV::V21)
19876 .Case("{v22}", RISCV::V22)
19877 .Case("{v23}", RISCV::V23)
19878 .Case("{v24}", RISCV::V24)
19879 .Case("{v25}", RISCV::V25)
19880 .Case("{v26}", RISCV::V26)
19881 .Case("{v27}", RISCV::V27)
19882 .Case("{v28}", RISCV::V28)
19883 .Case("{v29}", RISCV::V29)
19884 .Case("{v30}", RISCV::V30)
19885 .Case("{v31}", RISCV::V31)
19887 if (VReg != RISCV::NoRegister) {
19888 if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
19889 return std::make_pair(VReg, &RISCV::VMRegClass);
19890 if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
19891 return std::make_pair(VReg, &RISCV::VRRegClass);
19892 for (const auto *RC :
19893 {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
19894 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
19895 VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
19896 return std::make_pair(VReg, RC);
19902 std::pair<Register, const TargetRegisterClass *> Res =
19908 if (Res.second == &RISCV::GPRF16RegClass ||
19909 Res.second == &RISCV::GPRF32RegClass ||
19910 Res.second == &RISCV::GPRPairRegClass)
19911 return std::make_pair(Res.first, &RISCV::GPRRegClass);
19919 if (ConstraintCode.size() == 1) {
19920 switch (ConstraintCode[0]) {
19935 if (Constraint.size() == 1) {
19936 switch (Constraint[0]) {
19939 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
19941 if (isInt<12>(CVal))
19954 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
19956 if (isUInt<5>(CVal))
19974 if (Subtarget.hasStdExtZtso()) {
19990 if (Subtarget.hasStdExtZtso()) {
19998 if (Subtarget.enableSeqCstTrailingFence() && isa<StoreInst>(Inst) &&
20015 if (Subtarget.hasForcedAtomics())
20020 if (Subtarget.hasStdExtZacas() &&
20021 (Size >= 32 || Subtarget.hasStdExtZabha()))
20027 if (Size < 32 && !Subtarget.hasStdExtZabha())
20040 return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
20042 return Intrinsic::riscv_masked_atomicrmw_add_i32;
20044 return Intrinsic::riscv_masked_atomicrmw_sub_i32;
20046 return Intrinsic::riscv_masked_atomicrmw_nand_i32;
20048 return Intrinsic::riscv_masked_atomicrmw_max_i32;
20050 return Intrinsic::riscv_masked_atomicrmw_min_i32;
20052 return Intrinsic::riscv_masked_atomicrmw_umax_i32;
20054 return Intrinsic::riscv_masked_atomicrmw_umin_i32;
20063 return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
20065 return Intrinsic::riscv_masked_atomicrmw_add_i64;
20067 return Intrinsic::riscv_masked_atomicrmw_sub_i64;
20069 return Intrinsic::riscv_masked_atomicrmw_nand_i64;
20071 return Intrinsic::riscv_masked_atomicrmw_max_i64;
20073 return Intrinsic::riscv_masked_atomicrmw_min_i64;
20075 return Intrinsic::riscv_masked_atomicrmw_umax_i64;
20077 return Intrinsic::riscv_masked_atomicrmw_umin_i64;
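// Editor's note: these riscv_masked_atomicrmw_* intrinsics exist because
// base RISC-V only has word/doubleword AMOs (without Zabha): an i8/i16
// atomicrmw is lowered to an LR/SC loop on the containing aligned word,
// with Mask selecting the bytes being operated on and a shift positioning
// the operand. The *_i32 and *_i64 variants are the RV32 and RV64
// spellings of the same scheme.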
20103 unsigned XLen = Subtarget.getXLen();
20127 unsigned ValWidth =
20132 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
20135 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
20147 if (Subtarget.hasForcedAtomics())
20151 if (!(Subtarget.hasStdExtZabha() && Subtarget.hasStdExtZacas()) &&
20160 unsigned XLen = Subtarget.getXLen();
20162 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
20167 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
20173 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
20180 EVT DataVT) const {
20196 return Subtarget.hasStdExtZfhmin();
20198 return Subtarget.hasStdExtF();
20200 return Subtarget.hasStdExtD();
20232 "RVVBitsPerBlock changed, audit needed");
20241 if (!Subtarget.hasVendorXTHeadMemIdx())
20247 Base = Op->getOperand(0);
20249 int64_t RHSC = RHS->getSExtValue();
20255 bool isLegalIndexedOffset = false;
20256 for (unsigned i = 0; i < 4; i++)
20257 if (isInt<5>(RHSC >> i) && ((RHSC % (1LL << i)) == 0)) {
20258 isLegalIndexedOffset = true;
20262 if (!isLegalIndexedOffset)
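// Editor's note: XTHeadMemIdx pre/post-indexed forms encode the offset as a
// 5-bit signed immediate scaled by a 2-bit shift, hence the loop above: the
// offset is legal iff RHSC == imm5 << i for some i in [0, 3]. For example,
// 120 = 15 << 3 passes, while 130 fails (no shift in [0, 3] yields a 5-bit
// signed immediate with an exact multiple).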
20279 VT = LD->getMemoryVT();
20280 Ptr = LD->getBasePtr();
20281 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
20282 VT = ST->getMemoryVT();
20283 Ptr = ST->getBasePtr();
20302 VT = LD->getMemoryVT();
20303 Ptr = LD->getBasePtr();
20304 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
20305 VT = ST->getMemoryVT();
20306 Ptr = ST->getBasePtr();
20349 const Constant *PersonalityFn) const {
20354 const Constant *PersonalityFn) const {
20378 const bool HasExtMOrZmmul =
20379 Subtarget.hasStdExtM() || Subtarget.hasStdExtZmmul();
20388 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
20390 const APInt &Imm = ConstNode->getAPIntValue();
20391 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
20392 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
20396 if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
20397 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
20398 (Imm - 8).isPowerOf2()))
20403 if (!Imm.isSignedIntN(12) && Imm.countr_zero() < 12 &&
20404 ConstNode->hasOneUse()) {
20405 APInt ImmS = Imm.ashr(Imm.countr_zero());
20406 if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
20407 (1 - ImmS).isPowerOf2())
20431 if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
20440 unsigned *Fast) const {
20443 *Fast = Subtarget.hasFastUnalignedAccess();
20444 return Subtarget.hasFastUnalignedAccess();
20460 *Fast = Subtarget.hasFastUnalignedAccess();
20461 return Subtarget.hasFastUnalignedAccess();
20470 if (FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat))
20482 if (Op.size() < MinVLenInBytes)
20492 MVT PreferredVT = (Op.isMemset() && !Op.isZeroMemset()) ? MVT::i8 : ELenVT;
20496 if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
20498 if (Op.isFixedDstAlign())
20499 RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
20501 RequiredAlign = std::min(RequiredAlign, Op.getSrcAlign());
20509 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
20510 bool IsABIRegCopy = CC.has_value();
20512 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
20513 PartVT == MVT::f32) {
20531 if (PartVTBitSize % ValueVTBitSize == 0) {
20532 assert(PartVTBitSize >= ValueVTBitSize);
20539 if (ValueEltVT != PartEltVT) {
20540 if (PartVTBitSize > ValueVTBitSize) {
20542 assert(Count != 0 && "The number of element should not be zero.");
20543 EVT SameEltTypeVT =
20564 MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
20565 bool IsABIRegCopy = CC.has_value();
20566 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
20567 PartVT == MVT::f32) {
20584 if (PartVTBitSize % ValueVTBitSize == 0) {
20585 assert(PartVTBitSize >= ValueVTBitSize);
20586 EVT SameEltTypeVT = ValueVT;
20593 if (ValueEltVT != PartEltVT) {
20595 assert(Count != 0 && "The number of element should not be zero.");
20612 bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
20619 unsigned Opc = N->getOpcode();
20643 VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
20657 if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
20662 if (FVTy->getNumElements() < 2)
20672 return Factor * LMUL <= 8;
20676 Align Alignment) const {
20688 if (!Subtarget.hasFastUnalignedAccess() &&
20696 Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
20697 Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
20698 Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
20699 Intrinsic::riscv_seg8_load};
20718 auto *VTy = cast<FixedVectorType>(Shuffles[0]->getType());
20728 {VTy, LI->getPointerOperandType(), XLenTy});
20730 Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
20735 for (unsigned i = 0; i < Shuffles.size(); i++) {
20737 Shuffles[i]->replaceAllUsesWith(SubVec);
20744 Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
20745 Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
20746 Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
20747 Intrinsic::riscv_seg8_store};
20767 unsigned Factor) const {
20769 auto *ShuffleVTy = cast<FixedVectorType>(SVI->getType());
20772 ShuffleVTy->getNumElements() / Factor);
20774 SI->getPointerAddressSpace(),
20775 SI->getModule()->getDataLayout()))
20782 {VTy, SI->getPointerOperandType(), XLenTy});
20787 for (unsigned i = 0; i < Factor; i++) {
20796 Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
20797 Ops.append({SI->getPointerOperand(), VL});
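// Editor's note: lowerInterleavedStore maps a factor-F interleaving shuffle
// feeding a store onto the riscv_seg{F}_store intrinsics listed above,
// which select to vsseg<F>e instructions. A rough IR-level sketch, assuming
// factor 2 (operand order and type suffixes are illustrative only):
//   %v = shufflevector %a, %b, <0, 4, 1, 5, ...>   ; interleave two vectors
//   store %v, ptr %p
// becomes a single segment store of %a and %b to %p with the computed VL,
// so the interleaving happens in the memory unit rather than via shuffles.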
20810 if (DI->getIntrinsicID() != Intrinsic::experimental_vector_deinterleave2)
20813 unsigned Factor = 2;
20828 if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
20831 {ResVTy, LI->getPointerOperandType(), XLenTy});
20832 VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
20835 Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
20836 Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
20837 Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
20838 Intrinsic::riscv_vlseg8};
20860 if (II->getIntrinsicID() != Intrinsic::experimental_vector_interleave2)
20863 unsigned Factor = 2;
20869 SI->getPointerAddressSpace(),
20870 SI->getModule()->getDataLayout()))
20877 if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
20880 {InVTy, SI->getPointerOperandType(), XLenTy});
20881 VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
20884 Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
20885 Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
20886 Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
20887 Intrinsic::riscv_vsseg8};
20895 SI->getPointerOperand(), VL});
20905 "Invalid call instruction for a KCFI check");
20907 MBBI->getOpcode()));
20910 Target.setIsRenamable(false);
20918#define GET_REGISTER_MATCHER
20919#include "RISCVGenAsmMatcher.inc"
20925 if (Reg == RISCV::NoRegister)
20927 if (Reg == RISCV::NoRegister)
20939 const MDNode *NontemporalInfo = I.getMetadata(LLVMContext::MD_nontemporal);
20941 if (NontemporalInfo == nullptr)
20949 int NontemporalLevel = 5;
20950 const MDNode *RISCVNontemporalInfo =
20951 I.getMetadata("riscv-nontemporal-domain");
20952 if (RISCVNontemporalInfo != nullptr)
20955 cast<ConstantAsMetadata>(RISCVNontemporalInfo->getOperand(0))
20959 assert((1 <= NontemporalLevel && NontemporalLevel <= 5) &&
20960 "RISC-V target doesn't support this non-temporal domain.");
20962 NontemporalLevel -= 2;
20964 if (NontemporalLevel & 0b1)
20966 if (NontemporalLevel & 0b10)
20979 return TargetFlags;
20989 return isTypeLegal(VT) && Subtarget.hasStdExtZvbb();
20992 return Subtarget.hasStdExtZbb() &&
21005 if (Op == Instruction::Add || Op == Instruction::Sub ||
21006 Op == Instruction::And || Op == Instruction::Or ||
21007 Op == Instruction::Xor || Op == Instruction::InsertElement ||
21008 Op == Instruction::ShuffleVector || Op == Instruction::Load)
21016 !isa<ReturnInst>(&Inst))
21019 if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
21020 if (AI->getAllocatedType()->isScalableTy())
21028 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
21036 if (!Subtarget.hasShortForwardBranchOpt())
21038 EVT VT = N->getValueType(0);
21039 if (!(VT == MVT::i32 || (VT == MVT::i64 && Subtarget.is64Bit())))
21043 if (Divisor.sgt(2048) || Divisor.slt(-2048))
21048 bool RISCVTargetLowering::shouldFoldSelectWithSingleBitTest(
21049 EVT VT, const APInt &AndMask) const {
21050 if (Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps())
21051 return !Subtarget.hasStdExtZbs() && AndMask.ugt(1024);
21055 unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
21061#define GET_RISCVVIntrinsicsTable_IMPL
21062#include "RISCVGenSearchableTables.inc"
Match shuffles that concatenate two vectors, rotate the concatenation, and then extract the original ...
static const Intrinsic::ID FixedVlsegIntrIds[]
static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static MVT getLMUL1VT(MVT VT)
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2, bool EABI)
static SDValue lowerVECTOR_SHUFFLEAsVSlide1(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
Match v(f)slide1up/down idioms.
static MachineBasicBlock * emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI, MachineBasicBlock *BB, unsigned CVTXOpc, unsigned CVTFOpc)
static const MCPhysReg ArgVRM2s[]
static bool isInterleaveShuffle(ArrayRef< int > Mask, MVT VT, int &EvenSrc, int &OddSrc, const RISCVSubtarget &Subtarget)
Is this shuffle interleaving contiguous elements from one vector into the even elements and contiguou...
static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG)
According to the property that indexed load/store instructions zero-extend their indices,...
static void promoteVCIXScalar(const SDValue &Op, SmallVectorImpl< SDValue > &Operands, SelectionDAG &DAG)
static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Scalar, SDValue VL, SelectionDAG &DAG)
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode)
static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Lo, SDValue Hi, SDValue VL, SelectionDAG &DAG)
static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue getAllOnesMask(MVT VecVT, SDValue VL, const SDLoc &DL, SelectionDAG &DAG)
Creates an all ones mask suitable for masking a vector of type VecTy with vector length VL.
static cl::opt< int > FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden, cl::desc("Give the maximum number of instructions that we will " "use for creating a floating-point immediate value"), cl::init(2))
static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performVWADDSUBW_VLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask, Align BaseAlign, const RISCVSubtarget &ST)
Match the index of a gather or scatter operation as an operation with twice the element width and hal...
static bool isLegalBitRotate(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, MVT &RotateVT, unsigned &RotateAmt)
static SDValue combineVFMADD_VLWithVFNEG_VL(SDNode *N, SelectionDAG &DAG)
static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1, SelectionDAG &DAG)
static SDValue useInversedSetcc(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second, MachineBasicBlock *ThisMBB, const RISCVSubtarget &Subtarget)
static SDValue performINSERT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue lowerFMAXIMUM_FMINIMUM(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue SplitStrictFPVectorOp(SDValue Op, SelectionDAG &DAG)
static std::optional< uint64_t > getExactInteger(const APFloat &APF, uint32_t BitWidth)
static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG)
static SDValue performMemPairCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG)
static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static unsigned getRVVReductionOp(unsigned ISDOpcode)
static std::optional< bool > matchSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static MVT getMaskTypeFor(MVT VecVT)
Return the type of the mask type suitable for masking the provided vector type.
static SDValue getVCIXISDNodeVOID(SDValue &Op, SelectionDAG &DAG, unsigned Type)
static cl::opt< unsigned > NumRepeatedDivisors(DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden, cl::desc("Set the minimum number of repetitions of a divisor to allow " "transformation to multiplications by the reciprocal"), cl::init(2))
static SDValue foldSelectOfCTTZOrCTLZ(SDNode *N, SelectionDAG &DAG)
static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool hasMaskOp(unsigned Opcode)
Return true if a RISC-V target specified op has a mask operand.
static bool legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index, ISD::MemIndexType &IndexType, RISCVTargetLowering::DAGCombinerInfo &DCI)
static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static unsigned getRISCVVLOp(SDValue Op)
Get a RISC-V target specified VL op for a given SDNode.
static unsigned getVecReduceOpcode(unsigned Opc)
Given a binary operator, return the associative generic ISD::VECREDUCE_OP which corresponds to it.
static std::pair< SDValue, SDValue > getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performFP_TO_INT_SATCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT, SDValue StartValue, SDValue Vec, SDValue Mask, SDValue VL, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Helper to lower a reduction sequence of the form: scalar = reduce_op vec, scalar_start.
static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, std::optional< unsigned > FirstMaskArgument, CCState &State, const RISCVTargetLowering &TLI)
static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::pair< SDValue, SDValue > getDefaultScalableVLOps(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::optional< unsigned > preAssignMask(const ArgTy &Args)
static SDValue getVLOperand(SDValue Op)
static MachineBasicBlock * emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB, const RISCVSubtarget &Subtarget)
static cl::opt< bool > RV64LegalI32("riscv-experimental-rv64-legal-i32", cl::ReallyHidden, cl::desc("Make i32 a legal type for SelectionDAG on RV64."))
static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< unsigned > ExtensionMaxWebSize(DEBUG_TYPE "-ext-max-web-size", cl::Hidden, cl::desc("Give the maximum size (in number of nodes) of the web of " "instructions that we will consider for VW expansion"), cl::init(18))
static SDValue combineBinOpOfZExt(SDNode *N, SelectionDAG &DAG)
static SDValue getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL, unsigned Policy=RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED)
static bool isSelectPseudo(MachineInstr &MI)
static std::optional< MVT > getSmallestVTForIndex(MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool useRVVForFixedLengthVectorVT(MVT VT, const RISCVSubtarget &Subtarget)
static Value * useTpOffset(IRBuilderBase &IRB, unsigned Offset)
static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * emitSplitF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget)
static SDValue SplitVectorOp(SDValue Op, SelectionDAG &DAG)
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc)
static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerSMULO(SDValue Op, SelectionDAG &DAG)
static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG, SDValue TrueVal, SDValue FalseVal, bool Swapped)
static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask, SmallVector< int > &ShuffleMask)
Match the index vector of a scatter or gather node as the shuffle mask which performs the rearrangeme...
static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue SplitVPOp(SDValue Op, SelectionDAG &DAG)
static bool hasMergeOp(unsigned Opcode)
Return true if a RISC-V target specified op has a merge operand.
static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static void processVCIXOperands(SDValue &OrigOp, SmallVectorImpl< SDValue > &Operands, SelectionDAG &DAG)
static SDValue widenVectorOpsToi8(SDValue N, const SDLoc &DL, SelectionDAG &DAG)
static SDValue lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::optional< VIDSequence > isSimpleVIDSequence(SDValue Op, unsigned EltSizeInBits)
static SDValue getDeinterleaveViaVNSRL(const SDLoc &DL, MVT VT, SDValue Src, bool EvenElts, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue lowerUADDSAT_USUBSAT(SDValue Op, SelectionDAG &DAG)
static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC)
static SDValue lowerVECTOR_SHUFFLEAsRotate(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc)
static SDValue lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG)
static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isValidEGW(int EGS, EVT VT, const RISCVSubtarget &Subtarget)
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isNonZeroAVL(SDValue AVL)
static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue combineBinOp_VLToVWBinOp_VL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
Combine a binary operation to its equivalent VW or VW_W form.
static SDValue getVCIXISDNodeWCHAIN(SDValue &Op, SelectionDAG &DAG, unsigned Type)
static ArrayRef< MCPhysReg > getFastCCArgGPRs(const RISCVABI::ABI ABI)
static const MCPhysReg ArgVRM8s[]
static MachineBasicBlock * emitReadCounterWidePseudo(MachineInstr &MI, MachineBasicBlock *BB)
static const MCPhysReg ArgVRM4s[]
static cl::opt< bool > AllowSplatInVW_W(DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden, cl::desc("Allow the formation of VW_W operations (e.g., " "VWADD_W) with splat constants"), cl::init(false))
static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const CCValAssign &HiVA, const SDLoc &DL)
static SDValue lowerSADDSAT_SSUBSAT(SDValue Op, SelectionDAG &DAG)
static SDValue getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL, unsigned Policy=RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED)
static SDValue tryMemPairCombine(SelectionDAG &DAG, LSBaseSDNode *LSNode1, LSBaseSDNode *LSNode2, SDValue BasePtr, uint64_t Imm)
static std::tuple< unsigned, SDValue, SDValue > getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT, const RISCVSubtarget &Subtarget)
static SDValue performFP_TO_INTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static const MCPhysReg ArgFPR16s[]
static SDValue combineBinOpOfExtractToReduceTree(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Perform two related transforms whose purpose is to incrementally recognize an explode_vector followed...
static SDValue performVFMADD_VLCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Try and optimize BUILD_VECTORs with "dominant values" - these are values which constitute a large pro...
static SDValue getVLOp(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG)
static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
If we have a build_vector where each lane is binop X, C, where C is a constant (but not necessarily t...
static const Intrinsic::ID FixedVssegIntrIds[]
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isCommutative(Instruction *I)
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static constexpr int Concat[]
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
void clearAllBits()
Set every bit to 0.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
APInt sext(unsigned width) const
Sign extend to a new width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
int64_t getSExtValue() const
Get sign extended value.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
An arbitrary precision integer that knows its signedness.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
StringRef getValueAsString() const
Return the attribute's value as a string.
static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
bool test(unsigned Idx) const
bool all() const
all - Returns true if all bits are set.
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
SmallVectorImpl< CCValAssign > & getPendingLocs()
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
This class represents a function call, abstracting a target machine's calling convention.
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getAPIntValue() const
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
Diagnostic information for unsupported feature in backend.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Argument * getArg(unsigned i) const
bool hasExternalWeakLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Load the specified register of the given register class from the specified stack frame index.
Common base class shared among various IRBuilders.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static InstructionCost getInvalid(CostType Val=0)
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static constexpr LLT scalable_vector(unsigned MinNumElements, unsigned ScalarSizeInBits)
Get a low-level scalable vector of some number of elements and element width.
constexpr bool isValid() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
const MDOperand & getOperand(unsigned I) const
static MVT getFloatingPointVT(unsigned BitWidth)
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
bool bitsLE(MVT VT) const
Return true if this has no more bits than VT.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static MVT getScalableVectorVT(MVT VT, unsigned NumElements)
MVT changeTypeToInteger()
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
uint64_t getScalarStoreSize() const
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool bitsGT(MVT VT) const
Return true if this has more bits than VT.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
MVT getDoubleNumVectorElementsVT() const
MVT getHalfNumVectorElementsVT() const
Return a VT for a vector type with the same element type but half the number of elements.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto integer_scalable_vector_valuetypes()
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
static auto fp_fixedlen_vector_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
void push_back(MachineInstr *MI)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Instructions::iterator instr_iterator
instr_iterator instr_end()
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
void setFlag(MIFlag Flag)
Set a MI flag.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
This is an abstract virtual class for memory operations.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
void setVarArgsFrameIndex(int Index)
int getVarArgsFrameIndex() const
void setVarArgsSaveSize(int Size)
void addSExt32Register(Register Reg)
RISCVABI::ABI getTargetABI() const
unsigned getMinimumJumpTableEntries() const
bool hasStdExtCOrZca() const
unsigned getMaxLMULForFixedLengthVectors() const
bool hasVInstructionsI64() const
bool hasVInstructionsF64() const
bool hasStdExtDOrZdinx() const
bool hasStdExtZfhOrZhinx() const
unsigned getRealMinVLen() const
bool useRVVForFixedLengthVectors() const
bool isTargetFuchsia() const
unsigned getDLenFactor() const
bool hasVInstructionsF16Minimal() const
bool hasConditionalMoveFusion() const
bool isRegisterReservedByUser(Register i) const
bool hasVInstructionsF16() const
bool hasVInstructionsBF16() const
unsigned getMaxBuildIntsCost() const
Align getPrefLoopAlignment() const
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
bool useConstantPoolForLargeInts() const
Align getPrefFunctionAlignment() const
bool hasStdExtZfhminOrZhinxmin() const
unsigned getRealMaxVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVInstrInfo * getInstrInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
bool hasVInstructionsF32() const
bool hasStdExtFOrZfinx() const
static std::pair< unsigned, unsigned > computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget)
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
InstructionCost getVRGatherVVCost(MVT VT) const
Return the cost of a vrgather.vv instruction for the type VT.
bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
std::pair< int, bool > getLegalZfaFPImm(const APFloat &Imm, EVT VT) const
RISCVTargetLowering(const TargetMachine &TM, const RISCVSubtarget &STI)
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const override
This method returns the constant pool value that will be loaded by LD.
const RISCVSubtarget & getSubtarget() const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, StoreInst *SI) const override
Lower an interleave intrinsic to a target specific store intrinsic.
bool preferScalarizeSplat(SDNode *N) const override
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool canSplatOperand(Instruction *I, int Operand) const
Return true if the (vector) instruction I will be lowered to an instruction with a scalar splat opera...
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
InstructionCost getVRGatherVICost(MVT VT) const
Return the cost of a vrgather.vi (or vx) instruction for the type VT.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Return true if it is beneficial to convert a load of a constant to just the constant itself.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
bool shouldScalarizeBinop(SDValue VecOp) const override
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
bool areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX, const MemSDNode &NodeY) const override
Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize, unsigned MinSize)
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
InstructionCost getLMULCost(MVT VT) const
Return the cost of LMUL for linear operations.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
InstructionCost getVSlideVICost(MVT VT) const
Return the cost of a vslidedown.vi or vslideup.vi instruction for the type VT.
bool fallBackToDAGISel(const Instruction &Inst) const override
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vlsegN intrinsic.
bool isCtpopFast(EVT VT) const override
Return true if ctpop instruction is fast.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
MVT getContainerForFixedLengthVector(MVT VT) const
static unsigned getRegClassIDForVecVT(MVT VT)
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Returns the register with the specified architectural or ABI name.
InstructionCost getVSlideVXCost(MVT VT) const
Return the cost of a vslidedown.vx or vslideup.vx instruction for the type VT.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul)
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const override
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vssegN intrinsic.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool isLegalElementTypeForRVV(EVT ScalarTy) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *II, LoadInst *LI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
static RISCVII::VLMUL getLMUL(MVT VT)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace, const DataLayout &) const
Returns whether or not generating an interleaved load/store intrinsic for this type will be legal.
bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override
Return true if Op can create undef or poison from non-undef & non-poison operands.
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true (by value) and sets the base pointer, offset, and addressing mode (by reference) if this node ...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true (by value) and sets the base pointer, offset, and addressing mode (by reference) if the node's...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts back into their original value.
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return true if the target supports combining a chain like '%and = and %val, #mask' followed by '%cmp = icmp eq %and, 0' into a single test-against-mask machine instruction.
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from SrcVT to DstVT is cheaper than zero-extension.
bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const
Return true if a strided load/store of the given result type and alignment is legal.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setCFIType(uint32_t Type)
bool isUndef() const
Return true if this node is an UNDEF node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUses uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
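A hypothetical lowering fragment showing how getSetCC and getSelect compose (DAG, DL, VT, CCVT, A, and B are assumed to be in scope in some lowering routine; this is a sketch, not code from this file):
// Build smax(A, B) from a compare and a select.
SDValue Cmp = DAG.getSetCC(DL, CCVT, A, B, ISD::SETGT);
SDValue Max = DAG.getSelect(DL, VT, Cmp, A, B);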
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool isSafeToSpeculativelyExecute(unsigned Opcode) const
Some opcodes may create immediate undefined behavior when used with some values (integer division-by-...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
bool shouldOptForSize() const
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
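The XOR-with-all-ones expansion is the same identity as bitwise NOT in plain C++; a minimal scalar illustration:
#include <cassert>
#include <cstdint>
// The DAG expansion (XOR Val, -1) matches the C identity ~x == x ^ -1.
void notIsXorAllOnes(int32_t X) { assert(~X == (X ^ -1)); }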
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG n...
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns the sum of the base pointer and offset.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending...
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
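A hedged example of how a combine might use this query (APInt::getHighBitsSet is real LLVM API; DAG and Op are assumed to be in scope):
// If the top 32 bits of a 64-bit Op are known zero, Op already fits in
// 32 bits, so narrowing it to i32 loses no information.
if (DAG.MaskedValueIsZero(Op, APInt::getHighBitsSet(/*numBits=*/64,
                                                    /*hiBitsSet=*/32))) {
  // ... safe to narrow Op here ...
}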
LLVMContext * getContext() const
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
This instruction constructs a fixed permutation of two input vectors.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
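Hand-worked masks make the shuffle-mask predicates above tangible (values derived from the documented semantics; -1 marks an undef lane):
static const int ReverseMask4[] = {3, 2, 1, 0};              // isReverseMask, NumSrcElts = 4
static const int Interleave2x4[] = {0, 4, 1, 5, 2, 6, 3, 7}; // isInterleaveMask, Factor = 2
static const int InsertSub[] = {0, 1, 8, 9, 4, 5, 6, 7};
// InsertSub inserts the 2-element subvector (lanes 8 and 9 of the
// second source) into the first source at index 2 (isInsertSubvectorMask).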
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static bool isSplatMask(const int *Mask, EVT VT)
int getSplatIndex() const
ArrayRef< int > getMask() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
std::string lower() const
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
virtual bool shouldFoldSelectWithSingleBitTest(EVT VT, const APInt &AndMask) const
virtual Value * getIRStackGuard(IRBuilderBase &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions. Ref: "Hacker's Delight" by Henry Warren, 1...
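For reference, the underlying Hacker's Delight identity: signed division by 2^k rounds toward zero, so a bias of 2^k - 1 is added only for negative dividends before the arithmetic shift. A scalar sketch of the math, not this helper's DAG output:
#include <cstdint>
// Signed divide by 4 (k = 2): (X >> 31) is all-ones exactly when X < 0,
// so the bias (1 << 2) - 1 = 3 is applied only to negative dividends.
// Example: sdivByFour(-7) == -1, matching -7 / 4 in C++.
int32_t sdivByFour(int32_t X) {
  int32_t Bias = (X >> 31) & 3;
  return (X + Bias) >> 2;
}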
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
Base class of all SIMD vector types.
constexpr ScalarTy getFixedValue() const
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ C
The default llvm calling convention, compatible with C.
bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 ...
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are composed of a linear sequence of unsign...
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns the current rounding mode: -1 = undefined, 0 = round toward zero, 1 = round to nearest (ties to even), 2 = round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
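The SHL/SRA pair the brief mentions is the classic in-register sign extension; e.g. extending the low 8 bits of a 32-bit value (a sketch assuming two's-complement arithmetic right shift, as C++20 guarantees):
#include <cstdint>
// Shift the byte up to the sign position, then arithmetic-shift it
// back down, replicating bit 7 into the upper 24 bits.
int32_t signExtendI8InI32(uint32_t X) {
  return static_cast<int32_t>(X << 24) >> 24;
}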
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
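Two small worked cases for the condition-code helpers above; both follow directly from the definitions:
// Inverse: !(X < Y) is (X >= Y), so SETLT inverts to SETGE.
// Swap: (X < Y) is the same as (Y > X), so SETLT swaps to SETGT.
ISD::CondCode Inv = ISD::getSetCCInverse(ISD::SETLT, MVT::i32); // SETGE
ISD::CondCode Swp = ISD::getSetCCSwappedOperands(ISD::SETLT);   // SETGT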
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
static const int FIRST_TARGET_STRICTFP_OPCODE
FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations which cannot raise FP exceptions s...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
ABI getTargetABI(StringRef ABIName)
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_Undef()
Match an arbitrary undef constant.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
@ TAIL_UNDISTURBED_MASK_UNDISTURBED
static int getFRMOpNum(const MCInstrDesc &Desc)
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #3 and #4) ...
@ STRICT_VFCVT_RTZ_XU_F_VL
@ LAST_RISCV_STRICTFP_OPCODE
@ STRICT_VFROUND_NOEXCEPT_VL
@ SPLAT_VECTOR_SPLIT_I64_VL
@ STRICT_VFCVT_RTZ_X_F_VL
int getLoadFPImm(APFloat FPImm)
getLoadFPImm - Return a 5-bit binary encoding of the floating-point immediate value.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
static unsigned decodeVSEW(unsigned VSEW)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional)
static unsigned encodeSEW(unsigned SEW)
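A hedged sketch of the vtype arithmetic these helpers encode: per the RISC-V V spec, the vsew field stores log2(SEW) - 3, so SEW 8/16/32/64 maps to 0/1/2/3. A standalone mirror of that mapping (an assumption-labeled sketch, not the RISCVVType implementation itself):
unsigned decodeVSEWSketch(unsigned VSEW) { return 1u << (VSEW + 3); } // 0->8 ... 3->64
unsigned encodeSEWSketch(unsigned SEW) {
  unsigned VSEW = 0;
  while ((8u << VSEW) != SEW) // SEW assumed to be in {8, 16, 32, 64}
    ++VSEW;
  return VSEW;
}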
static constexpr unsigned FPMASK_Negative_Zero
static constexpr unsigned FPMASK_Positive_Subnormal
static constexpr unsigned FPMASK_Positive_Normal
static constexpr unsigned FPMASK_Negative_Subnormal
static constexpr unsigned FPMASK_Negative_Normal
static constexpr unsigned FPMASK_Positive_Infinity
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
static constexpr unsigned FPMASK_Negative_Infinity
static constexpr unsigned FPMASK_Quiet_NaN
ArrayRef< MCPhysReg > getArgGPRs(const RISCVABI::ABI ABI)
bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static constexpr unsigned FPMASK_Signaling_NaN
static constexpr unsigned FPMASK_Positive_Zero
static constexpr unsigned RVVBitsPerBlock
bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, std::optional< unsigned > FirstMaskArgument)
bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, std::optional< unsigned > FirstMaskArgument)
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ Kill
The last use of a register.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
IterT next_nodbg(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It, then continue incrementing it while it points to a debug instruction.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
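Worked values for the bit-math helpers listed above (from llvm/Support/MathExtras.h; each follows from its one-line description):
#include <cassert>
void bitMathExamples() {
  assert(llvm::divideCeil(7, 2) == 4);   // ceil(7 / 2)
  assert(llvm::isPowerOf2_64(64) && !llvm::isPowerOf2_64(48));
  assert(llvm::Log2_64(64) == 6);        // floor(log2)
  assert(llvm::PowerOf2Ceil(48) == 64);  // round up to a power of two
}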
bool isReleaseOrStronger(AtomicOrdering AO)
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
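For example, 0x00FF is a low-bit mask while 0x0FF0 is not; since the signature above is constexpr, this can be checked at compile time:
// Masks must be contiguous ones starting at bit 0.
static_assert(llvm::isMask_64(0x00FF), "contiguous low ones");
static_assert(!llvm::isMask_64(0x0FF0), "must start at bit 0");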
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with n...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
RoundingMode
Rounding mode.
@ TowardZero
roundTowardZero.
@ NearestTiesToEven
roundTiesToEven.
@ TowardPositive
roundTowardPositive.
@ NearestTiesToAway
roundTiesToAway.
@ TowardNegative
roundTowardNegative.
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
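A worked case using the template form of the signature above: the low 8 bits of 0xFF read as the two's-complement value -1 once widened to 64 bits.
static_assert(llvm::SignExtend64<8>(0xFF) == -1, "negative low byte");
static_assert(llvm::SignExtend64<8>(0x7F) == 127, "positive low byte");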
unsigned Log2(Align A)
Returns the log2 of the alignment.
llvm::SmallVector< int, 16 > createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
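By its description, the helper emits Start, Start+1, ..., Start+NumInts-1 followed by NumUndefs undef sentinels; a hand-derived example (hedged):
// Expected result: {0, 1, 2, 3, -1, -1}, with -1 marking undef lanes.
llvm::SmallVector<int, 16> M =
    llvm::createSequentialMask(/*Start=*/0, /*NumInts=*/4, /*NumUndefs=*/2);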
bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static unsigned int semanticsPrecision(const fltSemantics &)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Helper struct to store a base, index and offset that forms an address.
Align getNonZeroOrigAlign() const
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
BitVector getReservedRegs(const MachineFunction &MF) const override
Register getFrameRegister(const MachineFunction &MF) const override
bool hasScalarOperand() const
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
const ConstantInt * CFIType
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isAfterLegalizeDAG() const
void AddToWorklist(SDNode *N)
bool recursivelyDeleteUnusedNodes(SDNode *N)
bool isBeforeLegalize() const
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)