#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-lower"

    cl::desc("Give the maximum size (in number of nodes) of the web of "
             "instructions that we will consider for VW expansion"),

    cl::desc("Allow the formation of VW_W operations (e.g., "
             "VWADD_W) with splat constants"),

    cl::desc("Set the minimum number of repetitions of a divisor to allow "
             "transformation to multiplications by the reciprocal"),

    cl::desc("Give the maximum number of instructions that we will "
             "use for creating a floating-point immediate value"),

    cl::desc("Make i32 a legal type for SelectionDAG on RV64."));
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
               "doesn't support the F instruction set extension (ignoring "
      !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
               "doesn't support the D instruction set extension (ignoring "
  if (Subtarget.hasStdExtZfhmin())
  if (Subtarget.hasStdExtZfbfmin())
  if (Subtarget.hasStdExtF())
  if (Subtarget.hasStdExtD())
  if (Subtarget.hasStdExtZhinxmin())
  if (Subtarget.hasStdExtZfinx())
  if (Subtarget.hasStdExtZdinx()) {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};

      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};

      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};

      MVT::nxv1bf16, MVT::nxv2bf16,  MVT::nxv4bf16,
      MVT::nxv8bf16, MVT::nxv16bf16, MVT::nxv32bf16};

      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};

      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
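  // These tables enumerate the scalable RVV value types, one list per element
  // type. An MVT such as nxv4i32 denotes a scalable vector of (4 x vscale)
  // i32 elements; roughly, the largest entry in each list (e.g. nxv8i64,
  // nxv64i8) is the most a single LMUL=8 register group can hold for that
  // element width.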
  auto addRegClassForRVV = [this](MVT VT) {
    if (VT.getVectorMinNumElements() < MinElts)
    unsigned Size = VT.getSizeInBits().getKnownMinValue();
      RC = &RISCV::VRRegClass;
      RC = &RISCV::VRM2RegClass;
      RC = &RISCV::VRM4RegClass;
      RC = &RISCV::VRM8RegClass;
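    // The register class is chosen from the type's known-minimum size,
    // measured in multiples of RISCV::RVVBitsPerBlock (64 bits): one block
    // maps to VR (LMUL <= 1), two to VRM2, four to VRM4, eight to VRM8. For
    // example, nxv4i32 has a known-minimum size of 128 bits = 2 blocks, so it
    // lives in VRM2, i.e. an LMUL=2 register group.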
  for (MVT VT : BoolVecVTs)
    addRegClassForRVV(VT);
  for (MVT VT : IntVecVTs) {
    if (VT.getVectorElementType() == MVT::i64 &&
    addRegClassForRVV(VT);
  for (MVT VT : F16VecVTs)
    addRegClassForRVV(VT);
  for (MVT VT : BF16VecVTs)
    addRegClassForRVV(VT);
  for (MVT VT : F32VecVTs)
    addRegClassForRVV(VT);
  for (MVT VT : F64VecVTs)
    addRegClassForRVV(VT);
  auto addRegClassForFixedVectors = [this](MVT VT) {
    if (useRVVForFixedLengthVectorVT(VT))
      addRegClassForFixedVectors(VT);
    if (useRVVForFixedLengthVectorVT(VT))
      addRegClassForFixedVectors(VT);
  if (!Subtarget.hasStdExtZbb() && !Subtarget.hasVendorXTHeadBb())
  if (!Subtarget.hasStdExtZbb())
  if (Subtarget.hasStdExtZbb()) {
      {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
  if (!Subtarget.hasStdExtM() && !Subtarget.hasStdExtZmmul()) {
  } else if (Subtarget.is64Bit()) {
  if (!Subtarget.hasStdExtM()) {
  } else if (Subtarget.is64Bit()) {
                       {MVT::i8, MVT::i16, MVT::i32}, Custom);
  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
  } else if (Subtarget.hasVendorXTHeadBb()) {
  } else if (Subtarget.hasVendorXCVbitmanip()) {
      (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
       Subtarget.hasVendorXTHeadBb())
      (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
       Subtarget.hasVendorXTHeadBb())
  if (Subtarget.hasVendorXCVbitmanip()) {
  if (Subtarget.hasStdExtZbb()) {
  } else if (!Subtarget.hasVendorXCVbitmanip()) {
  if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
      Subtarget.hasVendorXCVbitmanip()) {
  if (!Subtarget.hasStdExtZbb())
      !Subtarget.hasShortForwardBranchOpt())
  if (Subtarget.hasShortForwardBranchOpt())
  if (!Subtarget.hasVendorXTHeadCondMov()) {
  static const unsigned FPLegalNodeTypes[] = {
  static const unsigned FPOpToExpand[] = {
  static const unsigned FPRndMode[] = {
  static const unsigned ZfhminZfbfminPromoteOps[] = {
  if (Subtarget.hasStdExtZfbfmin()) {
  if (!Subtarget.hasStdExtZfa())
  if (Subtarget.hasStdExtZfa())
  if (Subtarget.hasStdExtZfa()) {
  if (Subtarget.hasStdExtZicbop()) {
  if (Subtarget.hasStdExtA()) {
    if (Subtarget.hasStdExtZabha() && Subtarget.hasStdExtZacas())
  } else if (Subtarget.hasForcedAtomics()) {
                     {MVT::i8, MVT::i16}, Custom);
  static const unsigned IntegerVPOps[] = {
      ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
      ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
      ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
      ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
      ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
      ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
      ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
      ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FP_TO_SINT,
      ISD::VP_FP_TO_UINT,  ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
      ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE,    ISD::VP_SMIN,
      ISD::VP_SMAX,        ISD::VP_UMIN,        ISD::VP_UMAX,
      ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE,
      ISD::VP_SADDSAT,     ISD::VP_UADDSAT,     ISD::VP_SSUBSAT,

  static const unsigned FloatingPointVPOps[] = {
      ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
      ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FABS,
      ISD::VP_FMA,         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
      ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
      ISD::VP_SELECT,      ISD::VP_SINT_TO_FP,  ISD::VP_UINT_TO_FP,
      ISD::VP_SETCC,       ISD::VP_FP_ROUND,    ISD::VP_FP_EXTEND,
      ISD::VP_SQRT,        ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,
      ISD::VP_FCEIL,       ISD::VP_FFLOOR,      ISD::VP_FROUND,
      ISD::VP_FROUNDEVEN,  ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO,
      ISD::VP_FRINT,       ISD::VP_FNEARBYINT,  ISD::VP_IS_FPCLASS,
      ISD::VP_FMINIMUM,    ISD::VP_FMAXIMUM,    ISD::VP_LRINT,
      ISD::VP_LLRINT,      ISD::EXPERIMENTAL_VP_REVERSE,
      ISD::EXPERIMENTAL_VP_SPLICE};
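  // Vector-predicated (VP) opcodes mirror their plain ISD counterparts but
  // carry two extra trailing operands: a mask and an explicit vector length
  // (EVL). At the IR level this corresponds to, e.g.:
  //   %r = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(
  //            <vscale x 2 x i32> %a, <vscale x 2 x i32> %b,
  //            <vscale x 2 x i1> %m, i32 %evl)
  // which maps naturally onto RVV, where every vector instruction is already
  // governed by vl and an optional v0 mask.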
  static const unsigned IntegerVecReduceOps[] = {

  static const unsigned FloatingPointVecReduceOps[] = {

                        ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,
                        ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
                        ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},

  for (MVT VT : BoolVecVTs) {
        {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
         ISD::VP_TRUNCATE, ISD::VP_SETCC},
  for (MVT VT : IntVecVTs) {
    if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
        {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
    if (Subtarget.hasStdExtZvkb()) {
    if (Subtarget.hasStdExtZvbb()) {
           ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
           ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP},
           ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ_ZERO_UNDEF},

  static const unsigned ZvfhminPromoteOps[] = {

  static const unsigned ZvfhminPromoteVPOps[] = {
      ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
      ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FABS,
      ISD::VP_FMA,         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
      ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_SQRT,
      ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,     ISD::VP_FCEIL,
      ISD::VP_FFLOOR,      ISD::VP_FROUND,      ISD::VP_FROUNDEVEN,
      ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO, ISD::VP_FRINT,
      ISD::VP_FNEARBYINT,  ISD::VP_SETCC,       ISD::VP_FMINIMUM,
  const auto SetCommonVFPActions = [&](MVT VT) {
        {ISD::VP_LOAD, ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
         ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER, ISD::VP_SCATTER},
  const auto SetCommonVFPExtLoadTruncStoreActions =
      [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
        for (auto SmallVT : SmallerVTs) {
    for (MVT VT : F16VecVTs) {
      SetCommonVFPActions(VT);
    for (MVT VT : F16VecVTs) {
           ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
      if (VT == MVT::nxv32f16) {
    for (MVT VT : F32VecVTs) {
      SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    for (MVT VT : F64VecVTs) {
      SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
      if (!useRVVForFixedLengthVectorVT(VT))
          {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
           ISD::VP_SETCC, ISD::VP_TRUNCATE},
           ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
           ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
      if (Subtarget.hasStdExtZvkb())
      if (Subtarget.hasStdExtZvbb()) {
      if (!useRVVForFixedLengthVectorVT(VT))
           ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
           ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
           ISD::EXPERIMENTAL_VP_STRIDED_STORE, ISD::VP_GATHER,
  if (Subtarget.hasStdExtA()) {
  if (Subtarget.hasForcedAtomics()) {
  if (Subtarget.hasVendorXTHeadMemIdx()) {
  if (Subtarget.hasStdExtZbb())
  if (Subtarget.hasStdExtZbs() && Subtarget.is64Bit())
  if (Subtarget.hasStdExtZbkb())
                       ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL,
  if (Subtarget.hasVendorXTHeadMemPair())
MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {

bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
                                                      bool IsScalable) const {
  if (TripCountVT != MVT::i32 && TripCountVT != Subtarget.getXLenVT())

                                                 unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
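  // SetRVVLoadStoreInfo (below) fills in the IntrinsicInfo record (memVT,
  // pointer, alignment, flags) for an RVV load/store intrinsic so later
  // passes can attach a MachineMemOperand. Roughly: PtrOp is the index of the
  // pointer argument, IsUnitStrided marks accesses that touch one contiguous
  // vector's worth of memory, and UsePtrVal keeps the IR pointer itself
  // instead of only its address space.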
  auto SetRVVLoadStoreInfo = [&](unsigned PtrOp, bool IsStore,
                                 bool IsUnitStrided, bool UsePtrVal = false) {
      Info.ptrVal = I.getArgOperand(PtrOp);
      Info.fallbackAddressSpace =
          I.getArgOperand(PtrOp)->getType()->getPointerAddressSpace();
      MemTy = I.getArgOperand(0)->getType();
      MemTy = I.getType();
    if (I.hasMetadata(LLVMContext::MD_nontemporal))
  switch (Intrinsic) {
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::riscv_masked_strided_load:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
  case Intrinsic::riscv_masked_strided_store:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
  case Intrinsic::riscv_seg2_store:
  case Intrinsic::riscv_seg3_store:
  case Intrinsic::riscv_seg4_store:
  case Intrinsic::riscv_seg5_store:
  case Intrinsic::riscv_seg6_store:
  case Intrinsic::riscv_seg7_store:
  case Intrinsic::riscv_seg8_store:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vleff:
  case Intrinsic::riscv_vleff_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask:
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask:
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
  case Intrinsic::riscv_vlseg2:
  case Intrinsic::riscv_vlseg3:
  case Intrinsic::riscv_vlseg4:
  case Intrinsic::riscv_vlseg5:
  case Intrinsic::riscv_vlseg6:
  case Intrinsic::riscv_vlseg7:
  case Intrinsic::riscv_vlseg8:
  case Intrinsic::riscv_vlseg2ff:
  case Intrinsic::riscv_vlseg3ff:
  case Intrinsic::riscv_vlseg4ff:
  case Intrinsic::riscv_vlseg5ff:
  case Intrinsic::riscv_vlseg6ff:
  case Intrinsic::riscv_vlseg7ff:
  case Intrinsic::riscv_vlseg8ff:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
  case Intrinsic::riscv_vlseg2_mask:
  case Intrinsic::riscv_vlseg3_mask:
  case Intrinsic::riscv_vlseg4_mask:
  case Intrinsic::riscv_vlseg5_mask:
  case Intrinsic::riscv_vlseg6_mask:
  case Intrinsic::riscv_vlseg7_mask:
  case Intrinsic::riscv_vlseg8_mask:
  case Intrinsic::riscv_vlseg2ff_mask:
  case Intrinsic::riscv_vlseg3ff_mask:
  case Intrinsic::riscv_vlseg4ff_mask:
  case Intrinsic::riscv_vlseg5ff_mask:
  case Intrinsic::riscv_vlseg6ff_mask:
  case Intrinsic::riscv_vlseg7ff_mask:
  case Intrinsic::riscv_vlseg8ff_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
  case Intrinsic::riscv_vlsseg2:
  case Intrinsic::riscv_vlsseg3:
  case Intrinsic::riscv_vlsseg4:
  case Intrinsic::riscv_vlsseg5:
  case Intrinsic::riscv_vlsseg6:
  case Intrinsic::riscv_vlsseg7:
  case Intrinsic::riscv_vlsseg8:
  case Intrinsic::riscv_vloxseg2:
  case Intrinsic::riscv_vloxseg3:
  case Intrinsic::riscv_vloxseg4:
  case Intrinsic::riscv_vloxseg5:
  case Intrinsic::riscv_vloxseg6:
  case Intrinsic::riscv_vloxseg7:
  case Intrinsic::riscv_vloxseg8:
  case Intrinsic::riscv_vluxseg2:
  case Intrinsic::riscv_vluxseg3:
  case Intrinsic::riscv_vluxseg4:
  case Intrinsic::riscv_vluxseg5:
  case Intrinsic::riscv_vluxseg6:
  case Intrinsic::riscv_vluxseg7:
  case Intrinsic::riscv_vluxseg8:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
  case Intrinsic::riscv_vlsseg2_mask:
  case Intrinsic::riscv_vlsseg3_mask:
  case Intrinsic::riscv_vlsseg4_mask:
  case Intrinsic::riscv_vlsseg5_mask:
  case Intrinsic::riscv_vlsseg6_mask:
  case Intrinsic::riscv_vlsseg7_mask:
  case Intrinsic::riscv_vlsseg8_mask:
  case Intrinsic::riscv_vloxseg2_mask:
  case Intrinsic::riscv_vloxseg3_mask:
  case Intrinsic::riscv_vloxseg4_mask:
  case Intrinsic::riscv_vloxseg5_mask:
  case Intrinsic::riscv_vloxseg6_mask:
  case Intrinsic::riscv_vloxseg7_mask:
  case Intrinsic::riscv_vloxseg8_mask:
  case Intrinsic::riscv_vluxseg2_mask:
  case Intrinsic::riscv_vluxseg3_mask:
  case Intrinsic::riscv_vluxseg4_mask:
  case Intrinsic::riscv_vluxseg5_mask:
  case Intrinsic::riscv_vluxseg6_mask:
  case Intrinsic::riscv_vluxseg7_mask:
  case Intrinsic::riscv_vluxseg8_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 5,
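  // The pointer-operand index above is computed from the back of the argument
  // list because the segment-load intrinsics share a common tail layout:
  // vlseg<N> ends with (..., ptr, vl), so the pointer sits at arg_size() - 2;
  // vlsseg<N>/vl[ou]xseg<N> add a stride/index operand (arg_size() - 3); and
  // the _mask variants append a mask and a policy operand, pushing the
  // pointer two further slots from the end (arg_size() - 4 and - 5).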
  case Intrinsic::riscv_vsseg2:
  case Intrinsic::riscv_vsseg3:
  case Intrinsic::riscv_vsseg4:
  case Intrinsic::riscv_vsseg5:
  case Intrinsic::riscv_vsseg6:
  case Intrinsic::riscv_vsseg7:
  case Intrinsic::riscv_vsseg8:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
  case Intrinsic::riscv_vsseg2_mask:
  case Intrinsic::riscv_vsseg3_mask:
  case Intrinsic::riscv_vsseg4_mask:
  case Intrinsic::riscv_vsseg5_mask:
  case Intrinsic::riscv_vsseg6_mask:
  case Intrinsic::riscv_vsseg7_mask:
  case Intrinsic::riscv_vsseg8_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
  case Intrinsic::riscv_vssseg2:
  case Intrinsic::riscv_vssseg3:
  case Intrinsic::riscv_vssseg4:
  case Intrinsic::riscv_vssseg5:
  case Intrinsic::riscv_vssseg6:
  case Intrinsic::riscv_vssseg7:
  case Intrinsic::riscv_vssseg8:
  case Intrinsic::riscv_vsoxseg2:
  case Intrinsic::riscv_vsoxseg3:
  case Intrinsic::riscv_vsoxseg4:
  case Intrinsic::riscv_vsoxseg5:
  case Intrinsic::riscv_vsoxseg6:
  case Intrinsic::riscv_vsoxseg7:
  case Intrinsic::riscv_vsoxseg8:
  case Intrinsic::riscv_vsuxseg2:
  case Intrinsic::riscv_vsuxseg3:
  case Intrinsic::riscv_vsuxseg4:
  case Intrinsic::riscv_vsuxseg5:
  case Intrinsic::riscv_vsuxseg6:
  case Intrinsic::riscv_vsuxseg7:
  case Intrinsic::riscv_vsuxseg8:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 3,
  case Intrinsic::riscv_vssseg2_mask:
  case Intrinsic::riscv_vssseg3_mask:
  case Intrinsic::riscv_vssseg4_mask:
  case Intrinsic::riscv_vssseg5_mask:
  case Intrinsic::riscv_vssseg6_mask:
  case Intrinsic::riscv_vssseg7_mask:
  case Intrinsic::riscv_vssseg8_mask:
  case Intrinsic::riscv_vsoxseg2_mask:
  case Intrinsic::riscv_vsoxseg3_mask:
  case Intrinsic::riscv_vsoxseg4_mask:
  case Intrinsic::riscv_vsoxseg5_mask:
  case Intrinsic::riscv_vsoxseg6_mask:
  case Intrinsic::riscv_vsoxseg7_mask:
  case Intrinsic::riscv_vsoxseg8_mask:
  case Intrinsic::riscv_vsuxseg2_mask:
  case Intrinsic::riscv_vsuxseg3_mask:
  case Intrinsic::riscv_vsuxseg4_mask:
  case Intrinsic::riscv_vsuxseg5_mask:
  case Intrinsic::riscv_vsuxseg6_mask:
  case Intrinsic::riscv_vsuxseg7_mask:
  case Intrinsic::riscv_vsuxseg8_mask:
    return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 4,
  return isInt<12>(Imm);
  return isInt<12>(Imm);
  return (SrcBits == 64 && DestBits == 32);
  return (SrcBits == 64 && DestBits == 32);
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
  return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXCVbitmanip();
  return Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
         Subtarget.hasVendorXCVbitmanip();
  if (!Subtarget.hasStdExtZbs() && !Subtarget.hasVendorXTHeadBs())
  return !Mask->getValue().isSignedIntN(12) && Mask->getValue().isPowerOf2();
  EVT VT = Y.getValueType();
  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
         !isa<ConstantSDNode>(Y);
  if (Subtarget.hasStdExtZbs())
    return X.getValueType().isScalarInteger();
  auto *C = dyn_cast<ConstantSDNode>(Y);
  if (Subtarget.hasVendorXTHeadBs())
    return C != nullptr;
  return C && C->getAPIntValue().ule(10);
  if (BitSize > Subtarget.getXLen())
  int64_t Val = Imm.getSExtValue();
  if (!Subtarget.hasFastUnalignedAccess())
                                       unsigned OldShiftOpcode,
                                       unsigned NewShiftOpcode,
  if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
  if (NewShiftOpcode == ISD::SRL && CC->isOne())
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FAdd:
  case Instruction::FSub:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return Operand == 1;
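  // The switch above encodes which operand of a scalar IR instruction may be
  // a vector splat and still match an RVV .vx/.vf/.vi form: most arithmetic
  // and compare ops accept a scalar on either side (vrsub.vx covers sub's
  // left operand), while shifts, divides and remainders only have forms with
  // a scalar second operand, hence "Operand == 1". The intrinsic switch
  // below mirrors the same rule for the llvm.vp.* counterparts.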
1987 auto *II = dyn_cast<IntrinsicInst>(
I);
1991 switch (II->getIntrinsicID()) {
1992 case Intrinsic::fma:
1993 case Intrinsic::vp_fma:
1994 return Operand == 0 || Operand == 1;
1995 case Intrinsic::vp_shl:
1996 case Intrinsic::vp_lshr:
1997 case Intrinsic::vp_ashr:
1998 case Intrinsic::vp_udiv:
1999 case Intrinsic::vp_sdiv:
2000 case Intrinsic::vp_urem:
2001 case Intrinsic::vp_srem:
2002 case Intrinsic::ssub_sat:
2003 case Intrinsic::vp_ssub_sat:
2004 case Intrinsic::usub_sat:
2005 case Intrinsic::vp_usub_sat:
2006 return Operand == 1;
2008 case Intrinsic::vp_add:
2009 case Intrinsic::vp_mul:
2010 case Intrinsic::vp_and:
2011 case Intrinsic::vp_or:
2012 case Intrinsic::vp_xor:
2013 case Intrinsic::vp_fadd:
2014 case Intrinsic::vp_fmul:
2015 case Intrinsic::vp_icmp:
2016 case Intrinsic::vp_fcmp:
2017 case Intrinsic::smin:
2018 case Intrinsic::vp_smin:
2019 case Intrinsic::umin:
2020 case Intrinsic::vp_umin:
2021 case Intrinsic::smax:
2022 case Intrinsic::vp_smax:
2023 case Intrinsic::umax:
2024 case Intrinsic::vp_umax:
2025 case Intrinsic::sadd_sat:
2026 case Intrinsic::vp_sadd_sat:
2027 case Intrinsic::uadd_sat:
2028 case Intrinsic::vp_uadd_sat:
2030 case Intrinsic::vp_sub:
2031 case Intrinsic::vp_fsub:
2032 case Intrinsic::vp_fdiv:
2033 return Operand == 0 || Operand == 1;
2054 if (!Subtarget.sinkSplatOperands())
2057 for (
auto OpIdx :
enumerate(
I->operands())) {
2061 Instruction *
Op = dyn_cast<Instruction>(OpIdx.value().get());
2063 if (!
Op ||
any_of(Ops, [&](
Use *U) {
return U->get() ==
Op; }))
2072 if (cast<VectorType>(
Op->getType())->getElementType()->isIntegerTy(1))
2077 for (
Use &U :
Op->uses()) {
  if (!Subtarget.hasStdExtZfa())
    return std::make_pair(-1, false);

  bool IsSupportedVT = false;
  if (VT == MVT::f16) {
    IsSupportedVT = Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZvfh();
  } else if (VT == MVT::f32) {
    IsSupportedVT = true;
  } else if (VT == MVT::f64) {
    assert(Subtarget.hasStdExtD() && "Expect D extension");
    IsSupportedVT = true;
    return std::make_pair(-1, false);
  if (Index < 0 && Imm.isNegative())
  return std::make_pair(Index, false);
                                       bool ForCodeSize) const {
  bool IsLegalVT = false;
  else if (VT == MVT::f32)
  else if (VT == MVT::f64)
  else if (VT == MVT::bf16)
    IsLegalVT = Subtarget.hasStdExtZfbfmin();
    return Imm.isZero();
  if (Imm.isNegZero())
                                               unsigned Index) const {
  if (EltVT == MVT::i1)
  if (Index + ResElts <= MinVLMAX && Index < 31)
  if ((ResElts * 2) != SrcElts)
    unsigned &NumIntermediates, MVT &RegisterVT) const {
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
    IntermediateVT = MVT::i64;
    RegisterVT = MVT::i64;
2292 isa<ConstantSDNode>(
LHS.getOperand(1))) {
2298 ShAmt =
LHS.getValueSizeInBits() - 1 -
Log2_64(Mask);
2311 if (
auto *RHSC = dyn_cast<ConstantSDNode>(
RHS)) {
2312 int64_t
C = RHSC->getSExtValue();
2354 switch (KnownSize) {
2382 return RISCV::VRRegClassID;
2384 return RISCV::VRM2RegClassID;
2386 return RISCV::VRM4RegClassID;
2388 return RISCV::VRM8RegClassID;
2398 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
2399 "Unexpected subreg numbering");
2400 return RISCV::sub_vrm1_0 +
Index;
2403 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
2404 "Unexpected subreg numbering");
2405 return RISCV::sub_vrm2_0 +
Index;
2408 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
2409 "Unexpected subreg numbering");
2410 return RISCV::sub_vrm4_0 +
Index;
2417 return RISCV::VRRegClassID;
2426std::pair<unsigned, unsigned>
2428 MVT VecVT,
MVT SubVecVT,
unsigned InsertExtractIdx,
2430 static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
2431 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
2432 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
2433 "Register classes not ordered");
2442 unsigned SubRegIdx = RISCV::NoSubRegister;
2443 for (
const unsigned RCID :
2444 {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
2445 if (VecRegClassID > RCID && SubRegClassID <= RCID) {
2449 SubRegIdx =
TRI->composeSubRegIndices(SubRegIdx,
2454 return {SubRegIdx, InsertExtractIdx};
2459bool RISCVTargetLowering::mergeStoresAfterLegalization(
EVT VT)
const {
2488unsigned RISCVTargetLowering::combineRepeatedFPDivisors()
const {
2495 "Unexpected opcode");
2497 unsigned IntNo =
Op.getConstantOperandVal(HasChain ? 1 : 0);
2499 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
2502 return Op.getOperand(II->
VLOperand + 1 + HasChain);
bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
         "Expected legal fixed length vector!");
  unsigned MaxELen = Subtarget.getELen();
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  const auto [MinVLMAX, MaxVLMAX] =
  if (MinVLMAX == MaxVLMAX && NumElts == MinVLMAX)

static std::pair<SDValue, SDValue>

static std::pair<SDValue, SDValue>

static std::pair<SDValue, SDValue>

std::pair<unsigned, unsigned>
  return std::make_pair(MinVLMAX, MaxVLMAX);
    EVT VT, unsigned DefinedValues) const {
  std::tie(LMul, Fractional) =
    Cost = LMul <= DLenFactor ? (DLenFactor / LMul) : 1;
    Cost = (LMul * DLenFactor);
  MVT DstVT = Op.getSimpleValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
      Src.getValueType() == MVT::bf16) {
  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
                     Opc, DL, DstVT, Src,
  MVT SrcVT = Src.getSimpleValueType();
  if (SatVT != DstEltVT)
  if (SrcEltSize > (2 * DstEltSize))
  MVT DstContainerVT = DstVT;
  MVT SrcContainerVT = SrcVT;
         "Expected same element count");
      {Src, Src, DAG.getCondCode(ISD::SETNE),
       DAG.getUNDEF(Mask.getValueType()), Mask, VL});
  if (DstEltSize > (2 * SrcEltSize)) {
                    Res, DAG.getUNDEF(DstContainerVT), VL);
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO:
  case ISD::VP_FFLOOR:
  case ISD::VP_FROUND:
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  if (Op->isVPOpcode()) {
    Mask = Op.getOperand(1);
    VL = Op.getOperand(2);
                               DAG.getUNDEF(ContainerVT), MaxValNode, VL);
  switch (Op.getOpcode()) {
  case ISD::VP_FFLOOR:
  case ISD::VP_FROUND:
  case ISD::VP_FROUNDEVEN:
  case ISD::VP_FROUNDTOZERO: {
  case ISD::VP_FNEARBYINT:
                        Src, Src, Mask, VL);
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  MVT MaskVT = Mask.getSimpleValueType();
      {Chain, Src, Src, DAG.getCondCode(ISD::SETUNE),
       DAG.getUNDEF(MaskVT), Mask, VL});
      {Chain, Src, Src, DAG.getUNDEF(ContainerVT), Unorder, VL});
  Chain = Src.getValue(1);
                               DAG.getUNDEF(ContainerVT), MaxValNode, VL);
  switch (Op.getOpcode()) {
        {Chain, Src, Mask, DAG.getTargetConstant(FRM, DL, XLenVT), VL});
        DAG.getVTList(IntVT, MVT::Other), Chain, Src, Mask, VL);
        DAG.getVTList(ContainerVT, MVT::Other), Chain, Src,
        DAG.getVTList(ContainerVT, MVT::Other), Chain,
        Truncated, Mask, VL);
                        Src, Src, Mask, VL);
  MVT VT = Op.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  if (Merge.isUndef())
  if (Merge.isUndef())
         "Unexpected vector MVT");
    return std::nullopt;
                                                unsigned EltSizeInBits) {
    return std::nullopt;
  bool IsInteger = Op.getValueType().isInteger();

  std::optional<unsigned> SeqStepDenom;
  std::optional<int64_t> SeqStepNum, SeqAddend;
  std::optional<std::pair<uint64_t, unsigned>> PrevElt;
  assert(EltSizeInBits >= Op.getValueType().getScalarSizeInBits());
    const unsigned OpSize = Op.getScalarValueSizeInBits();
    if (Elt.isUndef()) {
      Elts[Idx] = std::nullopt;
      Elts[Idx] = Elt->getAsZExtVal() & maskTrailingOnes<uint64_t>(OpSize);
        return std::nullopt;
      Elts[Idx] = *ExactInteger;
      unsigned IdxDiff = Idx - PrevElt->second;
      int64_t ValDiff = SignExtend64(*Elt - PrevElt->first, EltSizeInBits);
      int64_t Remainder = ValDiff % IdxDiff;
      if (Remainder != ValDiff) {
          return std::nullopt;
        SeqStepNum = ValDiff;
      else if (ValDiff != SeqStepNum)
        return std::nullopt;
        SeqStepDenom = IdxDiff;
      else if (IdxDiff != *SeqStepDenom)
        return std::nullopt;
    if (!PrevElt || PrevElt->first != *Elt)
      PrevElt = std::make_pair(*Elt, Idx);
  if (!SeqStepNum || !SeqStepDenom)
    return std::nullopt;
        (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
    int64_t Addend = SignExtend64(*Elt - ExpectedVal, EltSizeInBits);
    else if (Addend != SeqAddend)
      return std::nullopt;
  assert(SeqAddend && "Must have an addend if we have a step");
  return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
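// Worked example: the build_vector <1, 3, 5, 7> matches a VID sequence with
// StepNumerator = 2, StepDenominator = 1 and Addend = 1, i.e. each element
// equals (VID * 2) / 1 + 1, so it can be synthesized as vid.v followed by a
// shift or multiply plus an add. Roughly, a fractional sequence such as
// <0, 0, 1, 1> instead yields StepNumerator = 1, StepDenominator = 2
// (vid.v followed by a right shift).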
  MVT ContainerVT = VT;
  MVT VT = Op.getSimpleValueType();
  unsigned MostCommonCount = 0;
  unsigned NumUndefElts =
  unsigned NumScalarLoads = 0;
    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];
    if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
      NumScalarLoads += !CFP->isExactlyValue(+0.0);
    if (++Count >= MostCommonCount) {
      MostCommonCount = Count;
  assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
  unsigned NumDefElts = NumElts - NumUndefElts;
  unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
      ((MostCommonCount > DominantValueCountThreshold) ||
      !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
      LastOp != DominantValue) {
    Processed.insert(LastOp);
      const SDValue &V = OpIdx.value();
      if (V.isUndef() || !Processed.insert(V).second)
      if (ValueCounts[V] == 1) {
            return DAG.getConstant(V == V1, DL, XLenVT);
  MVT VT = Op.getSimpleValueType();
    unsigned NumViaIntegerBits = std::clamp(NumElts, 8u, Subtarget.getXLen());
    NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELen());
      unsigned IntegerViaVecElts = divideCeil(NumElts, NumViaIntegerBits);
      MVT IntegerViaVecVT =
      unsigned BitPos = 0, IntegerEltIdx = 0;
      for (unsigned I = 0; I < NumElts;) {
        bool BitValue = !V.isUndef() && V->getAsZExtVal();
        Bits |= ((uint64_t)BitValue << BitPos);
        if (I % NumViaIntegerBits == 0 || I == NumElts) {
          if (NumViaIntegerBits <= 32)
            Bits = SignExtend64<32>(Bits);
          Elts[IntegerEltIdx] = Elt;
      if (NumElts < NumViaIntegerBits) {
        assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
    int64_t StepNumerator = SimpleVID->StepNumerator;
    unsigned StepDenominator = SimpleVID->StepDenominator;
    int64_t Addend = SimpleVID->Addend;
    assert(StepNumerator != 0 && "Invalid step");
    bool Negate = false;
    int64_t SplatStepVal = StepNumerator;
    if (StepNumerator != 1 && StepNumerator != INT64_MIN &&
      Negate = StepNumerator < 0;
      SplatStepVal = Log2_64(std::abs(StepNumerator));
    if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
         (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
        (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
      MVT VIDContainerVT =
      if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
          (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
        VID = DAG.getNode(StepOpcode, DL, VIDVT, VID, SplatStep);
      if (StepDenominator != 1) {
      if (Addend != 0 || Negate) {
    assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32) &&
           "Unexpected sequence type");
    unsigned ViaVecLen =
    uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
    for (const auto &OpIdx : enumerate(Op->op_values())) {
      const auto &SeqV = OpIdx.value();
      if (!SeqV.isUndef())
            ((SeqV->getAsZExtVal() & EltMask) << (OpIdx.index() * EltBitSize));
    if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
      SplatValue = SignExtend64<32>(SplatValue);
3726 const auto *BV = cast<BuildVectorSDNode>(
Op);
3729 BV->getRepeatedSequence(Sequence) &&
3730 (Sequence.size() * EltBitSize) <= Subtarget.
getELen()) {
3731 unsigned SeqLen = Sequence.size();
3733 assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
3734 ViaIntVT == MVT::i64) &&
3735 "Unexpected sequence type");
3740 const unsigned RequiredVL = NumElts / SeqLen;
3741 const unsigned ViaVecLen =
3743 NumElts : RequiredVL;
3746 unsigned EltIdx = 0;
3747 uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
3751 for (
const auto &SeqV : Sequence) {
3752 if (!SeqV.isUndef())
3754 ((SeqV->getAsZExtVal() & EltMask) << (EltIdx * EltBitSize));
3760 if (Subtarget.
is64Bit() && ViaIntVT == MVT::i32)
3761 SplatValue = SignExtend64<32>(SplatValue);
3767 (!Subtarget.
is64Bit() && ViaIntVT == MVT::i64)) &&
3768 "Unexpected bitcast sequence");
3769 if (ViaIntVT.
bitsLE(XLenVT) || isInt<32>(SplatValue)) {
3772 MVT ViaContainerVT =
3779 if (ViaVecLen != RequiredVL)
3796 if (EltBitSize - SignBits < 8) {
3800 Source, DAG, Subtarget);
  MVT VT = Op.getSimpleValueType();
      auto OneVRegOfOps = ArrayRef(BuildVectorOps).slice(i, ElemsPerVReg);
      unsigned InsertIdx = (i / ElemsPerVReg) * NumOpElts;
  unsigned NumUndefElts =
  unsigned NumDefElts = NumElts - NumUndefElts;
  if (NumDefElts >= 8 && NumDefElts > NumElts / 2 &&
    for (unsigned i = 0; i < NumElts; i++) {
      if (i < NumElts / 2) {
      bool SelectMaskVal = (i < NumElts / 2);
    assert(SubVecAOps.size() == NumElts && SubVecBOps.size() == NumElts &&
           MaskVals.size() == NumElts);
  unsigned UndefCount = 0;
      LinearBudget -= PerSlideCost;
      LinearBudget -= PerSlideCost;
      LinearBudget -= PerSlideCost;
  if (LinearBudget < 0)
         "Illegal type which will result in reserved encoding");
                      Vec, Offset, Mask, VL, Policy);
                      Vec, Offset, Mask, VL, Policy);
4036 if (isa<ConstantSDNode>(
Lo) && isa<ConstantSDNode>(
Hi)) {
4037 int32_t LoC = cast<ConstantSDNode>(
Lo)->getSExtValue();
4038 int32_t HiC = cast<ConstantSDNode>(
Hi)->getSExtValue();
4041 if ((LoC >> 31) == HiC)
4052 (isa<RegisterSDNode>(VL) &&
4053 cast<RegisterSDNode>(VL)->
getReg() == RISCV::X0))
4055 else if (isa<ConstantSDNode>(VL) && isUInt<4>(VL->
getAsZExtVal()))
4070 isa<ConstantSDNode>(
Hi.getOperand(1)) &&
4071 Hi.getConstantOperandVal(1) == 31)
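  // On RV32, an i64 splat must be assembled from two 32-bit halves. The
  // checks above spot the cases where one vmv.v.x suffices: if Hi is the
  // constant sign-extension of Lo ((LoC >> 31) == HiC), or Hi is computed as
  // (Lo >> 31) by an sra of the same value, then the hardware's own sign
  // extension of the XLEN scalar into an SEW=64 element reproduces the pair.
  // E.g. Lo = -5, Hi = -1 splats as a single vmv.v.x of -5.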
  assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
  bool HasPassthru = Passthru && !Passthru.isUndef();
  if (!HasPassthru && !Passthru)
  if (Scalar.getValueType().bitsLE(XLenVT)) {
    Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
  assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
         "Unexpected scalar for splat lowering!");
    SDValue ExtractedVal = Scalar.getOperand(0);
    MVT ExtractedContainerVT = ExtractedVT;
                                      DAG, ExtractedContainerVT, Subtarget);
                                  ExtractedVal, DAG, Subtarget);
    if (ExtractedContainerVT.bitsLE(VT))
  if (!Scalar.getValueType().bitsLE(XLenVT))
                        VT, DL, DAG, Subtarget);
  Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
  if (Src != V2.getOperand(0))
  if (Src.getValueType().getVectorNumElements() != (Mask.size() * 2))
      V2.getConstantOperandVal(1) != Mask.size())
  if (Mask[0] != 0 && Mask[0] != 1)
  for (unsigned i = 1; i != Mask.size(); ++i)
    if (Mask[i] != Mask[i - 1] + 2)
  int Size = Mask.size();
  assert(Size == (int)NumElts && "Unexpected mask size");
  EvenSrc = StartIndexes[0];
  OddSrc = StartIndexes[1];
  if (EvenSrc != 0 && OddSrc != 0)
  int HalfNumElts = NumElts / 2;
  return ((EvenSrc % HalfNumElts) == 0) && ((OddSrc % HalfNumElts) == 0);
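// Example of the interleave pattern being matched: with NumElts = 8, the
// mask <0, 8, 1, 9, 2, 10, 3, 11> zips the low half of V1 (even result
// positions) with the low half of V2 (odd positions), giving EvenSrc = 0 and
// OddSrc = 8. The final check only requires each source run to start at a
// half-vector boundary, which is what the widening (vwaddu/vwmaccu style)
// interleave lowering expects.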
  int Size = Mask.size();
  for (int i = 0; i != Size; ++i) {
    int StartIdx = i - (M % Size);
    int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
    int MaskSrc = M < Size ? 0 : 1;
    int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
      TargetSrc = MaskSrc;
    else if (TargetSrc != MaskSrc)
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((LoSrc >= 0 || HiSrc >= 0) &&
         "Failed to find a rotated input vector!");
  MVT ContainerVT = VT;
  assert(Src.getSimpleValueType().isFixedLengthVector());
  MVT SrcContainerVT =
  Src = DAG.getBitcast(WideSrcContainerVT, Src);
  unsigned Shift = EvenElts ? 0 : EltBits;
                           DAG.getUNDEF(IntContainerVT), TrueMask, VL);
  auto findNonEXTRACT_SUBVECTORParent =
      [](SDValue Parent) -> std::pair<SDValue, uint64_t> {
        Parent.getOperand(0).getSimpleValueType().isFixedLengthVector()) {
      Offset += Parent.getConstantOperandVal(1);
      Parent = Parent.getOperand(0);
    return std::make_pair(Parent, Offset);
  auto [V1Src, V1IndexOffset] = findNonEXTRACT_SUBVECTORParent(V1);
  auto [V2Src, V2IndexOffset] = findNonEXTRACT_SUBVECTORParent(V2);
  for (size_t i = 0; i != NewMask.size(); ++i) {
    if (NewMask[i] == -1)
    if (static_cast<size_t>(NewMask[i]) < NewMask.size()) {
      NewMask[i] = NewMask[i] + V1IndexOffset;
      NewMask[i] = NewMask[i] - NewMask.size() + V2IndexOffset;
  if (NewMask[0] <= 0)
  for (unsigned i = 1; i != NewMask.size(); ++i)
    if (NewMask[i - 1] + 1 != NewMask[i])
  MVT SrcVT = Src.getSimpleValueType();
  int NumSubElts, Index;
  bool OpsSwapped = Mask[Index] < (int)NumElts;
  SDValue InPlace = OpsSwapped ? V2 : V1;
  SDValue ToInsert = OpsSwapped ? V1 : V2;
  if (NumSubElts + Index >= (int)NumElts)
  Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, InPlace, ToInsert,
  bool OpsSwapped = false;
  if (!isa<BuildVectorSDNode>(V1)) {
    if (!isa<BuildVectorSDNode>(V2))
  SDValue Splat = cast<BuildVectorSDNode>(V1)->getSplatValue();
    const unsigned E = Mask.size() - ((Offset > 0) ? Offset : 0);
    for (unsigned i = S; i != E; ++i)
      if (Mask[i] >= 0 && (unsigned)Mask[i] != Base + i + Offset)
  bool IsVSlidedown = isSlideMask(Mask, OpsSwapped ? 0 : NumElts, 1);
  if (!IsVSlidedown && !isSlideMask(Mask, OpsSwapped ? 0 : NumElts, -1))
  const int InsertIdx = Mask[IsVSlidedown ? (NumElts - 1) : 0];
  if (InsertIdx < 0 || InsertIdx / NumElts != (unsigned)OpsSwapped)
  auto OpCode = IsVSlidedown ?
  auto Vec = DAG.getNode(OpCode, DL, ContainerVT,
                         Splat, TrueMask, VL);
  MVT VecContainerVT = VecVT;
  MVT WideContainerVT = WideVT;
  EvenV = DAG.getBitcast(VecContainerVT, EvenV);
  if (Subtarget.hasStdExtZvbb()) {
                              OffsetVec, Passthru, Mask, VL);
                              Interleaved, EvenV, Passthru, Mask, VL);
                              OddV, Passthru, Mask, VL);
                              OddV, AllOnesVec, Passthru, Mask, VL);
                              Interleaved, OddsMul, Passthru, Mask, VL);
  Interleaved = DAG.getBitcast(ResultContainerVT, Interleaved);
  if (ViaEltSize > NumElts)
  if (ViaEltSize > NumElts)
  if (ViaEltSize > NumElts)
                              MVT &RotateVT, unsigned &RotateAmt) {
  unsigned NumSubElts;
          NumElts, NumSubElts, RotateAmt))
                                  NumElts / NumSubElts);
  unsigned VRegsPerSrc = NumElts / ElemsPerVReg;
      OutMasks(VRegsPerSrc, {-1, {}});
  for (unsigned DstIdx = 0; DstIdx < Mask.size(); DstIdx++) {
    int DstVecIdx = DstIdx / ElemsPerVReg;
    int DstSubIdx = DstIdx % ElemsPerVReg;
    int SrcIdx = Mask[DstIdx];
    if (SrcIdx < 0 || (unsigned)SrcIdx >= 2 * NumElts)
    int SrcVecIdx = SrcIdx / ElemsPerVReg;
    int SrcSubIdx = SrcIdx % ElemsPerVReg;
    if (OutMasks[DstVecIdx].first == -1)
      OutMasks[DstVecIdx].first = SrcVecIdx;
    if (OutMasks[DstVecIdx].first != SrcVecIdx)
      OutMasks[DstVecIdx].second.resize(ElemsPerVReg, -1);
    OutMasks[DstVecIdx].second[DstSubIdx] = SrcSubIdx;
  for (unsigned DstVecIdx = 0; DstVecIdx < OutMasks.size(); DstVecIdx++) {
    auto &[SrcVecIdx, SrcSubMask] = OutMasks[DstVecIdx];
    if (SrcVecIdx == -1)
    unsigned ExtractIdx = (SrcVecIdx % VRegsPerSrc) * NumOpElts;
    unsigned InsertIdx = DstVecIdx * NumOpElts;
  MVT VT = Op.getSimpleValueType();
    V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)
            V.getOperand(0).getSimpleValueType().getVectorNumElements();
        V = V.getOperand(Offset / OpElements);
      auto *Ld = cast<LoadSDNode>(V);
        SDValue Ops[] = {Ld->getChain(),
        V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                        Ld->getPointerInfo().getWithOffset(Offset),
                        Ld->getOriginalAlign(),
                       Ld->getPointerInfo().getWithOffset(Offset), SVT,
                       Ld->getOriginalAlign(),
                       Ld->getMemOperand()->getFlags());
      assert(Lane < (int)NumElts && "Unexpected lane!");
                           DAG.getUNDEF(ContainerVT), TrueMask, VL);
    if (Subtarget.hasStdExtZvkb())
      LoV = LoSrc == 0 ? V1 : V2;
      HiV = HiSrc == 0 ? V1 : V2;
    unsigned InvRotate = NumElts - Rotation;
      Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
    int EvenSrc, OddSrc;
      int Size = Mask.size();
      assert(EvenSrc >= 0 && "Undef source?");
      EvenV = (EvenSrc / Size) == 0 ? V1 : V2;
      assert(OddSrc >= 0 && "Undef source?");
      OddV = (OddSrc / Size) == 0 ? V1 : V2;
  assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
      any_of(Mask, [&](const auto &Idx) { return Idx > 255; })) {
    MVT IndexContainerVT =
    for (int MaskIndex : Mask) {
      bool IsLHSIndex = MaskIndex < (int)NumElts && MaskIndex >= 0;
                           DAG.getUNDEF(ContainerVT), TrueMask, VL);
        int MaskIndex = MaskIdx.value();
        return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
    for (int MaskIndex : Mask) {
      bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ SwapOps;
    assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
  for (int MaskIndex : Mask) {
    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ !SwapOps;
    bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
    ShuffleMaskLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
    ShuffleMaskRHS.push_back(IsLHSOrUndefIndex ? -1 : (MaskIndex - NumElts));
    std::swap(ShuffleMaskLHS, ShuffleMaskRHS);
  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  if (Op->isVPOpcode()) {
    Mask = Op.getOperand(1);
    VL = Op.getOperand(2);
  MVT FloatEltVT = (EltSize >= 32) ? MVT::f64 : MVT::f32;
    FloatEltVT = MVT::f32;
         "Expected legal float type!");
  } else if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF) {
    Src = DAG.getNode(ISD::VP_AND, DL, VT, Src, Neg, Mask, VL);
  if (FloatVT.bitsGT(VT)) {
    if (Op->isVPOpcode())
      FloatVal = DAG.getNode(ISD::VP_UINT_TO_FP, DL, FloatVT, Src, Mask, VL);
    if (!Op->isVPOpcode())
      MVT ContainerFloatVT =
                             Src, Mask, RTZRM, VL);
  unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
  if (Op->isVPOpcode()) {
  else if (IntVT.bitsGT(VT))
  unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
  if (Op.getOpcode() == ISD::VP_CTTZ_ZERO_UNDEF)
    return DAG.getNode(ISD::VP_SUB, DL, VT, Exp,
  unsigned Adjust = ExponentBias + (EltSize - 1);
  if (Op->isVPOpcode())
  else if (Op.getOpcode() == ISD::VP_CTLZ)
    Res = DAG.getNode(ISD::VP_UMIN, DL, VT, Res,
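  // The arithmetic here: converting x (or x & -x for cttz) to float with
  // round-towards-zero places floor(log2(x)) + ExponentBias in the exponent
  // field, so after the shift Exp = log2 + bias. cttz is then Exp - bias
  // (the index of the lowest set bit), and ctlz is (EltSize - 1 + bias) -
  // Exp. For f32 and i32: x = 16 converts to exponent 131, giving
  // cttz = 131 - 127 = 4 and ctlz = (31 + 127) - 131 = 27, both correct.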
  auto *Load = cast<LoadSDNode>(Op);
  assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
                                        Load->getMemoryVT(),
                                        *Load->getMemOperand()))
  MVT VT = Op.getSimpleValueType();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");
         "Expecting equally-sized RVV vector types to be legal");
                     Load->getPointerInfo(), Load->getOriginalAlign(),
                     Load->getMemOperand()->getFlags());
  auto *Store = cast<StoreSDNode>(Op);
  assert(Store && Store->getValue().getValueType().isVector() &&
         "Expected vector store");
                                        Store->getMemoryVT(),
                                        *Store->getMemOperand()))
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV store type");
         "Expecting equally-sized RVV vector types to be legal");
  StoredVal = DAG.getBitcast(NewVT, StoredVal);
                      Store->getPointerInfo(), Store->getOriginalAlign(),
                      Store->getMemOperand()->getFlags());
  assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
  int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();
  unsigned ShiftAmt, AddOpc;
  if (Subtarget.hasStdExtZtso()) {
         "Unexpected custom legalisation");
         "Unexpected custom legalisation");
         "Unexpected custom legalisation");
  if (isa<ConstantSDNode>(Op.getOperand(1)))
         "Unexpected custom legalisation");
  MVT VT = Op.getSimpleValueType();
  unsigned Check = Op.getConstantOperandVal(1);
  unsigned TDCMask = 0;
  MVT VT0 = Op.getOperand(0).getSimpleValueType();
    if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
      VL = Op.getOperand(3);
                      VL, Op->getFlags());
  if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
    MVT MaskContainerVT =
    VL = Op.getOperand(3);
                    Mask, VL, Op->getFlags());
                          DAG.getUNDEF(ContainerDstVT), TDCMaskV, VL);
                 DAG.getUNDEF(ContainerVT), Mask, VL});
                      TDCMaskV, DAG.getUNDEF(ContainerDstVT), Mask, VL);
                          DAG.getUNDEF(ContainerDstVT), SplatZero, VL);
                 DAG.getUNDEF(ContainerVT), Mask, VL});
  MVT VT = Op.getSimpleValueType();
    return DAG.getNode(Opc, DL, VT, NewX, NewY);
  MVT ContainerVT = VT;
  if (Op->isVPOpcode()) {
    Mask = Op.getOperand(2);
    VL = Op.getOperand(3);
      {X, X, DAG.getCondCode(ISD::SETOEQ),
       DAG.getUNDEF(ContainerVT), Mask, VL});
      {Y, Y, DAG.getCondCode(ISD::SETOEQ),
       DAG.getUNDEF(ContainerVT), Mask, VL});
                         DAG.getUNDEF(ContainerVT), Mask, VL);
#define OP_CASE(NODE)                                                          \
  case ISD::NODE:                                                              \
    return RISCVISD::NODE##_VL;
#define VP_CASE(NODE)                                                          \
  case ISD::VP_##NODE:                                                         \
    return RISCVISD::NODE##_VL;
  switch (Op.getOpcode()) {
  case ISD::VP_CTLZ_ZERO_UNDEF:
  case ISD::VP_CTTZ_ZERO_UNDEF:
    if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
    if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
    if (Op.getSimpleValueType().getVectorElementType() == MVT::i1)
  case ISD::VP_SELECT:
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
  case ISD::VP_FP_TO_SINT:
  case ISD::VP_FP_TO_UINT:
  case ISD::VP_FMINNUM:
  case ISD::VP_FMAXNUM:
  case ISD::VP_LLRINT:
5847 "not a RISC-V target specific op");
5853 "adding target specific op should update this function");
5873 "not a RISC-V target specific op");
5879 "adding target specific op should update this function");
5898 if (!
Op.getOperand(j).getValueType().isVector()) {
5899 LoOperands[j] =
Op.getOperand(j);
5900 HiOperands[j] =
Op.getOperand(j);
5903 std::tie(LoOperands[j], HiOperands[j]) =
5908 DAG.
getNode(
Op.getOpcode(),
DL, LoVT, LoOperands,
Op->getFlags());
5910 DAG.
getNode(
Op.getOpcode(),
DL, HiVT, HiOperands,
Op->getFlags());
5925 std::tie(LoOperands[j], HiOperands[j]) =
5929 if (!
Op.getOperand(j).getValueType().isVector()) {
5930 LoOperands[j] =
Op.getOperand(j);
5931 HiOperands[j] =
Op.getOperand(j);
5934 std::tie(LoOperands[j], HiOperands[j]) =
5939 DAG.
getNode(
Op.getOpcode(),
DL, LoVT, LoOperands,
Op->getFlags());
5941 DAG.
getNode(
Op.getOpcode(),
DL, HiVT, HiOperands,
Op->getFlags());
5951 auto [EVLLo, EVLHi] =
5952 DAG.
SplitEVL(
Op.getOperand(3),
Op.getOperand(1).getValueType(),
DL);
5956 {Op.getOperand(0), Lo, MaskLo, EVLLo},
Op->getFlags());
5958 {ResLo, Hi, MaskHi, EVLHi},
Op->getFlags());
5976 if (!
Op.getOperand(j).getValueType().isVector()) {
5977 LoOperands[j] =
Op.getOperand(j);
5978 HiOperands[j] =
Op.getOperand(j);
5981 std::tie(LoOperands[j], HiOperands[j]) =
5986 DAG.
getNode(
Op.getOpcode(),
DL, LoVTs, LoOperands,
Op->getFlags());
5989 DAG.
getNode(
Op.getOpcode(),
DL, HiVTs, HiOperands,
Op->getFlags());
  switch (Op.getOpcode()) {
    return lowerGlobalAddress(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerConstantPool(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerSELECT(Op, DAG);
    return lowerBRCOND(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, true);
    return lowerShiftRightParts(Op, DAG, false);
    if (Op.getValueType().isFixedLengthVector()) {
      assert(Subtarget.hasStdExtZvkb());
      return lowerToScalableOp(Op, DAG);
    assert(Subtarget.hasVendorXTHeadBb() &&
           !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
           "Unexpected custom legalization");
    if (!isa<ConstantSDNode>(Op.getOperand(1)))
    EVT VT = Op.getValueType();
    if (VT == MVT::f16 && Op0VT == MVT::i16 &&
    if (VT == MVT::bf16 && Op0VT == MVT::i16 &&
        Subtarget.hasStdExtZfbfmin()) {
    if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
    if (VT == MVT::f64 && Op0VT == MVT::i64 && XLenVT == MVT::i32 &&
        Subtarget.hasStdExtZfa()) {
           "Unexpected types");
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
    return LowerINTRINSIC_W_CHAIN(Op, DAG);
    return LowerINTRINSIC_VOID(Op, DAG);
    return LowerIS_FPCLASS(Op, DAG);
    MVT VT = Op.getSimpleValueType();
    assert(Subtarget.hasStdExtZvbb());
    return lowerToScalableOp(Op, DAG);
    assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
    if (!Op.getSimpleValueType().isVector())
    return lowerVectorTruncLike(Op, DAG);
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, 1);
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, -1);
    return lowerSPLAT_VECTOR_PARTS(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    MVT VT = Op.getSimpleValueType();
    MVT ContainerVT = VT;
                      DAG.getUNDEF(ContainerVT), Scalar, VL);
    MVT VT = Op.getSimpleValueType();
  } else if ((Val % 8) == 0) {
    if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
        Op.getOperand(1).getValueType() == MVT::i32) {
    if (Op.getValueType() == MVT::nxv32f16 &&
    EVT VT = Op.getValueType();
    if (VT == MVT::f32 && Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())
    if (VT == MVT::f64 && Op0VT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) {
    if (!Op.getValueType().isVector())
    return lowerVectorFPExtendOrRoundLike(Op, DAG);
    EVT VT = Op.getValueType();
    if (VT == MVT::bf16 && Op0VT == MVT::f32 && Subtarget.hasStdExtZfbfmin())
    if (VT == MVT::bf16 && Op0VT == MVT::f64 && Subtarget.hasStdExtZfbfmin() &&
    if (!Op.getValueType().isVector())
    return lowerVectorFPExtendOrRoundLike(Op, DAG);
    return lowerStrictFPExtendOrRoundLike(Op, DAG);
    if (Op.getValueType().isVector() &&
        Op.getValueType().getScalarType() == MVT::f16 &&
      if (Op.getValueType() == MVT::nxv32f16)
        Op1.getValueType().isVector() &&
        Op1.getValueType().getScalarType() == MVT::f16 &&
      if (Op1.getValueType() == MVT::nxv32f16)
                                   Op1.getValueType().getVectorElementCount());
      return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), WidenVec);
    MVT VT = Op.getSimpleValueType();
    bool IsStrict = Op->isStrictFPOpcode();
    SDValue Src = Op.getOperand(0 + IsStrict);
    MVT SrcVT = Src.getSimpleValueType();
           "Unexpected vector element types");
      if (EltSize > (2 * SrcEltSize)) {
                           Op.getOperand(0), Ext);
      assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
      auto [FExt, Chain] =
      return DAG.getNode(Op.getOpcode(), DL, Op->getVTList(), Chain, FExt);
      if (SrcEltSize > (2 * EltSize)) {
        assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
                           Op.getOperand(0), Src);
                           Op.getOperand(0), Src);
    unsigned RVVOpc = 0;
    switch (Op.getOpcode()) {
           "Expected same element count");
                        Op.getOperand(0), Src, Mask, VL);
      Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
        makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
    MVT VT = Op.getSimpleValueType();
        makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
        makeLibCall(DAG, RTLIB::FPEXT_F16_F32, MVT::f32, Arg, CallOptions, DL)
    return lowerVECREDUCE(Op, DAG);
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskVecReduction(Op, DAG, false);
    return lowerVECREDUCE(Op, DAG);
    return lowerFPVECREDUCE(Op, DAG);
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAX:
    if (Op.getOperand(1).getValueType() == MVT::nxv32f16 &&
    return lowerVPREDUCE(Op, DAG);
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
    if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskVecReduction(Op, DAG, true);
    return lowerVPREDUCE(Op, DAG);
                             DAG.getUNDEF(ContainerVT), DAG, Subtarget);
    return lowerINSERT_SUBVECTOR(Op, DAG);
    return lowerEXTRACT_SUBVECTOR(Op, DAG);
    return lowerVECTOR_DEINTERLEAVE(Op, DAG);
    return lowerVECTOR_INTERLEAVE(Op, DAG);
    return lowerSTEP_VECTOR(Op, DAG);
    return lowerVECTOR_REVERSE(Op, DAG);
6585 return lowerVECTOR_SPLICE(
Op, DAG);
6589 if (
Op.getValueType().getScalarType() == MVT::f16 &&
6592 if (
Op.getValueType() == MVT::nxv32f16)
6604 if (
Op.getValueType().getVectorElementType() == MVT::i1)
6605 return lowerVectorMaskSplat(
Op, DAG);
6613 MVT VT =
Op.getSimpleValueType();
6614 unsigned NumOpElts =
6615 Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
6618 SDValue SubVec = OpIdx.value();
6629 if (
auto V = expandUnalignedRVVLoad(
Op, DAG))
6631 if (
Op.getValueType().isFixedLengthVector())
6632 return lowerFixedLengthVectorLoadToRVV(
Op, DAG);
6635 if (
auto V = expandUnalignedRVVStore(
Op, DAG))
6637 if (
Op.getOperand(1).getValueType().isFixedLengthVector())
6638 return lowerFixedLengthVectorStoreToRVV(
Op, DAG);
6642 return lowerMaskedLoad(
Op, DAG);
6645 return lowerMaskedStore(
Op, DAG);
6654 EVT VT =
Op.getValueType();
6665 MVT OpVT =
Op.getOperand(0).getSimpleValueType();
6667 MVT VT =
Op.getSimpleValueType();
6672 "Unexpected CondCode");
6680 if (isa<ConstantSDNode>(
RHS)) {
6681 int64_t Imm = cast<ConstantSDNode>(
RHS)->getSExtValue();
6682 if (Imm != 0 && isInt<12>((
uint64_t)Imm + 1)) {
6701 if (
Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&
6706 return lowerFixedLengthVectorSetccToRVV(
Op, DAG);
6722 return lowerToScalableOp(
Op, DAG);
6726 if (
Op.getSimpleValueType().isFixedLengthVector())
6727 return lowerToScalableOp(
Op, DAG);
6729 assert(
Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.
is64Bit() &&
6730 "Unexpected custom legalisation");
6742 if (
Op.getValueType() == MVT::nxv32f16 &&
6753 return lowerToScalableOp(
Op, DAG);
6756 if (!
Op.getValueType().isVector())
6758 return lowerToScalableOp(
Op, DAG);
6761 if (!
Op.getValueType().isVector())
6763 return lowerToScalableOp(
Op, DAG);
6766 return lowerABS(
Op, DAG);
6771 if (Subtarget.hasStdExtZvbb())
6772 return lowerToScalableOp(
Op, DAG);
6774 return lowerCTLZ_CTTZ_ZERO_UNDEF(
Op, DAG);
6776 return lowerFixedLengthVectorSelectToRVV(
Op, DAG);
6778 if (
Op.getValueType() == MVT::nxv32f16 &&
6782 return lowerFixedLengthVectorFCOPYSIGNToRVV(
Op, DAG);
6789 if (
Op.getValueType() == MVT::nxv32f16 &&
6793 return lowerToScalableOp(
Op, DAG);
6796 return lowerVectorStrictFSetcc(
Op, DAG);
6806 case ISD::VP_GATHER:
6807 return lowerMaskedGather(
Op, DAG);
6809 case ISD::VP_SCATTER:
6810 return lowerMaskedScatter(
Op, DAG);
6812 return lowerGET_ROUNDING(
Op, DAG);
6814 return lowerSET_ROUNDING(
Op, DAG);
6816 return lowerEH_DWARF_CFA(
Op, DAG);
6817 case ISD::VP_SELECT:
6826 case ISD::VP_UADDSAT:
6827 case ISD::VP_USUBSAT:
6828 case ISD::VP_SADDSAT:
6829 case ISD::VP_SSUBSAT:
6831 case ISD::VP_LLRINT:
6832 return lowerVPOp(
Op, DAG);
6836 return lowerLogicVPOp(
Op, DAG);
6845 case ISD::VP_FMINNUM:
6846 case ISD::VP_FMAXNUM:
6847 case ISD::VP_FCOPYSIGN:
6848 if (
Op.getValueType() == MVT::nxv32f16 &&
6856 return lowerVPOp(
Op, DAG);
6857 case ISD::VP_IS_FPCLASS:
6858 return LowerIS_FPCLASS(
Op, DAG);
6859 case ISD::VP_SIGN_EXTEND:
6860 case ISD::VP_ZERO_EXTEND:
6861 if (
Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
6862 return lowerVPExtMaskOp(
Op, DAG);
6863 return lowerVPOp(
Op, DAG);
6864 case ISD::VP_TRUNCATE:
6865 return lowerVectorTruncLike(
Op, DAG);
6866 case ISD::VP_FP_EXTEND:
6867 case ISD::VP_FP_ROUND:
6868 return lowerVectorFPExtendOrRoundLike(
Op, DAG);
6869 case ISD::VP_SINT_TO_FP:
6870 case ISD::VP_UINT_TO_FP:
6871 if (
Op.getValueType().isVector() &&
6872 Op.getValueType().getScalarType() == MVT::f16 &&
6875 if (
Op.getValueType() == MVT::nxv32f16)
6887 case ISD::VP_FP_TO_SINT:
6888 case ISD::VP_FP_TO_UINT:
6890 Op1.getValueType().isVector() &&
6891 Op1.getValueType().getScalarType() == MVT::f16 &&
6894 if (Op1.getValueType() == MVT::nxv32f16)
6899 Op1.getValueType().getVectorElementCount());
6903 {WidenVec, Op.getOperand(1), Op.getOperand(2)});
6905 return lowerVPFPIntConvOp(
Op, DAG);
6907 if (
Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&
6911 if (
Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
6912 return lowerVPSetCCMaskOp(
Op, DAG);
6918 case ISD::VP_BITREVERSE:
6920 return lowerVPOp(
Op, DAG);
6922 case ISD::VP_CTLZ_ZERO_UNDEF:
6923 if (Subtarget.hasStdExtZvbb())
6924 return lowerVPOp(
Op, DAG);
6925 return lowerCTLZ_CTTZ_ZERO_UNDEF(
Op, DAG);
6927 case ISD::VP_CTTZ_ZERO_UNDEF:
6928 if (Subtarget.hasStdExtZvbb())
6929 return lowerVPOp(
Op, DAG);
6930 return lowerCTLZ_CTTZ_ZERO_UNDEF(
Op, DAG);
6932 return lowerVPOp(
Op, DAG);
6933 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
6934 return lowerVPStridedLoad(
Op, DAG);
6935 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
6936 return lowerVPStridedStore(
Op, DAG);
6938 case ISD::VP_FFLOOR:
6940 case ISD::VP_FNEARBYINT:
6941 case ISD::VP_FROUND:
6942 case ISD::VP_FROUNDEVEN:
6943 case ISD::VP_FROUNDTOZERO:
6944 if (
Op.getValueType() == MVT::nxv32f16 &&
6949 case ISD::VP_FMAXIMUM:
6950 case ISD::VP_FMINIMUM:
6951 if (
Op.getValueType() == MVT::nxv32f16 &&
6956 case ISD::EXPERIMENTAL_VP_SPLICE:
6957 return lowerVPSpliceExperimental(
Op, DAG);
6958 case ISD::EXPERIMENTAL_VP_REVERSE:
6959 return lowerVPReverseExperimental(
Op, DAG);
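// Editorial note (not part of this file): the dispatch above repeatedly
// special-cases MVT::nxv32f16. In this vintage of the backend, f16 vector
// ops on targets with only a minimal f16 extension appear to be legalized
// by promoting to f32, and nxv32f16 must first be split because a promoted
// nxv32f32 type does not exist; the elided branches presumably forward to a
// split-then-promote helper.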
  /*...*/ N->getOffset(), Flags);
// ...
template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal, bool IsExternWeak) const {
  // ...
  if (IsLocal && !Subtarget.allowTaggedGlobals())
    // ...
}
// ...
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  // ...
  return getAddr(N, DAG);
// ...
  return getAddr(N, DAG);
// ...
  return getAddr(N, DAG);
// ...
/*...*/ SDValue RISCVTargetLowering::getStaticTLSAddr(/*...*/
                                                      bool UseGOT) const {
  // ...
  Args.push_back(Entry);
  // ...
}
// ...
  assert(N->getOffset() == 0 && "unexpected offset in global node");
  // ...
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    // ...
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    // ...
           : getDynamicTLSAddr(N, DAG);
// ...
  if (LHS == LHS2 && RHS == RHS2) {
    // ...
  } else if (LHS == RHS2 && RHS == LHS2) {
    // ...
  }
  return std::nullopt;
// ...
  MVT VT = N->getSimpleValueType(0);
  // ...
  if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
    // ...
    if (~TrueVal == FalseVal) {
      // ...
    }
  }
  // ...
  if (Subtarget.hasShortForwardBranchOpt())
    // ...
  unsigned SelOpNo = 0;
  // ...
  unsigned ConstSelOpNo = 1;
  unsigned OtherSelOpNo = 2;
  if (!dyn_cast<ConstantSDNode>(Sel->getOperand(ConstSelOpNo))) {
    // ...
  }
  ConstantSDNode *ConstSelOpNode = dyn_cast<ConstantSDNode>(ConstSelOp);
  if (!ConstSelOpNode || ConstSelOpNode->isOpaque())
    // ...
  ConstantSDNode *ConstBinOpNode = dyn_cast<ConstantSDNode>(ConstBinOp);
  if (!ConstBinOpNode || ConstBinOpNode->isOpaque())
    // ...
  SDValue NewConstOps[2] = {ConstSelOp, ConstBinOp};
  // ...
    std::swap(NewConstOps[0], NewConstOps[1]);
  // ...
  SDValue NewNonConstOps[2] = {OtherSelOp, ConstBinOp};
  // ...
    std::swap(NewNonConstOps[0], NewNonConstOps[1]);
  // ...
  SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
  SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
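// Editorial note (not part of this file): the code above appears to fold a
// binary op through a select whose arms include a constant. Applying the op
// to each arm lets the constant arm fold at compile time, so only one live
// computation remains and the select itself can later become a branchless
// conditional-zero sequence.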
  MVT VT = Op.getSimpleValueType();
  // ...
  if ((Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps()) &&
      /*...*/) {
    // ...
    if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
      // ...
      /*...*/ TrueVal, Subtarget.getXLen(), Subtarget, /*CompressionCost=*/true);
      /*...*/ FalseVal, Subtarget.getXLen(), Subtarget, /*CompressionCost=*/true);
      bool IsCZERO_NEZ = TrueValCost <= FalseValCost;
      // ...
      /*...*/ IsCZERO_NEZ ? FalseVal - TrueVal : TrueVal - FalseVal, DL, VT);
      // ...
      /*...*/ DL, VT, LHSVal, CondV);
    }
    // ...
    if (Op.hasOneUse()) {
      unsigned UseOpc = Op->use_begin()->getOpcode();
      // ...
        return lowerSELECT(NewSel, DAG);
    }
    // ...
  }
  // ...
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
  // ...
  if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
      /*...*/) {
    // ...
    if (TrueVal - 1 == FalseVal)
      // ...
    if (TrueVal + 1 == FalseVal)
      // ...
  }
  // ...
  if (/*...*/ RHS == TrueV && LHS == FalseV) {
    // ...
  }
  // ...
  if (isa<ConstantSDNode>(TrueV) && !isa<ConstantSDNode>(FalseV)) {
    // ...
  }
  // ...
  /*...*/ LHS, RHS, TargetCC, Op.getOperand(2));
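// ---- Editorial aside (standalone sketch; not part of this file) -----------
// A minimal model of the branchless select-of-constants built above,
// assuming a Zicond-style conditional-zero primitive. czero_eqz and
// select_const are illustrative names, not APIs from this file.
#include <cassert>
#include <cstdint>

static uint64_t czero_eqz(uint64_t X, uint64_t Cond) { return Cond ? X : 0; }

static uint64_t select_const(bool Cond, uint64_t T, uint64_t F) {
  // F + czero_eqz(T - F, Cond): only the cheaper constant and the
  // difference need to be materialized. Unsigned wraparound keeps this
  // exact even when T < F.
  return F + czero_eqz(T - F, Cond);
}

int main() {
  assert(select_const(true, 7, 3) == 7);
  assert(select_const(false, 7, 3) == 3);
  assert(select_const(true, 3, 7) == 3);
}
// ---- End editorial aside ---------------------------------------------------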
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
// ...
  int XLenInBytes = Subtarget.getXLen() / 8;
  // ...
  EVT VT = Op.getValueType();
  // ...
  unsigned Depth = Op.getConstantOperandVal(0);
  // ...
    int Offset = -(XLenInBytes * 2);
// ...
  int XLenInBytes = Subtarget.getXLen() / 8;
  // ...
  EVT VT = Op.getValueType();
  // ...
  unsigned Depth = Op.getConstantOperandVal(0);
  // ...
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
// ...
  EVT VT = Lo.getValueType();
// ...
  EVT VT = Lo.getValueType();
// ...
  MVT VT = Op.getSimpleValueType();
// ...
  MVT VecVT = Op.getSimpleValueType();
  // ... "Unexpected SPLAT_VECTOR_PARTS lowering");
  // ...
  MVT ContainerVT = VecVT;
// ...
/*...*/ SDValue RISCVTargetLowering::lowerVectorMaskExt(/*...*/
                                                        int64_t ExtTrueVal) const {
  // ...
  MVT VecVT = Op.getSimpleValueType();
  // ...
  assert(Src.getValueType().isVector() &&
         Src.getValueType().getVectorElementType() == MVT::i1);
  // ...
  /*...*/ DAG.getUNDEF(ContainerVT), SplatZero, VL);
  /*...*/ DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
  // ...
  /*...*/ SplatZero, DAG.getUNDEF(ContainerVT), VL);
}
// ...
SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
    /*...*/) {
  MVT ExtVT = Op.getSimpleValueType();
  // ...
  MVT VT = Op.getOperand(0).getSimpleValueType();
  // ...
}
// ...
  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
  // ...
  EVT MaskVT = Op.getValueType();
  // ... "Unexpected type for vector mask lowering");
  MVT VecVT = Src.getSimpleValueType();
  // ...
    VL = Op.getOperand(2);
  // ...
  MVT ContainerVT = VecVT;
  // ...
  MVT MaskContainerVT =
      // ...
    std::tie(Mask, VL) =
        // ...
  /*...*/ DAG.getUNDEF(ContainerVT), SplatOne, VL);
  /*...*/ DAG.getUNDEF(ContainerVT), SplatZero, VL);
  // ...
  /*...*/ DAG.getUNDEF(ContainerVT), Mask, VL);
  /*...*/ DAG.getUNDEF(MaskContainerVT), Mask, VL});
// ...
  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
  // ...
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type for vector truncate lowering");
  // ...
    return lowerVectorMaskTruncLike(Op, DAG);
  // ...
  MVT SrcVT = Src.getSimpleValueType();
  // ... "Unexpected vector truncate lowering");
  MVT ContainerVT = SrcVT;
  // ...
    VL = Op.getOperand(2);
  // ...
    std::tie(Mask, VL) =
        // ...
  // ...
  } while (SrcEltVT != DstEltVT);
// ...
SDValue
RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(SDValue Op,
                                                    /*...*/) {
  // ...
  MVT VT = Op.getSimpleValueType();
  MVT SrcVT = Src.getSimpleValueType();
  MVT ContainerVT = VT;
  // ...
  /*...*/ Chain, Src, Mask, VL);
  Chain = Src.getValue(1);
  // ...
  /*...*/ Chain, Src, Mask, VL);
}
// ...
SDValue
RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
                                                    /*...*/) {
  /*...*/ Op.getOpcode() == ISD::VP_FP_ROUND ||
          Op.getOpcode() == ISD::VP_FP_EXTEND;
  // ...
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type for vector truncate lowering");
  // ...
  MVT SrcVT = Src.getSimpleValueType();
  // ...
  bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
  // ...
  MVT ContainerVT = VT;
  // ...
    VL = Op.getOperand(2);
  // ...
    std::tie(Mask, VL) =
        // ...
    Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
  // ...
  unsigned InterConvOpc =
      // ...
  /*...*/ DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
  /*...*/ DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
}
// ...
static std::optional<MVT>
// ...
  const unsigned MinVLMAX = VectorBitsMin / EltSize;
  // ...
  if (MaxIdx < MinVLMAX)
    // ...
  else if (MaxIdx < MinVLMAX * 2)
    // ...
  else if (MaxIdx < MinVLMAX * 4)
    // ...
  return std::nullopt;
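// ---- Editorial aside (standalone sketch; not part of this file) -----------
// The container-shrinking rule above, modeled with plain integers.
// smallestLMULForIndex is an illustrative name; the real helper returns an
// MVT rather than an LMUL factor.
#include <cstdint>
#include <optional>

// Smallest LMUL (1, 2 or 4) whose register group is guaranteed to hold
// indices 0..MaxIdx, given the guaranteed-minimum element count of an
// LMUL=1 register; nullopt means the full container must be used.
inline std::optional<unsigned> smallestLMULForIndex(uint64_t MaxIdx,
                                                    uint64_t MinVLMAX) {
  if (MaxIdx < MinVLMAX)
    return 1;
  if (MaxIdx < MinVLMAX * 2)
    return 2;
  if (MaxIdx < MinVLMAX * 4)
    return 4;
  return std::nullopt;
}
// E.g. with Zvl128b and e64 (MinVLMAX = 2), an access at index 5 fits in
// LMUL=4; index 8 or above cannot be shrunk.
// ---- End editorial aside ---------------------------------------------------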
  MVT VecVT = Op.getSimpleValueType();
  // ...
  MVT ContainerVT = VecVT;
  // ...
  MVT OrigContainerVT = ContainerVT;
  // ...
  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx)) {
    const unsigned OrigIdx = IdxC->getZExtValue();
    // ...
    /*...*/ DL, DAG, Subtarget)) {
      ContainerVT = *ShrunkVT;
      // ...
    }
  }
  // ...
  if (/*...*/ VLEN && ContainerVT.bitsGT(M1VT)) {
    // ...
    unsigned RemIdx = OrigIdx % ElemsPerVReg;
    unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
    unsigned ExtractIdx =
        // ...
  }
  // ...
  if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
    const auto *CVal = cast<ConstantSDNode>(Val);
    if (isInt<32>(CVal->getSExtValue())) {
      IsLegalInsert = true;
      // ...
    }
  }
  // ...
  if (IsLegalInsert) {
    // ...
      Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
    // ...
  }
  // ...
  std::tie(ValLo, ValHi) = DAG.SplitScalar(Val, DL, MVT::i32, MVT::i32);
  MVT I32ContainerVT =
      // ...
    /*...*/ Vec, Vec, ValLo, I32Mask, InsertI64VL);
    // ...
    /*...*/ Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
    // ...
    ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
    // ...
    /*...*/ ValInVec, AlignedIdx);
  // ...
  /*...*/ DAG.getUNDEF(I32ContainerVT), ValLo,
          I32Mask, InsertI64VL);
  /*...*/ DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
          I32Mask, InsertI64VL);
  // ...
  ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
  // ...
  /*...*/ Idx, Mask, InsertVL, Policy);
  // ...
  /*...*/ Slideup, AlignedIdx);
// ...
  EVT EltVT = Op.getValueType();
  // ...
  MVT ContainerVT = VecVT;
  // ...
  unsigned WidenVecLen;
  // ...
  unsigned MaxEEW = Subtarget.getELen();
  // ... "the number of elements should be power of 2");
  // ...
    ExtractBitIdx = Idx;
    // ...
    WideEltVT = LargestEltVT;
    // ...
  ExtractElementIdx = DAG.getNode(
      // ...
  /*...*/ Vec, ExtractElementIdx);
// ...
  MVT ContainerVT = VecVT;
  // ...
  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
      IdxC && VLen && VecVT.getSizeInBits().getKnownMinValue() > *VLen) {
    // ...
    unsigned OrigIdx = IdxC->getZExtValue();
    // ...
    unsigned RemIdx = OrigIdx % ElemsPerVReg;
    unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
    unsigned ExtractIdx =
        // ...
  }
  // ...
  std::optional<uint64_t> MaxIdx;
  // ...
  if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx))
    MaxIdx = IdxC->getZExtValue();
  // ...
  if (auto SmallerVT =
          // ...
    ContainerVT = *SmallerVT;
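// ---- Editorial aside (standalone sketch; not part of this file) -----------
// When the exact VLEN is known, a constant element index into an LMUL>1
// register group decomposes into (which register of the group, which slot),
// as in the OrigIdx / ElemsPerVReg and OrigIdx % ElemsPerVReg lines above.
#include <cstdint>
#include <utility>

inline std::pair<uint64_t, uint64_t>
decomposeIndex(uint64_t OrigIdx, uint64_t VLenBits, uint64_t EltBits) {
  const uint64_t ElemsPerVReg = VLenBits / EltBits;
  return {OrigIdx / ElemsPerVReg,   // sub-register within the group
          OrigIdx % ElemsPerVReg};  // slot within that register
}
// With VLEN=128 and e32 (4 elements per register), index 9 lives in
// register 2 of the group at slot 1, so the operation can be done on a
// single M1 register instead of the whole group.
// ---- End editorial aside ---------------------------------------------------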
8441 "Unexpected opcode");
8448 unsigned IntNo =
Op.getConstantOperandVal(HasChain ? 1 : 0);
8453 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
8470 if (OpVT.
bitsLT(XLenVT)) {
8477 ScalarOp = DAG.
getNode(ExtOpc,
DL, XLenVT, ScalarOp);
8488 MVT VT =
Op.getOperand(SplatOp - 1).getSimpleValueType();
8491 assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
8502 case Intrinsic::riscv_vslide1up:
8503 case Intrinsic::riscv_vslide1down:
8504 case Intrinsic::riscv_vslide1up_mask:
8505 case Intrinsic::riscv_vslide1down_mask: {
8508 bool IsMasked = NumOps == 7;
8514 std::tie(ScalarLo, ScalarHi) =
8522 if (isa<ConstantSDNode>(AVL)) {
8523 const auto [MinVLMAX, MaxVLMAX] =
8527 if (AVLInt <= MinVLMAX) {
8529 }
else if (AVLInt >= 2 * MaxVLMAX) {
8536 Intrinsic::riscv_vsetvlimax,
DL, MVT::i32);
8570 if (IntNo == Intrinsic::riscv_vslide1up ||
8571 IntNo == Intrinsic::riscv_vslide1up_mask) {
8573 ScalarHi, I32Mask, I32VL);
8575 ScalarLo, I32Mask, I32VL);
8578 ScalarLo, I32Mask, I32VL);
8580 ScalarHi, I32Mask, I32VL);
8629 const unsigned ElementWidth = 8;
8634 [[maybe_unused]]
unsigned MinVF =
8637 [[maybe_unused]]
unsigned VF =
N->getConstantOperandVal(2);
8641 bool Fractional = VF < LMul1VF;
8642 unsigned LMulVal = Fractional ? LMul1VF / VF : VF / LMul1VF;
8667 unsigned IntNo =
Op.getConstantOperandVal(HasChain ? 1 : 0);
8671 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
8688 if (OpVT.
bitsLT(XLenVT)) {
8691 ScalarOp = DAG.
getNode(ExtOpc,
DL, XLenVT, ScalarOp);
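// Editorial note (not part of this file): on RV32, the i64 scalar operand
// of vslide1up/vslide1down has no GPR home, so the block above appears to
// split it into two i32 halves and emit two slide steps over an i32 view of
// the vector with the VL doubled; the vslide1up variant pushes the high
// half first so the halves land in little-endian element order, and the
// vslide1down variant pushes the low half first.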
  EVT ValType = V.getValueType();
  if (ValType.isVector() && ValType.isFloatingPoint()) {
    // ...
    /*...*/ ValType.getVectorElementCount());
    // ...
  }
  if (ValType.isFixedLengthVector()) {
    /*...*/ DAG, V.getSimpleValueType(), Subtarget);
    // ...
  }
// ...
  unsigned IntNo = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::thread_pointer: {
    // ...
  }
  case Intrinsic::riscv_orc_b:
  case Intrinsic::riscv_brev8:
  case Intrinsic::riscv_sha256sig0:
  case Intrinsic::riscv_sha256sig1:
  case Intrinsic::riscv_sha256sum0:
  case Intrinsic::riscv_sha256sum1:
  case Intrinsic::riscv_sm3p0:
  case Intrinsic::riscv_sm3p1: {
    // ...
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
  }
  case Intrinsic::riscv_sm4ks:
  case Intrinsic::riscv_sm4ed: {
    // ...
    /*...*/ DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, Op.getOperand(3));
    // ...
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
                       /*...*/
  }
  case Intrinsic::riscv_zip:
  case Intrinsic::riscv_unzip: {
    // ...
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
  }
  case Intrinsic::riscv_mopr: {
    // ...
  }
  case Intrinsic::riscv_moprr: {
    // ...
    /*...*/ Op.getOperand(2), Op.getOperand(3));
  }
  case Intrinsic::riscv_clmul:
    // ...
  case Intrinsic::riscv_clmulh:
  case Intrinsic::riscv_clmulr: {
    // ...
    return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::experimental_get_vector_length:
    // ...
  case Intrinsic::riscv_vmv_x_s: {
    // ...
  }
  case Intrinsic::riscv_vfmv_f_s:
    // ...
  case Intrinsic::riscv_vmv_v_x:
    /*...*/ Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
            /*...*/
  case Intrinsic::riscv_vfmv_v_f:
    /*...*/ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::riscv_vmv_s_x: {
    // ...
    if (Scalar.getValueType().bitsLE(XLenVT)) {
      // ...
      /*...*/ Op.getOperand(1), Scalar, Op.getOperand(3));
    }
    assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
    // ...
    MVT VT = Op.getSimpleValueType();
    // ...
    if (Op.getOperand(1).isUndef())
      // ...
  }
  case Intrinsic::riscv_vfmv_s_f:
    /*...*/ Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::riscv_vaesdf_vv:
  case Intrinsic::riscv_vaesdf_vs:
  case Intrinsic::riscv_vaesdm_vv:
  case Intrinsic::riscv_vaesdm_vs:
  case Intrinsic::riscv_vaesef_vv:
  case Intrinsic::riscv_vaesef_vs:
  case Intrinsic::riscv_vaesem_vv:
  case Intrinsic::riscv_vaesem_vs:
  case Intrinsic::riscv_vaeskf1:
  case Intrinsic::riscv_vaeskf2:
  case Intrinsic::riscv_vaesz_vs:
  case Intrinsic::riscv_vsm4k:
  case Intrinsic::riscv_vsm4r_vv:
  case Intrinsic::riscv_vsm4r_vs: {
    if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
      // ...
  }
  case Intrinsic::riscv_vsm3c:
  case Intrinsic::riscv_vsm3me: {
    if (!isValidEGW(8, Op.getSimpleValueType(), Subtarget) ||
        !isValidEGW(8, Op->getOperand(1).getSimpleValueType(), Subtarget))
      // ...
  }
  case Intrinsic::riscv_vsha2ch:
  case Intrinsic::riscv_vsha2cl:
  case Intrinsic::riscv_vsha2ms: {
    if (Op->getSimpleValueType(0).getScalarSizeInBits() == 64 &&
        !Subtarget.hasStdExtZvknhb())
      // ...
    if (!isValidEGW(4, Op.getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
        !isValidEGW(4, Op->getOperand(2).getSimpleValueType(), Subtarget))
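// Editorial note (not part of this file): the isValidEGW checks above guard
// the vector-crypto intrinsics, whose instructions operate on element
// groups (EGW of 4 or 8 elements). The lowering appears to reject types
// whose register group cannot hold at least one whole element group, since
// such an operation could not be encoded.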
  case Intrinsic::riscv_sf_vc_v_x:
  case Intrinsic::riscv_sf_vc_v_i:
  case Intrinsic::riscv_sf_vc_v_xv:
  case Intrinsic::riscv_sf_vc_v_iv:
  case Intrinsic::riscv_sf_vc_v_vv:
  case Intrinsic::riscv_sf_vc_v_fv:
  case Intrinsic::riscv_sf_vc_v_xvv:
  case Intrinsic::riscv_sf_vc_v_ivv:
  case Intrinsic::riscv_sf_vc_v_vvv:
  case Intrinsic::riscv_sf_vc_v_fvv:
  case Intrinsic::riscv_sf_vc_v_xvw:
  case Intrinsic::riscv_sf_vc_v_ivw:
  case Intrinsic::riscv_sf_vc_v_vvw:
  case Intrinsic::riscv_sf_vc_v_fvw: {
    MVT VT = Op.getSimpleValueType();
    // ...
  }
// ...
  MVT VT = Op.getSimpleValueType();
  // ...
  if (VT.isFloatingPoint()) {
    // ...
  }
  // ...
  if (VT.isFixedLengthVector())
    // ...
  if (VT.isFixedLengthVector())
    // ...
  if (VT.isFloatingPoint())
    // ...
// ...
  unsigned IntNo = Op.getConstantOperandVal(1);
  // ...
  case Intrinsic::riscv_masked_strided_load: {
    // ...
    MVT VT = Op->getSimpleValueType(0);
    MVT ContainerVT = VT;
    // ...
    auto *Load = cast<MemIntrinsicSDNode>(Op);
    // ...
    /*...*/ ScalarVT, Load->getMemOperand());
    // ...
    /*...*/ Load->getMemOperand());
    // ...
    /*...*/ IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
    // ...
      Ops.push_back(DAG.getUNDEF(ContainerVT));
    // ...
      Ops.push_back(PassThru);
    // ...
    Ops.push_back(Stride);
    // ...
      Ops.push_back(Mask);
    // ...
      Ops.push_back(Policy);
    // ...
    /*...*/ Load->getMemoryVT(), Load->getMemOperand());
    Chain = Result.getValue(1);
    // ...
  }
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load: {
    // ...
    /*...*/ Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
            Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
            Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
            Intrinsic::riscv_vlseg8};
    unsigned NF = Op->getNumValues() - 1;
    assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
    // ...
    MVT VT = Op->getSimpleValueType(0);
    // ...
    auto *Load = cast<MemIntrinsicSDNode>(Op);
    // ...
    ContainerVTs.push_back(MVT::Other);
    // ...
    /*...*/ Load->getMemoryVT(), Load->getMemOperand());
    // ...
    for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
      // ...
  }
  case Intrinsic::riscv_sf_vc_v_x_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_i_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_xv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_iv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_vv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_fv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_xvv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_ivv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_vvv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_fvv_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_xvw_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_ivw_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_vvw_se:
    // ...
  case Intrinsic::riscv_sf_vc_v_fvw_se:
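// Editorial note (not part of this file): the masked strided load above is
// rewritten onto the riscv_vlse / riscv_vlse_mask intrinsics. Reading the
// Ops.push_back sequence, the operand order appears to be: chain and
// intrinsic id, then passthru (or undef when unmasked), base pointer,
// stride, the mask when present, VL, and finally a tail/mask policy operand
// for the masked form.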
  unsigned IntNo = Op.getConstantOperandVal(1);
  // ...
  case Intrinsic::riscv_masked_strided_store: {
    // ...
    MVT ContainerVT = VT;
    // ...
    /*...*/ IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
    // ...
    auto *Store = cast<MemIntrinsicSDNode>(Op);
    // ...
    /*...*/ Ops, Store->getMemoryVT(), Store->getMemOperand());
  }
  case Intrinsic::riscv_seg2_store:
  case Intrinsic::riscv_seg3_store:
  case Intrinsic::riscv_seg4_store:
  case Intrinsic::riscv_seg5_store:
  case Intrinsic::riscv_seg6_store:
  case Intrinsic::riscv_seg7_store:
  case Intrinsic::riscv_seg8_store: {
    // ...
    /*...*/ Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
            Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
            Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
            Intrinsic::riscv_vsseg8};
    // ...
    assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
    // ...
    MVT VT = Op->getOperand(2).getSimpleValueType();
    // ...
    auto *FixedIntrinsic = cast<MemIntrinsicSDNode>(Op);
    // ...
    for (unsigned i = 0; i < NF; i++)
      /*...*/ ContainerVT, FixedIntrinsic->getOperand(2 + i), DAG, Subtarget));
    // ...
    /*...*/ FixedIntrinsic->getMemoryVT(), FixedIntrinsic->getMemOperand());
  }
  case Intrinsic::riscv_sf_vc_xv_se:
    // ...
  case Intrinsic::riscv_sf_vc_iv_se:
    // ...
  case Intrinsic::riscv_sf_vc_vv_se:
    // ...
  case Intrinsic::riscv_sf_vc_fv_se:
    // ...
  case Intrinsic::riscv_sf_vc_xvv_se:
    // ...
  case Intrinsic::riscv_sf_vc_ivv_se:
    // ...
  case Intrinsic::riscv_sf_vc_vvv_se:
    // ...
  case Intrinsic::riscv_sf_vc_fvv_se:
    // ...
  case Intrinsic::riscv_sf_vc_xvw_se:
    // ...
  case Intrinsic::riscv_sf_vc_ivw_se:
    // ...
  case Intrinsic::riscv_sf_vc_vvw_se:
    // ...
  case Intrinsic::riscv_sf_vc_fvw_se:
    // ...
// ...
  switch (ISDOpcode) {
  // ...
  case ISD::VP_REDUCE_ADD:
    // ...
  case ISD::VP_REDUCE_UMAX:
    // ...
  case ISD::VP_REDUCE_SMAX:
    // ...
  case ISD::VP_REDUCE_UMIN:
    // ...
  case ISD::VP_REDUCE_SMIN:
    // ...
  case ISD::VP_REDUCE_AND:
    // ...
  case ISD::VP_REDUCE_OR:
    // ...
  case ISD::VP_REDUCE_XOR:
    // ...
  case ISD::VP_REDUCE_FADD:
    // ...
  case ISD::VP_REDUCE_SEQ_FADD:
    // ...
  case ISD::VP_REDUCE_FMAX:
    // ...
  case ISD::VP_REDUCE_FMIN:
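// Editorial note (not part of this file): this switch appears to map each
// generic VP reduction opcode onto the corresponding RVV reduction node
// (vredsum/vredmax/vredmin/vredand/vredor/vredxor and the FP variants,
// with SEQ_FADD selecting the ordered vfredosum form). Mask-typed AND/OR/
// XOR reductions are handled separately below via population-count style
// tests rather than a vector reduction.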
  SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
  // ...
  /*...*/ Op.getOpcode() == ISD::VP_REDUCE_AND ||
          Op.getOpcode() == ISD::VP_REDUCE_OR ||
          Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
         "Unexpected reduction lowering");
  // ...
  MVT ContainerVT = VecVT;
  // ...
    VL = Op.getOperand(3);
  // ...
    std::tie(Mask, VL) =
        // ...
  switch (Op.getOpcode()) {
  // ...
  case ISD::VP_REDUCE_AND: {
    // ...
  }
  case ISD::VP_REDUCE_OR:
    // ...
  case ISD::VP_REDUCE_XOR: {
    // ...
  }
  }
  // ...
  return DAG.getNode(BaseOpc, DL, Op.getValueType(), SetCC, Op.getOperand(0));
// ...
  auto *RegisterAVL = dyn_cast<RegisterSDNode>(AVL);
  auto *ImmAVL = dyn_cast<ConstantSDNode>(AVL);
  return (RegisterAVL && RegisterAVL->getReg() == RISCV::X0) ||
         (ImmAVL && ImmAVL->getZExtValue() >= 1);
// ...
  auto InnerVT = VecVT.bitsLE(M1VT) ? VecVT : M1VT;
  // ...
  auto InnerVL = NonZeroAVL ? VL : DAG.getConstant(1, DL, XLenVT);
  // ...
  if (M1VT != InnerVT)
    // ...
  SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
// ...
  VecEVT = Lo.getValueType();
  // ...
  MVT ContainerVT = VecVT;
  // ...
  /*...*/ Mask, VL, DL, DAG, Subtarget);
// ...
static std::tuple<unsigned, SDValue, SDValue>
// ...
  auto Flags = Op->getFlags();
  unsigned Opcode = Op.getOpcode();
  // ...
  return std::make_tuple(RVVOpc, Op.getOperand(0), Front);
// ...
  MVT VecEltVT = Op.getSimpleValueType();
  // ...
  std::tie(RVVOpcode, VectorVal, ScalarVal) =
      // ...
  MVT ContainerVT = VecVT;
  // ...
  MVT ResVT = Op.getSimpleValueType();
  // ...
  /*...*/ VL, DL, DAG, Subtarget);
  // ...
  if (Op->getFlags().hasNoNaNs())
    // ...
  /*...*/ {VectorVal, VectorVal, DAG.getCondCode(ISD::SETNE),
           DAG.getUNDEF(Mask.getValueType()), Mask, VL});
  // ...
  /*...*/ DL, ResVT, NoNaNs, Res,
          // ...
// ...
  /*...*/ Vec, Mask, VL, DL, DAG, Subtarget);
// ...
  unsigned OrigIdx = Op.getConstantOperandVal(2);
  // ...
  if (/*...*/ (OrigIdx != 0 || !Vec.isUndef())) {
    // ...
    assert(OrigIdx % 8 == 0 && "Invalid index");
    // ... "Unexpected mask vector lowering");
    // ...
  }
  // ...
  MVT ContainerVT = VecVT;
  // ...
    SubVec = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, SubVec,
                         SlideupAmt, Mask, VL, Policy);
  // ...
  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =
      /*...*/ VecVT, SubVecVT, OrigIdx, TRI);
  // ...
  if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
    // ...
  MVT InterSubVT = VecVT;
  // ...
  unsigned AlignedIdx = OrigIdx - RemIdx;
  // ...
  SubVec = getVSlideup(DAG, Subtarget, DL, InterSubVT, AlignedExtract, SubVec,
                       SlideupAmt, Mask, VL, Policy);
  // ...
  if (VecVT.bitsGT(InterSubVT))
    // ...
  return DAG.getBitcast(Op.getSimpleValueType(), SubVec);
// ...
  MVT SubVecVT = Op.getSimpleValueType();
  // ...
  unsigned OrigIdx = Op.getConstantOperandVal(1);
  // ...
    assert(OrigIdx % 8 == 0 && "Invalid index");
    // ... "Unexpected mask vector lowering");
  // ...
  MVT ContainerVT = VecVT;
  // ...
      ContainerVT = *ShrunkVT;
    // ...
    /*...*/ DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
  // ...
  MVT ContainerSubVecVT = SubVecVT;
  // ...
    /*...*/ VecVT, ContainerSubVecVT, OrigIdx / Vscale, TRI);
    SubRegIdx = Decompose.first;
    /*...*/ (OrigIdx % Vscale));
  // ...
    /*...*/ VecVT, ContainerSubVecVT, OrigIdx, TRI);
    SubRegIdx = Decompose.first;
  // ...
  MVT InterSubVT = VecVT;
  // ...
    assert(SubRegIdx != RISCV::NoSubRegister);
  // ...
  /*...*/ Vec, SlidedownAmt, Mask, VL);
  // ...
  return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
// ...
  MVT VT = N.getSimpleValueType();
  // ...
  assert(Op.getSimpleValueType() == VT &&
         "Operands and result must be same type");
  // ...
  unsigned NumVals = N->getNumValues();
  // ...
  /*...*/ NumVals, N.getValueType().changeVectorElementType(MVT::i8)));
  // ...
  for (unsigned I = 0; I < NumVals; I++) {
    // ...
  }
  if (TruncVals.size() > 1)
    // ...
  return TruncVals.front();
// ...
  MVT VecVT = Op.getSimpleValueType();
  // ... "vector_interleave on non-scalable vector!");
  // ...
  EVT SplitVT = Op0Lo.getValueType();
  // ...
  /*...*/ DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op0Hi);
  /*...*/ DAG.getVTList(SplitVT, SplitVT), Op1Lo, Op1Hi);
  // ...
  /*...*/ Op.getOperand(0), Op.getOperand(1));
  // ...
  /*...*/ Concat, EvenIdx, Passthru, Mask, VL);
  /*...*/ Concat, OddIdx, Passthru, Mask, VL);
// ...
  MVT VecVT = Op.getSimpleValueType();
  // ... "vector_interleave on non-scalable vector!");
  // ...
  EVT SplitVT = Op0Lo.getValueType();
  // ...
  /*...*/ DAG.getVTList(SplitVT, SplitVT), Op0Lo, Op1Lo);
  /*...*/ DAG.getVTList(SplitVT, SplitVT), Op0Hi, Op1Hi);
  // ...
  /*...*/ Op.getOperand(0), Op.getOperand(1));
// ...
  MVT VT = Op.getSimpleValueType();
  // ...
  uint64_t StepValImm = Op.getConstantOperandVal(0);
  if (StepValImm != 1) {
    // ...
  }
  // ...
  /*...*/ VL, VT, DL, DAG, Subtarget);
// ...
  MVT VecVT = Op.getSimpleValueType();
  // ...
  unsigned MaxVLMAX =
      // ...
  if (MaxVLMAX > 256 && EltSize == 8) {
    // ...
  }
  // ...
  return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices,
                     /*...*/
// ...
  MVT VecVT = Op.getSimpleValueType();
  // ...
  int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
  SDValue DownOffset, UpOffset;
  if (ImmValue >= 0) {
    // ...
  }
  // ...
  /*...*/ DownOffset, TrueMask, UpOffset);
  return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
                     /*...*/
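// ---- Editorial aside (standalone sketch; not part of this file) -----------
// A plain-array model of the positive-offset VECTOR_SPLICE path above:
// a slidedown of the first vector followed by a slideup of the second.
#include <array>
#include <cassert>

template <std::size_t N>
std::array<int, N> splice(const std::array<int, N> &V1,
                          const std::array<int, N> &V2, std::size_t Imm) {
  std::array<int, N> R{};
  for (std::size_t i = 0; i + Imm < N; ++i)
    R[i] = V1[i + Imm];            // vslidedown.vi by Imm
  for (std::size_t i = 0; i < Imm; ++i)
    R[N - Imm + i] = V2[i];        // vslideup.vi at N - Imm
  return R;
}

int main() {
  std::array<int, 4> A{0, 1, 2, 3}, B{4, 5, 6, 7};
  // splice(A, B, 1): last 3 elements of A, then the first element of B.
  assert((splice(A, B, 1) == std::array<int, 4>{1, 2, 3, 4}));
}
// ---- End editorial aside ---------------------------------------------------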
SDValue
RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
                                                     /*...*/) {
  auto *Load = cast<LoadSDNode>(Op);
  // ...
  /*...*/ Load->getMemoryVT(),
          *Load->getMemOperand()) &&
         "Expecting a correctly-aligned load");
  // ...
  MVT VT = Op.getSimpleValueType();
  // ...
  const auto [MinVLMAX, MaxVLMAX] =
      // ...
  if (/*...*/ getLMUL1VT(ContainerVT).bitsLE(ContainerVT)) {
    // ...
    /*...*/ Load->getMemOperand());
    // ...
  }
  // ...
  /*...*/ IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
  // ...
  /*...*/ Load->getMemoryVT(), Load->getMemOperand());
}
// ...
SDValue
RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
                                                      /*...*/) {
  auto *Store = cast<StoreSDNode>(Op);
  // ...
  /*...*/ Store->getMemoryVT(),
          *Store->getMemOperand()) &&
         "Expecting a correctly-aligned store");
  // ...
  const auto [MinVLMAX, MaxVLMAX] =
      // ...
  if (/*...*/ getLMUL1VT(ContainerVT).bitsLE(ContainerVT))
    /*...*/ Store->getMemOperand());
  // ...
  /*...*/ IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
  // ...
  /*...*/ {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
          Store->getMemoryVT(), Store->getMemOperand());
}
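// Editorial note (not part of this file): the MinVLMAX/getLMUL1VT guards
// above look like a fast path: when the fixed-length vector is known to
// fill its scalable container exactly (the minimum VLMAX covers it and the
// container is at least one full register), the access can be emitted as a
// plain whole-register load/store instead of a VL-governed vle/vse with an
// explicit AVL.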
  MVT VT = Op.getSimpleValueType();
  // ...
  const auto *MemSD = cast<MemSDNode>(Op);
  EVT MemVT = MemSD->getMemoryVT();
  // ...
  SDValue Chain = MemSD->getChain();
  // ...
  if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
    Mask = VPLoad->getMask();
    // ...
    VL = VPLoad->getVectorLength();
  } else {
    const auto *MLoad = cast<MaskedLoadSDNode>(Op);
    Mask = MLoad->getMask();
    PassThru = MLoad->getPassThru();
    // ...
  }
  // ...
  MVT ContainerVT = VT;
  // ...
  /*...*/ IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
  // ...
  Chain = Result.getValue(1);
// ...
  const auto *MemSD = cast<MemSDNode>(Op);
  EVT MemVT = MemSD->getMemoryVT();
  // ...
  SDValue Chain = MemSD->getChain();
  // ...
  bool IsCompressingStore = false;
  if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
    Val = VPStore->getValue();
    Mask = VPStore->getMask();
    VL = VPStore->getVectorLength();
    // ...
  } else {
    const auto *MStore = cast<MaskedStoreSDNode>(Op);
    Val = MStore->getValue();
    Mask = MStore->getMask();
    IsCompressingStore = MStore->isCompressingStore();
    // ...
  }
  // ...
  MVT ContainerVT = VT;
  // ...
  if (!IsUnmasked || IsCompressingStore) {
    // ...
  }
  if (IsCompressingStore) {
    // ...
    /*...*/ DAG.getUNDEF(ContainerVT), Val, Mask, VL);
    // ...
  }
  // ...
  /*...*/ IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
  // ...
  /*...*/ DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
// ...
SDValue
RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
                                                      /*...*/) {
  MVT InVT = Op.getOperand(0).getSimpleValueType();
  // ...
  MVT VT = Op.getSimpleValueType();
  // ...
  /*...*/ {Op1, Op2, Op.getOperand(2), DAG.getUNDEF(MaskVT), Mask, VL});
}
// ...
  unsigned Opc = Op.getOpcode();
  // ...
  MVT VT = Op.getSimpleValueType();
  // ...
  MVT ContainerInVT = InVT;
  // ...
    /*...*/ {Chain, Op1, Op1, DAG.getCondCode(ISD::SETOEQ), DAG.getUNDEF(MaskVT),
             /*...*/
    /*...*/ {Chain, Op2, Op2, DAG.getCondCode(ISD::SETOEQ), DAG.getUNDEF(MaskVT),
             /*...*/
    // ...
    /*...*/ {Chain, Op1, Op2, CC, Mask, Mask, VL});
  // ...
    /*...*/ {Chain, Op1, Op2, CC, DAG.getUNDEF(MaskVT), Mask, VL});
// ...
  MVT VT = Op.getSimpleValueType();
  // ... "Unexpected type for ISD::ABS");
  // ...
  MVT ContainerVT = VT;
  // ...
  if (Op->getOpcode() == ISD::VP_ABS) {
    Mask = Op->getOperand(1);
    // ...
    VL = Op->getOperand(2);
    // ...
  }
  // ...
  /*...*/ DAG.getUNDEF(ContainerVT), Mask, VL);
  /*...*/ DAG.getUNDEF(ContainerVT), Mask, VL);
// ...
SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
    /*...*/) {
  MVT VT = Op.getSimpleValueType();
  // ... "Can only handle COPYSIGN with matching types.");
  // ...
  /*...*/ Sign, DAG.getUNDEF(ContainerVT), Mask, VL);
}
// ...
SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
    /*...*/) {
  MVT VT = Op.getSimpleValueType();
  // ...
  MVT I1ContainerVT =
      // ...
  /*...*/ Op2, DAG.getUNDEF(ContainerVT), VL);
}
// ...
  MVT VT = Op.getSimpleValueType();
  // ...
  for (const SDValue &V : Op->op_values()) {
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
    // ...
    if (!V.getValueType().isVector()) {
      // ...
    }
    assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
           "Only fixed length vectors are supported!");
    // ...
  }
  // ...
  if (Op->isStrictFPOpcode()) {
    // ...
  }
  /*...*/ DAG.getNode(NewOpc, DL, ContainerVT, Ops, Op->getFlags());
// ...
  MVT VT = Op.getSimpleValueType();
  // ...
  MVT ContainerVT = VT;
  // ...
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
    // ...
    if (*MaskIdx == OpIdx.index())
      // ...
    if (Op.getOpcode() == ISD::VP_MERGE) {
      // ...
    } else {
      assert(Op.getOpcode() == ISD::VP_SELECT);
      // ...
    }
    // ...
    if (!V.getValueType().isFixedLengthVector()) {
      // ...
    }
    MVT OpVT = V.getSimpleValueType();
    // ...
    assert(useRVVForFixedLengthVectorVT(OpVT) &&
           "Only fixed length vectors are supported!");
    // ...
  return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
  MVT VT = Op.getSimpleValueType();
  // ...
  MVT ContainerVT = VT;
  // ...
  /*...*/ DAG.getUNDEF(ContainerVT), Zero, VL);
  // ...
  /*...*/ Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
  // ...
  /*...*/ DAG.getUNDEF(ContainerVT), SplatValue, VL);
  // ...
  /*...*/ ZeroSplat, DAG.getUNDEF(ContainerVT), VL);
// ...
  MVT VT = Op.getSimpleValueType();
  // ...
  ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  // ...
  MVT ContainerVT = VT;
  // ...
  switch (Condition) {
  // ...
  }
// ...
  MVT DstVT = Op.getSimpleValueType();
  MVT SrcVT = Src.getSimpleValueType();
  // ...
  if (DstEltSize >= SrcEltSize) {
    // ...
    if (SrcEltSize == 1) {
      // ...
      /*...*/ ZeroSplat, DAG.getUNDEF(IntVT), VL);
    } else if (DstEltSize > (2 * SrcEltSize)) {
      // ...
      Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
    }
    // ... "Wrong input/output vector types");
    if (DstEltSize > (2 * SrcEltSize)) {
      // ...
    }
    // ...
  } else {
    // ...
    MVT InterimFVT = DstVT;
    if (SrcEltSize > (2 * DstEltSize)) {
      assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
      // ...
    }
    // ...
    if (InterimFVT != DstVT) {
      // ...
    }
    // ... "Wrong input/output vector types");
    if (DstEltSize == 1) {
      // ...
      assert(SrcEltSize >= 16 && "Unexpected FP type!");
      // ...
      /*...*/ DAG.getUNDEF(InterimIVT), SplatZero, VL);
      // ...
    }
    // ...
    while (InterimIVT != DstVT) {
      // ...
    }
  }
// ...
  MVT VT = Op.getSimpleValueType();
// ...
SDValue
RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
                                               /*...*/) {
  // ...
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  // ...
  if (IsMaskVector) {
    // ...
    /*...*/ SplatZeroOp1, DAG.getUNDEF(ContainerVT), EVL1);
    // ...
    /*...*/ SplatZeroOp2, DAG.getUNDEF(ContainerVT), EVL2);
  }
  // ...
  int64_t ImmValue = cast<ConstantSDNode>(Offset)->getSExtValue();
  SDValue DownOffset, UpOffset;
  if (ImmValue >= 0) {
    // ...
  }
  // ...
  /*...*/ Op1, DownOffset, Mask, UpOffset);
  // ...
  if (IsMaskVector) {
    // ...
    /*...*/ {Result, DAG.getConstant(0, DL, ContainerVT),
             DAG.getCondCode(ISD::SETNE), DAG.getUNDEF(getMaskTypeFor(ContainerVT)),
             /*...*/
  }
}
// ...
SDValue
RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
                                                /*...*/) {
  // ...
  MVT VT = Op.getSimpleValueType();
  // ...
  MVT ContainerVT = VT;
  // ...
  MVT GatherVT = ContainerVT;
  // ...
  if (IsMaskVector) {
    // ...
    /*...*/ SplatZero, DAG.getUNDEF(IndicesVT), EVL);
  }
  // ...
  unsigned MaxVLMAX =
      // ...
  if (MaxVLMAX > 256 && EltSize == 8) {
    // ...
    /*...*/ DAG.getUNDEF(GatherVT), Result, Diff, Mask, EVL);
    // ...
    if (IsMaskVector) {
      // ...
    }
    // ...
  }
  // ...
  /*...*/ DAG.getUNDEF(IndicesVT), VecLen, EVL);
  /*...*/ DAG.getUNDEF(IndicesVT), Mask, EVL);
  /*...*/ DAG.getUNDEF(GatherVT), Mask, EVL);
  // ...
  if (IsMaskVector) {
    // ...
  }
}
// ...
  MVT VT = Op.getSimpleValueType();
  // ...
    return lowerVPOp(Op, DAG);
  // ...
  MVT ContainerVT = VT;
// ...
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = VT;
  // ...
  auto *VPNode = cast<VPStridedLoadSDNode>(Op);
  // ...
  /*...*/ : Intrinsic::riscv_vlse_mask,
  // ...
  /*...*/ DAG.getUNDEF(ContainerVT), VPNode->getBasePtr(),
          VPNode->getStride()};
  // ...
  Ops.push_back(VPNode->getVectorLength());
  // ...
  /*...*/ VPNode->getMemoryVT(), VPNode->getMemOperand());
// ...
  auto *VPNode = cast<VPStridedStoreSDNode>(Op);
  SDValue StoreVal = VPNode->getValue();
  // ...
  MVT ContainerVT = VT;
  // ...
  /*...*/ : Intrinsic::riscv_vsse_mask,
  // ...
  /*...*/ VPNode->getBasePtr(), VPNode->getStride()};
  // ...
  Ops.push_back(VPNode->getVectorLength());
  // ...
  /*...*/ Ops, VPNode->getMemoryVT(),
          VPNode->getMemOperand());
  MVT VT = Op.getSimpleValueType();
  // ...
  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  // ...
  SDValue Chain = MemSD->getChain();
  // ...
  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
    Index = VPGN->getIndex();
    Mask = VPGN->getMask();
    // ...
    VL = VPGN->getVectorLength();
    // ...
  } else {
    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
    Index = MGN->getIndex();
    Mask = MGN->getMask();
    PassThru = MGN->getPassThru();
    // ...
  }
  // ...
  MVT IndexVT = Index.getSimpleValueType();
  // ... "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // ... "Unexpected extending MGATHER/VP_GATHER");
  // ...
  MVT ContainerVT = VT;
  // ...
  /*...*/ IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
  // ...
  Chain = Result.getValue(1);
// ...
  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  // ...
  SDValue Chain = MemSD->getChain();
  // ...
  [[maybe_unused]] bool IsTruncatingStore = false;
  // ...
  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
    Index = VPSN->getIndex();
    Mask = VPSN->getMask();
    Val = VPSN->getValue();
    VL = VPSN->getVectorLength();
    // ...
    IsTruncatingStore = false;
    // ...
  } else {
    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
    Index = MSN->getIndex();
    Mask = MSN->getMask();
    Val = MSN->getValue();
    IsTruncatingStore = MSN->isTruncatingStore();
    // ...
  }
  // ...
  MVT IndexVT = Index.getSimpleValueType();
  // ... "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // ...
  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
  // ...
  MVT ContainerVT = VT;
  // ...
  /*...*/ IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
  // ...
  /*...*/ DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
// ...
  /*...*/ RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  // ...
  static const int Table =
      // ...
// ...
  /*...*/ RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  // ...
  static const unsigned Table =
      // ...
// ...
  bool isRISCV64 = Subtarget.is64Bit();
// ...
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  // ...
    // ... "Unexpected custom legalisation");
    bool IsStrict = N->isStrictFPOpcode();
    // ...
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    // ...
    /*...*/ Opc, DL, VTs, Chain, Op0,
            // ...
    // ...
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    // ...
    /*...*/ Op0.getValueType() == MVT::f64 ? RTLIB::LROUND_F64
                                           : RTLIB::LROUND_F32;
    // ...
    assert(!Subtarget.is64Bit() && "READCYCLECOUNTER/READSTEADYCOUNTER only "
                                   "has custom type legalization on riscv32");
    // ...
    SDValue LoCounter, HiCounter;
    // ...
      /*...*/ RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding, DL, XLenVT);
      /*...*/ RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding, DL, XLenVT);
    // ...
      /*...*/ RISCVSysReg::lookupSysRegByName("TIME")->Encoding, DL, XLenVT);
      /*...*/ RISCVSysReg::lookupSysRegByName("TIMEH")->Encoding, DL, XLenVT);
    // ...
    /*...*/ N->getOperand(0), LoCounter, HiCounter);
    unsigned Size = N->getSimpleValueType(0).getSizeInBits();
    unsigned XLen = Subtarget.getXLen();
    // ...
    assert(Size == (XLen * 2) && "Unexpected custom legalisation");
    // ...
    if (LHSIsU == RHSIsU)
      // ...
    if (RHSIsU && LHSIsS && !RHSIsS)
      // ...
    else if (LHSIsU && RHSIsS && !LHSIsS)
      // ...
    // ... "Unexpected custom legalisation");
    // ... "Unexpected custom legalisation");
    // ...
    if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
        /*...*/
    // ... "Unexpected custom legalisation");
    assert((Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
            Subtarget.hasVendorXTHeadBb()) &&
           "Unexpected custom legalization");
    if (!isa<ConstantSDNode>(N->getOperand(1)) &&
        !(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()))
      // ...
    // ... "Unexpected custom legalisation");
    // ...
    MVT VT = N->getSimpleValueType(0);
    assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
           Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
           "Unexpected custom legalisation");
    // ...
    if (VT != MVT::i32)
      // ...
    // ... "Unexpected custom legalisation");
    // ...
    if (!isa<ConstantSDNode>(N->getOperand(1)))
      // ...
    EVT OType = N->getValueType(1);
    // ...
    // ... "Unexpected custom legalisation");
    // ...
    Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
                            /*...*/
    // ...
    Overflow = DAG.getSetCC(DL, N->getValueType(1), N->getOperand(0),
                            /*...*/
    // ...
    // ... "Unexpected custom legalisation");
    if (Subtarget.hasStdExtZbb()) {
      // ...
    }
    // ... "Unexpected custom legalisation");
    // ... "Unexpected custom legalisation");
    // ...
    if (Subtarget.hasStdExtZbb()) {
      // ...
    }
    // ...
    EVT VT = N->getValueType(0);
    // ...
    if (VT == MVT::i16 && Op0VT == MVT::f16 &&
        /*...*/) {
      // ...
    } else if (VT == MVT::i16 && Op0VT == MVT::bf16 &&
               Subtarget.hasStdExtZfbfmin()) {
      // ...
    } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
               /*...*/) {
      // ...
    } else if (VT == MVT::i64 && Op0VT == MVT::f64 && XLenVT == MVT::i32 &&
               Subtarget.hasStdExtZfa()) {
      // ...
      /*...*/ DAG.getVTList(MVT::i32, MVT::i32), Op0);
      // ...
    }
    // ...
    MVT VT = N->getSimpleValueType(0);
    // ...
    assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalisation");
    assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
    // ...
    assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
           /*...*/
           "Unexpected EXTRACT_VECTOR_ELT legalization");
    // ...
    MVT ContainerVT = VecVT;
    // ...
    /*...*/ DAG.getUNDEF(ContainerVT), Mask, VL);
    // ...
    unsigned IntNo = N->getConstantOperandVal(0);
    // ...
      // ... "Don't know how to custom type legalize this intrinsic!");
    case Intrinsic::experimental_get_vector_length: {
      // ...
    }
    case Intrinsic::riscv_orc_b:
    case Intrinsic::riscv_brev8:
    case Intrinsic::riscv_sha256sig0:
    case Intrinsic::riscv_sha256sig1:
    case Intrinsic::riscv_sha256sum0:
    case Intrinsic::riscv_sha256sum1:
    case Intrinsic::riscv_sm3p0:
    case Intrinsic::riscv_sm3p1: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
        // ...
    }
    case Intrinsic::riscv_sm4ks:
    case Intrinsic::riscv_sm4ed: {
      // ...
      /*...*/ DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, N->getOperand(3));
      // ...
    }
    case Intrinsic::riscv_mopr: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
        // ...
    }
    case Intrinsic::riscv_moprr: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
        // ...
    }
    case Intrinsic::riscv_clmul: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
        // ...
    }
    case Intrinsic::riscv_clmulh:
    case Intrinsic::riscv_clmulr: {
      if (!Subtarget.is64Bit() || N->getValueType(0) != MVT::i32)
        // ...
    }
    case Intrinsic::riscv_vmv_x_s: {
      EVT VT = N->getValueType(0);
      // ...
      if (VT.bitsLT(XLenVT)) {
        // ...
      }
      // ... "Unexpected custom legalization");
    }
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMIN:
    // ...
// ...
  const EVT VT = N->getValueType(0);
  const unsigned Opc = N->getOpcode();
  // ...
  if (/*...*/ (Opc != ISD::FADD || !N->getFlags().hasAllowReassociation()))
    // ...
  // ... "Inconsistent mappings");
  // ...
  if (/*...*/ !isa<ConstantSDNode>(RHS.getOperand(1)))
    // ...
  uint64_t RHSIdx = cast<ConstantSDNode>(RHS.getOperand(1))->getLimitedValue();
  // ...
  if (/*...*/ LHS.getOperand(0) == SrcVec && isa<ConstantSDNode>(LHS.getOperand(1))) {
    /*...*/ cast<ConstantSDNode>(LHS.getOperand(1))->getLimitedValue();
    if (0 == std::min(LHSIdx, RHSIdx) && 1 == std::max(LHSIdx, RHSIdx)) {
      // ...
      return DAG.getNode(ReduceOpc, DL, VT, Vec, N->getFlags());
    }
  }
  // ...
  if (LHS.getOpcode() != ReduceOpc)
    // ...
  auto Flags = ReduceVec->getFlags();
  Flags.intersectWith(N->getFlags());
  return DAG.getNode(ReduceOpc, DL, VT, Vec, Flags);
// ...
  auto BinOpToRVVReduce = [](unsigned Opc) {
    // ...
  };
  // ...
  auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
    return /*...*/ V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
  };
  // ...
  unsigned Opc = N->getOpcode();
  unsigned ReduceIdx;
  if (IsReduction(N->getOperand(0), Opc))
    // ...
  else if (IsReduction(N->getOperand(1), Opc))
    // ...
  // ...
  if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
    // ...
  SDValue Extract = N->getOperand(ReduceIdx);
  // ...
  SDValue NewStart = N->getOperand(1 - ReduceIdx);
// ...
  if (!Subtarget.hasStdExtZba())
    // ...
  EVT VT = N->getValueType(0);
  // ...
  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
  // ...
  int64_t C0 = N0C->getSExtValue();
  int64_t C1 = N1C->getSExtValue();
  if (C0 <= 0 || C1 <= 0)
    // ...
  int64_t Bits = std::min(C0, C1);
  int64_t Diff = std::abs(C0 - C1);
  if (Diff != 1 && Diff != 2 && Diff != 3)
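// ---- Editorial aside (standalone sketch; not part of this file) -----------
// The Zba rewrite guarded above, verified with plain integers:
// (X << C0) + (X << C1) == (X + (X << Diff)) << Bits, with
// Bits = min(C0, C1) and Diff = |C0 - C1|. Diff in {1, 2, 3} is what maps
// onto a single sh1add/sh2add/sh3add followed by one slli.
#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t X : {1ull, 3ull, 0x123456789abcdefull}) {
    const unsigned C0 = 4, C1 = 6;            // Diff = 2 -> sh2add
    const unsigned Bits = C0 < C1 ? C0 : C1;  // 4
    const unsigned Diff = C1 - C0;            // 2
    assert(((X << C0) + (X << C1)) == ((X + (X << Diff)) << Bits));
  }
}
// ---- End editorial aside ---------------------------------------------------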
  EVT VT = N->getValueType(0);
  // ...
  if ((!Subtarget.hasStdExtZicond() &&
       !Subtarget.hasVendorXVentanaCondOps()) ||
      /*...*/
  // ...
  bool SwapSelectOps;
  // ...
    SwapSelectOps = false;
    NonConstantVal = FalseVal;
    // ...
    SwapSelectOps = true;
    NonConstantVal = TrueVal;
  // ...
  FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
// ...
  EVT VT = N->getValueType(0);
  // ...
  auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  // ...
  if (!N0C->hasOneUse())
    // ...
  int64_t C0 = N0C->getSExtValue();
  int64_t C1 = N1C->getSExtValue();
  // ...
  if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
    // ...
  if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
      !isInt<12>(C0 * (C1 / C0))) {
    // ...
  } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
             isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
    // ...
  } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
             isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
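// ---- Editorial aside (standalone sketch; not part of this file) -----------
// The decomposition guarded above: X * C0 + C1 can be rebuilt as
// (X + C1/C0) * C0 + C1%C0 whenever both pieces fit 12-bit immediates,
// because C++ integer division guarantees C1 == C0*(C1/C0) + C1%C0 for any
// nonzero C0. The +1/-1 variants shift which remainder must be encodable.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t C0 = 73, C1 = 9001;
  assert(C1 == C0 * (C1 / C0) + C1 % C0);
  // Hence the rewrite is exact for every X:
  for (int64_t X : {-5, 0, 42})
    assert(X * C0 + C1 == (X + C1 / C0) * C0 + C1 % C0);
}
// ---- End editorial aside ---------------------------------------------------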
  EVT VT = N->getValueType(0);
// ...
  EVT VT = N->getValueType(0);
  // ...
  auto *N0C = dyn_cast<ConstantSDNode>(N0);
  // ...
  APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
  // ...
  if (!isIntEqualitySetCC(CCVal) || !SetCCOpVT.isInteger())
    // ...
// ...
  EVT VT = N->getValueType(0);
// ...
  bool IsAnd = N->getOpcode() == ISD::AND;
// ...
  EVT VT = N->getValueType(0);
// ...
  EVT VT = N->getValueType(0);
// ...
  EVT VT = N->getValueType(0);
  // ...
  auto *ConstN00 = dyn_cast<ConstantSDNode>(N0.getOperand(0));
  // ...
  const APInt &Imm = ConstN00->getAPIntValue();
  if ((Imm + 1).isSignedIntN(12))
    // ...
  /*...*/ LHS.getValueType());
// ...
  EVT VT = N->getValueType(0);
  // ...
  unsigned AddSubOpc;
  // ...
  auto IsAddSubWith1 = [&](SDValue V) -> bool {
    AddSubOpc = V->getOpcode();
    // ...
    SDValue Opnd = V->getOperand(1);
    MulOper = V->getOperand(0);
    // ...
  };
  // ...
  if (IsAddSubWith1(N0)) {
    // ...
    return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
  }
  if (IsAddSubWith1(N1)) {
    // ...
    return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
  }
// ...
  if (isIndexTypeSigned(IndexType))
    // ...
  if (!N->hasOneUse())
    // ...
  EVT VT = N.getValueType();
// ...
  EVT SrcVT = Src.getValueType();
  // ...
  NewElen = std::max(NewElen, 8U);
// ...
  EVT VT = N->getValueType(0);
  // ...
  if (OpVT != MVT::i64 || !Subtarget.is64Bit())
    // ...
  auto *N1C = dyn_cast<ConstantSDNode>(N1);
  // ...
  if (!isIntEqualitySetCC(Cond))
    // ...
  const APInt &C1 = N1C->getAPIntValue();
// ...
  EVT VT = N->getValueType(0);
  // ...
  if (/*...*/ cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
    /*...*/ Src.getOperand(0));
struct CombineResult;

enum ExtKind : uint8_t { ZExt = 1 << 0, SExt = 1 << 1, FPExt = 1 << 2 };
// ...
struct NodeExtensionHelper {
  // ...
  bool SupportsFPExt;
  // ...
  bool EnforceOneUse;
  // ...
    return OrigOperand;
  // ...
  unsigned getExtOpc(ExtKind SupportsExt) const {
    switch (SupportsExt) {
    case ExtKind::SExt:
      // ...
    case ExtKind::ZExt:
      // ...
    case ExtKind::FPExt:
      // ...
    }
  }
  // ...
  /*...*/ std::optional<ExtKind> SupportsExt) const {
    if (!SupportsExt.has_value())
      return OrigOperand;
    // ...
    MVT NarrowVT = getNarrowType(Root, *SupportsExt);
    // ...
    if (Source.getValueType() == NarrowVT)
      // ...
    unsigned ExtOpc = getExtOpc(*SupportsExt);
    // ...
    auto [Mask, VL] = getMaskAndVL(Root, DAG, Subtarget);
    // ...
      return DAG.getNode(ExtOpc, DL, NarrowVT, Source, Mask, VL);
    // ...
  }
  // ...
  static MVT getNarrowType(const SDNode *Root, ExtKind SupportsExt) {
    // ...
    MVT EltVT = SupportsExt == ExtKind::FPExt
                    ? /*...*/
                    : MVT::getIntegerVT(NarrowSize);
    // ...
    assert((int)NarrowSize >= (SupportsExt == ExtKind::FPExt ? 16 : 8) &&
           "Trying to extend something we can't represent");
    // ...
  }
  // ...
  static unsigned getSExtOpcode(unsigned Opcode) {
    // ...
  }
  // ...
  static unsigned getZExtOpcode(unsigned Opcode) {
    // ...
  }
  // ...
  static unsigned getFPExtOpcode(unsigned Opcode) {
    // ...
  }
  // ...
  static unsigned getSUOpcode(unsigned Opcode) {
    // ... "SU is only supported for MUL");
  }
  // ...
  static unsigned getWOpcode(unsigned Opcode, ExtKind SupportsExt) {
    // ...
  }
  // ...
  using CombineToTry = std::function<std::optional<CombineResult>(
      SDNode * /*Root*/, const NodeExtensionHelper & /*LHS*/,
      /*...*/)>;
  // ...
  bool needToPromoteOtherUsers() const { return EnforceOneUse; }
  // ...
    SupportsZExt = false;
    SupportsSExt = false;
    SupportsFPExt = false;
    EnforceOneUse = true;
    // ...
    unsigned Opc = OrigOperand.getOpcode();
    // ...
      if (ScalarBits != NarrowScalarBits * 2)
        // ...
      SupportsZExt = true;
      // ...
      SupportsSExt = true;
      // ...
      SupportsFPExt = true;
      // ...
      EnforceOneUse = false;
    // ...
    unsigned ScalarBits = Op.getValueSizeInBits();
    // ...
    if (ScalarBits < EltBits)
      // ...
    if (NarrowSize < 8)
      // ...
      SupportsSExt = true;
      // ...
      SupportsZExt = true;
  // ...
  static bool isSupportedRoot(const SDNode *Root) {
    // ...
  }
  // ...
    assert(isSupportedRoot(Root) && "Trying to build a helper with an "
                                    "unsupported root");
    assert(OperandIdx < 2 && "Requesting something else than LHS or RHS");
    // ...
    if (OperandIdx == 1) {
      // ...
      std::tie(Mask, VL) = getMaskAndVL(Root, DAG, Subtarget);
      // ...
      EnforceOneUse = false;
      // ...
    }
    // ...
    fillUpExtensionSupport(Root, DAG, Subtarget);
  // ...
  bool isVLCompatible(SDValue VL) const {
    return this->VL != SDValue() && this->VL == VL;
  }
  // ...
  bool isMaskCompatible(SDValue Mask) const {
    return !CheckMask || (this->Mask != SDValue() && this->Mask == Mask);
  }
  // ...
  static std::pair<SDValue, SDValue>
  // ...
    assert(isSupportedRoot(Root) && "Unexpected root");
    // ...
    auto [Mask, VL] = getMaskAndVL(Root, DAG, Subtarget);
    return isMaskCompatible(Mask) && isVLCompatible(VL);
  // ...
  switch (N->getOpcode()) {
  // ...
  }
};
// ...
struct CombineResult {
  // ...
  unsigned TargetOpcode;
  // ...
  std::optional<ExtKind> LHSExt;
  std::optional<ExtKind> RHSExt;
  // ...
  NodeExtensionHelper LHS;
  // ...
  NodeExtensionHelper RHS;
  // ...
  CombineResult(unsigned TargetOpcode, SDNode *Root,
                const NodeExtensionHelper &LHS, std::optional<ExtKind> LHSExt,
                const NodeExtensionHelper &RHS, std::optional<ExtKind> RHSExt)
      : TargetOpcode(TargetOpcode), LHSExt(LHSExt), RHSExt(RHSExt), Root(Root),
        /*...*/
  // ...
    std::tie(Mask, VL) =
        NodeExtensionHelper::getMaskAndVL(Root, DAG, Subtarget);
    // ...
    /*...*/ LHS.getOrCreateExtendedOp(Root, DAG, Subtarget, LHSExt),
            RHS.getOrCreateExtendedOp(Root, DAG, Subtarget, RHSExt),
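// Editorial note (not part of this file): NodeExtensionHelper and
// CombineResult form the widening-op (VW*) combine framework. The helper
// records, per operand of a candidate root, which extensions (sign, zero,
// FP) it can absorb and whether its other users would also need rewriting;
// a CombineResult pairs the chosen widening target opcode with the
// per-operand extension decisions, and materialize() appears to rebuild the
// root with freshly extended (or pass-through) operands under the root's
// own mask and VL.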
static std::optional<CombineResult>
canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
                                 const NodeExtensionHelper &RHS,
                                 /*...*/) {
  if (!LHS.areVLAndMaskCompatible(Root, DAG, Subtarget) ||
      !RHS.areVLAndMaskCompatible(Root, DAG, Subtarget))
    return std::nullopt;
  if ((AllowExtMask & ExtKind::ZExt) && LHS.SupportsZExt && RHS.SupportsZExt)
    return CombineResult(NodeExtensionHelper::getZExtOpcode(Root->getOpcode()),
                         Root, LHS, {ExtKind::ZExt}, RHS,
                         /*...*/
  if ((AllowExtMask & ExtKind::SExt) && LHS.SupportsSExt && RHS.SupportsSExt)
    return CombineResult(NodeExtensionHelper::getSExtOpcode(Root->getOpcode()),
                         Root, LHS, {ExtKind::SExt}, RHS,
                         /*...*/
  if ((AllowExtMask & ExtKind::FPExt) && /*...*/ RHS.SupportsFPExt)
    return CombineResult(NodeExtensionHelper::getFPExtOpcode(Root->getOpcode()),
                         Root, LHS, {ExtKind::FPExt}, RHS,
                         /*...*/
  return std::nullopt;
}
// ...
static std::optional<CombineResult>
canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
                             /*...*/) {
  return canFoldToVWWithSameExtensionImpl(
      Root, LHS, RHS, ExtKind::ZExt | ExtKind::SExt | ExtKind::FPExt, DAG,
      /*...*/
}
// ...
static std::optional<CombineResult>
canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
              /*...*/) {
  if (!RHS.areVLAndMaskCompatible(Root, DAG, Subtarget))
    return std::nullopt;
  // ...
  if (RHS.SupportsFPExt)
    return CombineResult(
        NodeExtensionHelper::getWOpcode(Root->getOpcode(), ExtKind::FPExt),
        Root, LHS, std::nullopt, RHS, {ExtKind::FPExt});
  // ...
    return CombineResult(
        NodeExtensionHelper::getWOpcode(Root->getOpcode(), ExtKind::ZExt), Root,
        LHS, std::nullopt, RHS, {ExtKind::ZExt});
  // ...
    return CombineResult(
        NodeExtensionHelper::getWOpcode(Root->getOpcode(), ExtKind::SExt), Root,
        LHS, std::nullopt, RHS, {ExtKind::SExt});
  return std::nullopt;
}
// ...
static std::optional<CombineResult>
canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                    /*...*/) {
  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, ExtKind::SExt, DAG,
                                          /*...*/
}
// ...
static std::optional<CombineResult>
canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                    /*...*/) {
  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, ExtKind::ZExt, DAG,
                                          /*...*/
}
// ...
static std::optional<CombineResult>
canFoldToVWWithFPEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                     /*...*/) {
  return canFoldToVWWithSameExtensionImpl(Root, LHS, RHS, ExtKind::FPExt, DAG,
                                          /*...*/
}
// ...
static std::optional<CombineResult>
canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
               /*...*/) {
  // ...
  if (!LHS.SupportsSExt || !RHS.SupportsZExt)
    return std::nullopt;
  if (!LHS.areVLAndMaskCompatible(Root, DAG, Subtarget) ||
      !RHS.areVLAndMaskCompatible(Root, DAG, Subtarget))
    return std::nullopt;
  return CombineResult(NodeExtensionHelper::getSUOpcode(Root->getOpcode()),
                       Root, LHS, {ExtKind::SExt}, RHS,
                       /*...*/
}
NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {
  // ...
    Strategies.push_back(canFoldToVWWithSameExtension);
    // ...
    Strategies.push_back(canFoldToVWWithSameExtension);
    // ...
    Strategies.push_back(canFoldToVWWithSameExtension);
    // ...
    Strategies.push_back(canFoldToVWWithSEXT);
    // ...
    Strategies.push_back(canFoldToVWWithZEXT);
    // ...
    Strategies.push_back(canFoldToVWWithFPEXT);
    // ...
}
// ...
  if (!NodeExtensionHelper::isSupportedRoot(N))
    // ...
  Inserted.insert(N);
  // ...
  while (!Worklist.empty()) {
    // ...
    if (!NodeExtensionHelper::isSupportedRoot(Root))
      // ...
    NodeExtensionHelper LHS(N, 0, DAG, Subtarget);
    NodeExtensionHelper RHS(N, 1, DAG, Subtarget);
    auto AppendUsersIfNeeded = [&Worklist,
                                &Inserted](const NodeExtensionHelper &Op) {
      if (Op.needToPromoteOtherUsers()) {
        for (SDNode *TheUse : Op.OrigOperand->uses()) {
          if (Inserted.insert(TheUse).second)
            // ...
        }
      }
    };
    // ...
    /*...*/ NodeExtensionHelper::getSupportedFoldings(N);
    // ...
    assert(!FoldingStrategies.empty() && "Nothing to be folded");
    bool Matched = false;
    for (int Attempt = 0;
         (Attempt != 1 + NodeExtensionHelper::isCommutative(N)) && !Matched;
         /*...*/) {
      for (NodeExtensionHelper::CombineToTry FoldingStrategy :
           FoldingStrategies) {
        std::optional<CombineResult> Res =
            FoldingStrategy(N, LHS, RHS, DAG, Subtarget);
        // ...
        if (Res->LHSExt.has_value())
          AppendUsersIfNeeded(LHS);
        if (Res->RHSExt.has_value())
          AppendUsersIfNeeded(RHS);
        // ...
      }
    }
  }
  // ...
  SDValue InputRootReplacement;
  // ...
  for (CombineResult Res : CombinesToApply) {
    SDValue NewValue = Res.materialize(DAG, Subtarget);
    if (!InputRootReplacement) {
      // ... "First element is expected to be the current node");
      InputRootReplacement = NewValue;
    }
    // ...
  }
  for (std::pair<SDValue, SDValue> OldNewValues : ValuesToReplace) {
    // ...
  }
  return InputRootReplacement;
// ...
  unsigned Opc = N->getOpcode();
  // ...
  SDValue MergeOp = N->getOperand(1);
  unsigned MergeOpc = MergeOp.getOpcode();
  // ...
  SDValue Passthru = N->getOperand(2);
  // ...
    Z = Z.getOperand(1);
  // ...
  /*...*/ {Y, X, Y, MergeOp->getOperand(0), N->getOperand(4)},
          /*...*/
// ...
  [[maybe_unused]] unsigned Opc = N->getOpcode();
  // ...
  EVT NewMemVT = (MemVT == MVT::i32) ? MVT::i64 : MVT::i128;
  // ...
  auto Ext = cast<LoadSDNode>(LSNode1)->getExtensionType();
  // ...
  if (MemVT == MVT::i32)
    // ...
  /*...*/ Opcode, SDLoc(LSNode1), DAG.getVTList({XLenVT, XLenVT, MVT::Other}),
          /*...*/
// ...
  if (!Subtarget.hasVendorXTHeadMemPair())
    // ...
  auto ExtractBaseAndOffset = [](SDValue Ptr) -> std::pair<SDValue, uint64_t> {
    // ...
      if (auto *C1 = dyn_cast<ConstantSDNode>(Ptr->getOperand(1)))
        return {Ptr->getOperand(0), C1->getZExtValue()};
    // ...
  };
  // ...
  auto [Base1, Offset1] = ExtractBaseAndOffset(LSNode1->getOperand(OpNum));
  // ...
  auto [Base2, Offset2] = ExtractBaseAndOffset(LSNode2->getOperand(OpNum));
  // ...
  if (Base1 != Base2)
    // ...
  bool Valid = false;
  if (MemVT == MVT::i32) {
    // Check for adjacent i32 values and a 2-bit index.
    if ((Offset1 + 4 == Offset2) && isShiftedUInt<2, 3>(Offset1))
      // ...
  } else if (MemVT == MVT::i64) {
    // Check for adjacent i64 values and a 2-bit index.
    if ((Offset1 + 8 == Offset2) && isShiftedUInt<2, 4>(Offset1))
      // ...
  }
  if (Src->isStrictFPOpcode() || Src->isTargetStrictFPOpcode())
    // ...
  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
    // ...
  EVT VT = N->getValueType(0);
  // ...
  MVT SrcVT = Src.getSimpleValueType();
  MVT SrcContainerVT = SrcVT;
  // ...
  SDValue XVal = Src.getOperand(0);
  // ...
    FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask, VL);
  // ...
    FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask, VL);
  // ...
    FpToInt = DAG.getNode(Opc, DL, ContainerVT, XVal, Mask,
                          /*...*/
  // ...
  if (VT != MVT::i32 && VT != XLenVT)
    // ...
// ...
  EVT DstVT = N->getValueType(0);
  if (DstVT != XLenVT)
    // ...
  if (Src->isStrictFPOpcode() || Src->isTargetStrictFPOpcode())
    // ...
  if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
    // ...
  EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  // ...
  if (SatVT == DstVT)
    // ...
  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
    // ...
  Src = Src.getOperand(0);
// ...
  assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
  // ...
  EVT VT = N->getValueType(0);
// ...
  unsigned Offset = N->isTargetStrictFPOpcode();
  // ...
  auto invertIfNegative = [&Mask, &VL](SDValue &V) {
    if (/*...*/ V.getOperand(2) == VL) {
      // ...
      V = V.getOperand(0);
      // ...
    }
    // ...
  };
  // ...
  bool NegA = invertIfNegative(A);
  bool NegB = invertIfNegative(B);
  bool NegC = invertIfNegative(C);
  // ...
  if (!NegA && !NegB && !NegC)
    // ...
  if (N->isTargetStrictFPOpcode())
    /*...*/ {N->getOperand(0), A, B, C, Mask, VL});
// ...
  if (N->getValueType(0).isScalableVector() &&
      N->getValueType(0).getVectorElementType() == MVT::f32 &&
      /*...*/
  // ...
  if (N->isTargetStrictFPOpcode())
    // ...
// ...
  switch (N->getOpcode()) {
  // ...
  }
  // ...
  return DAG.getNode(NewOpc, SDLoc(N), N->getValueType(0), Op0, Op1,
                     N->getOperand(2), Mask, VL);
// ...
  if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit())
    // ...
  if (!isa<ConstantSDNode>(N->getOperand(1)))
    // ...
  uint64_t ShAmt = N->getConstantOperandVal(1);
  // ...
  if (/*...*/ cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
      /*...*/
  // ...
    AddC = dyn_cast<ConstantSDNode>(N0.getOperand(IsAdd ? 1 : 0));
  // ...
    if (/*...*/ !isa<ConstantSDNode>(U->getOperand(1)) ||
        U->getConstantOperandVal(1) > 32)
      // ...
// ...
  if (!Cond.hasOneUse())
    // ...
  EVT VT = Cond.getValueType();
  // ...
    LHS = LHS.getOperand(0);
  // ...
  if (/*...*/ LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
    // ...
    CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    // ...
    RHS = LHS.getOperand(1);
    LHS = LHS.getOperand(0);
    // ...
  }
  // ...
    RHS = LHS.getOperand(1);
    LHS = LHS.getOperand(0);
  // ...
    ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
// ...
  bool Commutative = true;
  unsigned Opc = TrueVal.getOpcode();
  // ...
    Commutative = false;
  // ...
  if (!TrueVal.hasOneUse() || isa<ConstantSDNode>(FalseVal))
    // ...
  if (FalseVal == TrueVal.getOperand(0))
    // ...
  else if (Commutative && FalseVal == TrueVal.getOperand(1))
    // ...
  EVT VT = N->getValueType(0);
  // ...
  SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
  // ...
  assert(IdentityOperand && "No identity operand!");
  // ...
  /*...*/ DAG.getSelect(DL, OtherOpVT, N->getOperand(0), OtherOp, IdentityOperand);
  return DAG.getNode(TrueVal.getOpcode(), DL, VT, FalseVal, NewSel);
15103 CountZeroes =
N->getOperand(2);
15104 ValOnZero =
N->getOperand(1);
15106 CountZeroes =
N->getOperand(1);
15107 ValOnZero =
N->getOperand(2);
15126 if (
Cond->getOperand(0) != CountZeroesArgument)
15142 CountZeroes, BitWidthMinusOne);
15152 EVT VT =
N->getValueType(0);
15153 EVT CondVT =
Cond.getValueType();
15161 (Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps())) {
15167 const APInt &MaskVal =
LHS.getConstantOperandAPInt(1);
15188 SDValue TrueVal =
N->getOperand(1);
15189 SDValue FalseVal =
N->getOperand(2);
  EVT VT = N->getValueType(0);
  const unsigned Opcode = N->op_begin()->getNode()->getOpcode();
  if (Op.isUndef()) {
  if (Op.getOpcode() != Opcode || !Op.hasOneUse())
  if (!isa<ConstantSDNode>(Op.getOperand(1)) &&
      !isa<ConstantFPSDNode>(Op.getOperand(1)))
  if (Op.getOperand(0).getValueType() != Op.getOperand(1).getValueType())

  const unsigned InVecOpcode = InVec->getOpcode();
  if (!isa<ConstantSDNode>(InValRHS) && !isa<ConstantFPSDNode>(InValRHS))
      InVecLHS, InValLHS, EltNo);
      InVecRHS, InValRHS, EltNo);
  auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
  unsigned Elt = IndexC->getZExtValue();
  unsigned ConcatOpIdx = Elt / ConcatNumElts;
      ConcatOp, InVal, NewIdx);
  ConcatOps[ConcatOpIdx] = ConcatOp;
  if (N->getNumOperands() <= 2)
  MVT VT = N->getSimpleValueType(0);
  MVT ContainerVT = VT;
  size_t HalfNumOps = N->getNumOperands() / 2;
      N->ops().take_front(HalfNumOps));
      N->ops().drop_front(HalfNumOps));
  EVT VT = N->getValueType(0);
  auto *BaseLd = dyn_cast<LoadSDNode>(N->getOperand(0));
      !SDValue(BaseLd, 0).hasOneUse())
  EVT BaseLdVT = BaseLd->getValueType(0);
    auto *Ld = dyn_cast<LoadSDNode>(Op);
    if (!Ld || !Ld->isSimple() || !Op.hasOneUse() ||
        Ld->getValueType(0) != BaseLdVT)

  using PtrDiff = std::pair<std::variant<int64_t, SDValue>, bool>;
                     LoadSDNode *Ld2) -> std::optional<PtrDiff> {
    if (BIO1.equalBaseIndex(BIO2, DAG))
      return {{BIO2.getOffset() - BIO1.getOffset(), false}};
    SDValue P2 = Ld2->getBasePtr();
    return std::nullopt;

  auto BaseDiff = GetPtrDiff(Lds[0], Lds[1]);
  for (auto *It = Lds.begin() + 1; It != Lds.end() - 1; It++)
    if (GetPtrDiff(*It, *std::next(It)) != BaseDiff)
  unsigned WideScalarBitWidth =
  auto [StrideVariant, MustNegateStride] = *BaseDiff;
  SDValue Stride = std::holds_alternative<SDValue>(StrideVariant)
                       ? std::get<SDValue>(StrideVariant)
  if (MustNegateStride)
      BaseLd->getBasePtr(), Stride, AllOneMask};
  if (auto *ConstStride = dyn_cast<ConstantSDNode>(Stride);
      ConstStride && ConstStride->getSExtValue() >= 0)
        ConstStride->getSExtValue() * (N->getNumOperands() - 1);
      BaseLd->getPointerInfo(), BaseLd->getMemOperand()->getFlags(), MemSize,
      Ops, WideVecVT, MMO);
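// This combine turns a concat_vectors of loads spaced a constant (or uniform
// SDValue) stride apart into a single strided load; note that a precise
// memory size is only claimed when the stride is a known non-negative
// constant.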
  if (N->getValueType(0).isFixedLengthVector())
  SDValue Addend = N->getOperand(0);
  SDValue AddMergeOp = N->getOperand(2);
  auto IsVWMulOpc = [](unsigned Opc) {
    return std::make_pair(N->getOperand(3), N->getOperand(4));
  }(N, DAG, Subtarget);
  if (AddMask != MulMask || AddVL != MulVL)
         "Unexpected opcode after VWMACC_VL");
         "Unexpected opcode after VWMACC_VL!");
         "Unexpected opcode after VWMUL_VL!");
         "Unexpected opcode after VWMUL_VL!");
  EVT VT = N->getValueType(0);
  const EVT IndexVT = Index.getValueType();
  if (!isIndexTypeSigned(IndexType))

  for (unsigned i = 0; i < Index->getNumOperands(); i++) {
    if (Index->getOperand(i)->isUndef())
    if (C % ElementSize != 0)
    C = C / ElementSize;
    ActiveLanes.set(C);
  return ActiveLanes.all();

  if (NumElems % 2 != 0)
  const unsigned WiderElementSize = ElementSize * 2;
  if (WiderElementSize > ST.getELen() / 8)
  if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
  for (unsigned i = 0; i < Index->getNumOperands(); i++) {
    if (Index->getOperand(i)->isUndef())
    if (C % WiderElementSize != 0)
    if (C != Last + ElementSize)
  auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
  switch (N->getOpcode()) {
  APInt V = C->getValueAPF().bitcastToAPInt();
  if (SimplifyDemandedLowBitsHelper(0, 32) ||
      SimplifyDemandedLowBitsHelper(1, 5))
  if (SimplifyDemandedLowBitsHelper(0, 32))
  MVT VT = N->getSimpleValueType(0);
         "Unexpected value type!");
  EVT VT = N->getValueType(0);
  if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
        Src.getOperand(0));
        Src.getOperand(0), Src.getOperand(1));

  auto IsTruncNode = [](SDValue V) {
    SDValue VL = V.getOperand(2);
    auto *C = dyn_cast<ConstantSDNode>(VL);
    bool IsVLMAXForVMSET = (C && C->isAllOnes()) ||
                           (isa<RegisterSDNode>(VL) &&
                            cast<RegisterSDNode>(VL)->getReg() == RISCV::X0);
  while (IsTruncNode(Op)) {
    if (!Op.hasOneUse())
    Op = Op.getOperand(0);
  if (N->getOperand(1).getOpcode() == ISD::XOR &&
      N->getOperand(0), Cond);
  SDValue FalseV = N->getOperand(4);
  EVT VT = N->getValueType(0);
  if (TrueV == FalseV)
  if (!Subtarget.hasShortForwardBranchOpt() && isa<ConstantSDNode>(TrueV) &&
  int64_t TrueSImm = cast<ConstantSDNode>(TrueV)->getSExtValue();
  int64_t FalseSImm = cast<ConstantSDNode>(FalseV)->getSExtValue();
  if (isInt<12>(TrueSImm) && isInt<12>(FalseSImm) &&
      isInt<12>(TrueSImm - FalseSImm)) {
      {LHS, RHS, CC, TrueV, FalseV});
      N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
  EVT VT = N->getValueType(0);

  const auto *MGN = dyn_cast<MaskedGatherSDNode>(N);
  const EVT VT = N->getValueType(0);
  SDValue ScaleOp = MGN->getScale();
  assert(!MGN->isIndexScaled() &&
         "Scaled gather/scatter should not be formed");
      N->getVTList(), MGN->getMemoryVT(), DL,
      {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
       MGN->getBasePtr(), Index, ScaleOp},
      MGN->getMemOperand(), IndexType, MGN->getExtensionType());
      N->getVTList(), MGN->getMemoryVT(), DL,
      {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
       MGN->getBasePtr(), Index, ScaleOp},
      MGN->getMemOperand(), IndexType, MGN->getExtensionType());

  if (std::optional<VIDSequence> SimpleVID =
      SimpleVID && SimpleVID->StepDenominator == 1) {
    const int64_t StepNumerator = SimpleVID->StepNumerator;
    const int64_t Addend = SimpleVID->Addend;
    assert(MGN->getBasePtr()->getValueType(0) == PtrVT);
        {MGN->getChain(), IntID, MGN->getPassThru(), BasePtr,
         DAG.getConstant(StepNumerator, DL, XLenVT), MGN->getMask()};
        Ops, VT, MGN->getMemOperand());
        MGN->getBasePtr(), DAG.getUNDEF(XLenVT),
        MGN->getMemoryVT(), MGN->getMemOperand(),
          MGN->getMemOperand()->getBaseAlign(), Subtarget)) {
    for (unsigned i = 0; i < Index->getNumOperands(); i += 2)
    EVT IndexVT = Index.getValueType()
    assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
        EltCnt.divideCoefficientBy(2));
        EltCnt.divideCoefficientBy(2));
        {MGN->getChain(), Passthru, Mask, MGN->getBasePtr(),

  const auto *MSN = dyn_cast<MaskedScatterSDNode>(N);
  SDValue ScaleOp = MSN->getScale();
  assert(!MSN->isIndexScaled() &&
         "Scaled gather/scatter should not be formed");
      N->getVTList(), MSN->getMemoryVT(), DL,
      {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
      MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
      N->getVTList(), MSN->getMemoryVT(), DL,
      {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
      MSN->getMemOperand(), IndexType, MSN->isTruncatingStore());
  EVT VT = MSN->getValue()->getValueType(0);
  if (!MSN->isTruncatingStore() &&
    return DAG.getMaskedStore(MSN->getChain(), DL, Shuffle, MSN->getBasePtr(),
                              DAG.getUNDEF(XLenVT), MSN->getMask(),
                              MSN->getMemoryVT(), MSN->getMemOperand(),
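// The pairs of identical getMaskedGather/getMaskedScatter calls are the two
// exits of an index-narrowing combine: one rebuilds the node after shrinking
// the index type, the other after stripping a redundant extension. The
// VIDSequence path additionally converts a gather whose indices form a simple
// affine sequence into a strided-load intrinsic with a base and constant step.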
  case ISD::VP_GATHER: {
    const auto *VPGN = dyn_cast<VPGatherSDNode>(N);
    SDValue ScaleOp = VPGN->getScale();
    assert(!VPGN->isIndexScaled() &&
           "Scaled gather/scatter should not be formed");
        {VPGN->getChain(), VPGN->getBasePtr(), Index, ScaleOp, VPGN->getMask(),
         VPGN->getVectorLength()},
        VPGN->getMemOperand(), IndexType);
        {VPGN->getChain(), VPGN->getBasePtr(), Index, ScaleOp, VPGN->getMask(),
         VPGN->getVectorLength()},
        VPGN->getMemOperand(), IndexType);
  case ISD::VP_SCATTER: {
    const auto *VPSN = dyn_cast<VPScatterSDNode>(N);
    SDValue ScaleOp = VPSN->getScale();
    assert(!VPSN->isIndexScaled() &&
           "Scaled gather/scatter should not be formed");
        {VPSN->getChain(), VPSN->getValue(), VPSN->getBasePtr(), Index, ScaleOp,
         VPSN->getMask(), VPSN->getVectorLength()},
        VPSN->getMemOperand(), IndexType);
        {VPSN->getChain(), VPSN->getValue(), VPSN->getBasePtr(), Index, ScaleOp,
         VPSN->getMask(), VPSN->getVectorLength()},
        VPSN->getMemOperand(), IndexType);
  EVT VT = N->getValueType(0);
  return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
                     N->getOperand(2), N->getOperand(3), N->getOperand(4));

  EVT VT = N->getValueType(0);
  return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
  if (N->getValueType(0).isScalableVector() &&
      N->getValueType(0).getVectorElementType() == MVT::f32 &&

  auto *Store = cast<StoreSDNode>(N);
  SDValue Chain = Store->getChain();
  EVT MemVT = Store->getMemoryVT();
  SDValue Val = Store->getValue();
  bool IsScalarizable =
      Store->isSimple() &&
          NewVT, *Store->getMemOperand())) {
    return DAG.getStore(Chain, DL, NewV, Store->getBasePtr(),
                        Store->getPointerInfo(), Store->getOriginalAlign(),
                        Store->getMemOperand()->getFlags());
  if (auto *L = dyn_cast<LoadSDNode>(Val);
      L->hasNUsesOfValue(1, 0) && L->hasNUsesOfValue(1, 1) &&
      L->getMemoryVT() == MemVT) {
        NewVT, *Store->getMemOperand()) &&
        NewVT, *L->getMemOperand())) {
          L->getPointerInfo(), L->getOriginalAlign(),
          L->getMemOperand()->getFlags());
      return DAG.getStore(Chain, DL, NewL, Store->getBasePtr(),
                          Store->getPointerInfo(), Store->getOriginalAlign(),
                          Store->getMemOperand()->getFlags());
  MVT VecVT = Src.getSimpleValueType();
      Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
      Store->getMemOperand(), Store->getAddressingMode(),
      Store->isTruncatingStore(), false);
  EVT VT = N->getValueType(0);

  const MVT VT = N->getSimpleValueType(0);
  SDValue Passthru = N->getOperand(0);
  SDValue Scalar = N->getOperand(1);

  const MVT VT = N->getSimpleValueType(0);
  SDValue Passthru = N->getOperand(0);
  SDValue Scalar = N->getOperand(1);
  unsigned ScalarSize = Scalar.getValueSizeInBits();
  if (ScalarSize > EltWidth && Passthru.isUndef())
    if (SimplifyDemandedLowBitsHelper(1, EltWidth))
      (!Const || Const->isZero() ||
       !Const->getAPIntValue().sextOrTrunc(EltWidth).isSignedIntN(5)))
  if (N->getOperand(0).isUndef() &&
      Src.getOperand(0).getValueType().isScalableVector()) {
    EVT VT = N->getValueType(0);
    EVT SrcVT = Src.getOperand(0).getValueType();
    return Src.getOperand(0);

  const MVT VT = N->getSimpleValueType(0);
  SDValue Passthru = N->getOperand(0);
  SDValue Scalar = N->getOperand(1);
      DAG.getNode(N->getOpcode(), DL, M1VT, M1Passthru, Scalar, VL);
      Const && !Const->isZero() && isInt<5>(Const->getSExtValue()) &&
  MVT VecVT = N->getOperand(0).getSimpleValueType();
  if (M1VT.bitsLT(VecVT)) {
  unsigned IntNo = N->getConstantOperandVal(IntOpNo);
  case Intrinsic::riscv_masked_strided_load: {
    MVT VT = N->getSimpleValueType(0);
    auto *Load = cast<MemIntrinsicSDNode>(N);
    SDValue PassThru = N->getOperand(2);
    SDValue Stride = N->getOperand(4);
    if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
        StrideC && StrideC->getZExtValue() == ElementSize)
        DAG.getUNDEF(XLenVT), Mask, PassThru,
        Load->getMemoryVT(), Load->getMemOperand(),
  case Intrinsic::riscv_masked_strided_store: {
    auto *Store = cast<MemIntrinsicSDNode>(N);
    SDValue Stride = N->getOperand(4);
    const unsigned ElementSize = Value.getValueType().getScalarStoreSize();
    if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
        StrideC && StrideC->getZExtValue() == ElementSize)
        Store->getMemoryVT(), Store->getMemOperand(),
  case Intrinsic::riscv_vcpop:
  case Intrinsic::riscv_vcpop_mask:
  case Intrinsic::riscv_vfirst:
  case Intrinsic::riscv_vfirst_mask: {
    if (IntNo == Intrinsic::riscv_vcpop_mask ||
        IntNo == Intrinsic::riscv_vfirst_mask)
      VL = N->getOperand(3);
    EVT VT = N->getValueType(0);
    if (IntNo == Intrinsic::riscv_vfirst ||
        IntNo == Intrinsic::riscv_vfirst_mask)

  EVT VT = N->getValueType(0);
  if ((SrcVT == MVT::v1i1 || SrcVT == MVT::v2i1 || SrcVT == MVT::v4i1) &&
    EVT XVT, unsigned KeptBits) const {
  if (XVT != MVT::i32 && XVT != MVT::i64)
  if (KeptBits == 32 || KeptBits == 64)
  return Subtarget.hasStdExtZbb() &&
         ((KeptBits == 8 && XVT == MVT::i64 && !Subtarget.is64Bit()) ||

         "Expected shift op");
  auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  const APInt &C1Int = C1->getAPIntValue();
  APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
  if (C1Cost < ShiftedC1Cost)

  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  const APInt &Mask = C->getAPIntValue();
  auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
    return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
  auto UseMask = [Mask, Op, &TLO](const APInt &NewMask) -> bool {
    if (NewMask == Mask)
        Op.getOperand(0), NewC);
  APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
  if (IsLegalMask(NewMask))
    return UseMask(NewMask);
  if (VT == MVT::i64) {
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  APInt NewMask = ShrunkMask;
  if (MinSignedBits <= 12)
  else if (!C->isOpaque() && MinSignedBits <= 32 &&
           !ShrunkMask.isSignedIntN(32))
  assert(IsLegalMask(NewMask));
  return UseMask(NewMask);
  static const uint64_t GREVMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  for (unsigned Stage = 0; Stage != 6; ++Stage) {
    unsigned Shift = 1 << Stage;
    if (ShAmt & Shift) {
      uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
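// A self-contained restatement of the butterfly permutation computed above
// (the enclosing helper's signature is elided in this listing; the name
// computeGREVSketch is illustrative). Each of the six stages conditionally
// swaps bit groups of width 1 << Stage:
static uint64_t computeGREVSketch(uint64_t x, unsigned ShAmt) {
  static const uint64_t GREVMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
  for (unsigned Stage = 0; Stage != 6; ++Stage) {
    unsigned Shift = 1 << Stage;
    if (ShAmt & Shift) {
      uint64_t Mask = GREVMasks[Stage];
      x = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
    }
  }
  return x; // e.g. ShAmt == 7 reverses the bits within every byte
}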
    const APInt &DemandedElts,
    unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
  if (MaxVLenB == MinVLenB)
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {
    bool HasAVL = IntNo == Intrinsic::riscv_vsetvli;
    unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
    MaxVL = (Fractional) ? MaxVL / LMul : MaxVL * LMul;
    if (HasAVL && isa<ConstantSDNode>(Op.getOperand(1)))
      MaxVL = std::min(MaxVL, Op.getConstantOperandVal(1));
    unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;

    unsigned Depth) const {
  switch (Op.getOpcode()) {
    if (Tmp == 1)
      return 1;
    return std::min(Tmp, Tmp2);
    if (Tmp < 33)
      return 1;
    unsigned XLen = Subtarget.getXLen();
    unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
    if (EltBits <= XLen)
      return XLen - EltBits + 1;
    unsigned IntNo = Op.getConstantOperandVal(1);
    case Intrinsic::riscv_masked_atomicrmw_xchg_i64:
    case Intrinsic::riscv_masked_atomicrmw_add_i64:
    case Intrinsic::riscv_masked_atomicrmw_sub_i64:
    case Intrinsic::riscv_masked_atomicrmw_nand_i64:
    case Intrinsic::riscv_masked_atomicrmw_max_i64:
    case Intrinsic::riscv_masked_atomicrmw_min_i64:
    case Intrinsic::riscv_masked_atomicrmw_umax_i64:
    case Intrinsic::riscv_masked_atomicrmw_umin_i64:
    case Intrinsic::riscv_masked_cmpxchg_i64:
  assert(Subtarget.hasStdExtA());
  assert(Ld && "Unexpected null LoadSDNode");
  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
  if (!CNode || CNode->isMachineConstantPoolEntry() ||
      CNode->getOffset() != 0)
  auto *CNode = GetSupportedConstantPool(Ptr);
  if (!CNode || CNode->getTargetFlags() != 0)
  return CNode->getConstVal();
  auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
  auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(0).getOperand(0));
  if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
  return CNodeLo->getConstVal();
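// These helpers recognize the two ways a constant-pool entry is addressed:
// directly, or as an (ADD (LUI %hi(cp)), %lo(cp)) pair produced when the
// address is split into high and low parts; in the split form both halves
// must name the same constant before the underlying Constant may be folded.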
  assert(MI.getOpcode() == RISCV::ReadCounterWide && "Unexpected instruction");
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  int64_t LoCounter = MI.getOperand(2).getImm();
  int64_t HiCounter = MI.getOperand(3).getImm();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
  MI.eraseFromParent();

  assert((MI.getOpcode() == RISCV::SplitF64Pseudo ||
          MI.getOpcode() == RISCV::SplitF64Pseudo_INX) &&
         "Unexpected instruction");
  Register SrcReg = MI.getOperand(2).getReg();
          ? &RISCV::GPRPairRegClass
          : &RISCV::FPR64RegClass;
  MI.eraseFromParent();

  assert((MI.getOpcode() == RISCV::BuildPairF64Pseudo ||
          MI.getOpcode() == RISCV::BuildPairF64Pseudo_INX) &&
         "Unexpected instruction");
  Register DstReg = MI.getOperand(0).getReg();
      MI.getOpcode() == RISCV::BuildPairF64Pseudo_INX ? &RISCV::GPRPairRegClass
                                                      : &RISCV::FPR64RegClass;
  MI.eraseFromParent();
  switch (MI.getOpcode()) {
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR16_Using_CC_GPR:
  case RISCV::Select_FPR16INX_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR32INX_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
  case RISCV::Select_FPR64INX_Using_CC_GPR:
  case RISCV::Select_FPR64IN32X_Using_CC_GPR:

                                     unsigned RelOpcode, unsigned EqOpcode,
  Register DstReg = MI.getOperand(0).getReg();
  Register Src1Reg = MI.getOperand(1).getReg();
  Register Src2Reg = MI.getOperand(2).getReg();
  Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  MI.eraseFromParent();
  F->insert(It, FirstMBB);
  F->insert(It, SecondMBB);
  F->insert(It, SinkMBB);
  First.eraseFromParent();

  SelectDests.insert(MI.getOperand(0).getReg());
  if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR && Next != BB->end() &&
      Next->getOpcode() == MI.getOpcode() &&
      Next->getOperand(5).getReg() == MI.getOperand(0).getReg() &&
      Next->getOperand(5).isKill()) {
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
    if (SequenceMBBI->getOperand(1).getReg() != LHS ||
        SequenceMBBI->getOperand(2).getReg() != RHS ||
        SequenceMBBI->getOperand(3).getImm() != CC ||
        SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
        SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
    LastSelectPseudo = &*SequenceMBBI;
    SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    if (SequenceMBBI->hasUnmodeledSideEffects() ||
        SequenceMBBI->mayLoadOrStore() ||
        SequenceMBBI->usesCustomInsertionHook())
      return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  TailMBB->push_back(DebugInstr->removeFromParent());
  TailMBB->splice(TailMBB->end(), HeadMBB,
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addReg(SelectMBBI->getOperand(5).getReg())
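// A run of compatible Select_* pseudos that share one (LHS, RHS, CC)
// comparison is lowered with a single conditional branch:
//
//   HeadMBB --(branch if CC holds)--> TailMBB
//      |                                ^
//      v (fallthrough)                  |
//   IfFalseMBB -------------------------+
//
// and each pseudo becomes a PHI in TailMBB selecting between its true value
// (operand 4, reaching from HeadMBB) and its false value (operand 5, from
// IfFalseMBB).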
    unsigned CVTFOpc) {
  Register SavedFFLAGS = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      .add(MI.getOperand(1))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3))
      .add(MI.getOperand(4))
      .add(MI.getOperand(5))
      .add(MI.getOperand(6))
      .add(MI.getOperand(0))
      .add(MI.getOperand(1))
      .add(MI.getOperand(3))
      .add(MI.getOperand(4))
      .add(MI.getOperand(5))
      .add(MI.getOperand(6))
  MI.eraseFromParent();
  unsigned CmpOpc, F2IOpc, I2FOpc, FSGNJOpc, FSGNJXOpc;
  switch (MI.getOpcode()) {
  case RISCV::PseudoFROUND_H:
    CmpOpc = RISCV::FLT_H;
    F2IOpc = RISCV::FCVT_W_H;
    I2FOpc = RISCV::FCVT_H_W;
    FSGNJOpc = RISCV::FSGNJ_H;
    FSGNJXOpc = RISCV::FSGNJX_H;
    RC = &RISCV::FPR16RegClass;
  case RISCV::PseudoFROUND_H_INX:
    CmpOpc = RISCV::FLT_H_INX;
    F2IOpc = RISCV::FCVT_W_H_INX;
    I2FOpc = RISCV::FCVT_H_W_INX;
    FSGNJOpc = RISCV::FSGNJ_H_INX;
    FSGNJXOpc = RISCV::FSGNJX_H_INX;
    RC = &RISCV::GPRF16RegClass;
  case RISCV::PseudoFROUND_S:
    CmpOpc = RISCV::FLT_S;
    F2IOpc = RISCV::FCVT_W_S;
    I2FOpc = RISCV::FCVT_S_W;
    FSGNJOpc = RISCV::FSGNJ_S;
    FSGNJXOpc = RISCV::FSGNJX_S;
    RC = &RISCV::FPR32RegClass;
  case RISCV::PseudoFROUND_S_INX:
    CmpOpc = RISCV::FLT_S_INX;
    F2IOpc = RISCV::FCVT_W_S_INX;
    I2FOpc = RISCV::FCVT_S_W_INX;
    FSGNJOpc = RISCV::FSGNJ_S_INX;
    FSGNJXOpc = RISCV::FSGNJX_S_INX;
    RC = &RISCV::GPRF32RegClass;
  case RISCV::PseudoFROUND_D:
    CmpOpc = RISCV::FLT_D;
    F2IOpc = RISCV::FCVT_L_D;
    I2FOpc = RISCV::FCVT_D_L;
    FSGNJOpc = RISCV::FSGNJ_D;
    FSGNJXOpc = RISCV::FSGNJX_D;
    RC = &RISCV::FPR64RegClass;
  case RISCV::PseudoFROUND_D_INX:
    CmpOpc = RISCV::FLT_D_INX;
    F2IOpc = RISCV::FCVT_L_D_INX;
    I2FOpc = RISCV::FCVT_D_L_INX;
    FSGNJOpc = RISCV::FSGNJ_D_INX;
    FSGNJXOpc = RISCV::FSGNJX_D_INX;
    RC = &RISCV::GPRRegClass;

  F->insert(I, CvtMBB);
  F->insert(I, DoneMBB);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register MaxReg = MI.getOperand(2).getReg();
  int64_t FRM = MI.getOperand(3).getImm();
  Register FabsReg = MRI.createVirtualRegister(RC);
  Register CmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  Register F2IReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  MI.eraseFromParent();
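// A hedged scalar sketch of the idiom the PseudoFROUND expansion implements
// (float case shown; froundSketch and Max are illustrative names, and the
// real expansion uses the static rounding mode FRM rather than the floating
// environment):
#include <cmath>
static float froundSketch(float Src, float Max /* 2^mantissa-bits, e.g. 2^23 */) {
  if (std::fabs(Src) < Max) {        // FLT: |Src| >= Max is already integral
    float Cvt = std::nearbyint(Src); // models FCVT.W.S followed by FCVT.S.W
    Src = std::copysign(Cvt, Src);   // FSGNJ: restore the sign, handling -0.0
  }
  return Src;
}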
  switch (MI.getOpcode()) {
  case RISCV::ReadCounterWide:
           "ReadCounterWide is only to be used on riscv32");
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR16_Using_CC_GPR:
  case RISCV::Select_FPR16INX_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR32INX_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
  case RISCV::Select_FPR64INX_Using_CC_GPR:
  case RISCV::Select_FPR64IN32X_Using_CC_GPR:
  case RISCV::BuildPairF64Pseudo:
  case RISCV::BuildPairF64Pseudo_INX:
  case RISCV::SplitF64Pseudo:
  case RISCV::SplitF64Pseudo_INX:
  case RISCV::PseudoQuietFLE_H:
  case RISCV::PseudoQuietFLE_H_INX:
    return emitQuietFCMP(MI, BB, RISCV::FLE_H_INX, RISCV::FEQ_H_INX, Subtarget);
  case RISCV::PseudoQuietFLT_H:
  case RISCV::PseudoQuietFLT_H_INX:
    return emitQuietFCMP(MI, BB, RISCV::FLT_H_INX, RISCV::FEQ_H_INX, Subtarget);
  case RISCV::PseudoQuietFLE_S:
  case RISCV::PseudoQuietFLE_S_INX:
    return emitQuietFCMP(MI, BB, RISCV::FLE_S_INX, RISCV::FEQ_S_INX, Subtarget);
  case RISCV::PseudoQuietFLT_S:
  case RISCV::PseudoQuietFLT_S_INX:
    return emitQuietFCMP(MI, BB, RISCV::FLT_S_INX, RISCV::FEQ_S_INX, Subtarget);
  case RISCV::PseudoQuietFLE_D:
  case RISCV::PseudoQuietFLE_D_INX:
    return emitQuietFCMP(MI, BB, RISCV::FLE_D_INX, RISCV::FEQ_D_INX, Subtarget);
  case RISCV::PseudoQuietFLE_D_IN32X:
  case RISCV::PseudoQuietFLT_D:
  case RISCV::PseudoQuietFLT_D_INX:
    return emitQuietFCMP(MI, BB, RISCV::FLT_D_INX, RISCV::FEQ_D_INX, Subtarget);
  case RISCV::PseudoQuietFLT_D_IN32X:
  case RISCV::PseudoVFROUND_NOEXCEPT_V_M1_MASK:
                                      RISCV::PseudoVFCVT_F_X_V_M1_MASK);
  case RISCV::PseudoVFROUND_NOEXCEPT_V_M2_MASK:
                                      RISCV::PseudoVFCVT_F_X_V_M2_MASK);
  case RISCV::PseudoVFROUND_NOEXCEPT_V_M4_MASK:
                                      RISCV::PseudoVFCVT_F_X_V_M4_MASK);
  case RISCV::PseudoVFROUND_NOEXCEPT_V_M8_MASK:
                                      RISCV::PseudoVFCVT_F_X_V_M8_MASK);
  case RISCV::PseudoVFROUND_NOEXCEPT_V_MF2_MASK:
                                      RISCV::PseudoVFCVT_F_X_V_MF2_MASK);
  case RISCV::PseudoVFROUND_NOEXCEPT_V_MF4_MASK:
                                      RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
  case RISCV::PseudoFROUND_H:
  case RISCV::PseudoFROUND_H_INX:
  case RISCV::PseudoFROUND_S:
  case RISCV::PseudoFROUND_S_INX:
  case RISCV::PseudoFROUND_D:
  case RISCV::PseudoFROUND_D_INX:
  case RISCV::PseudoFROUND_D_IN32X:
  case TargetOpcode::STATEPOINT:
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
           "supported on 64-bit targets");
  if (MI.readsRegister(RISCV::FRM))

      RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
      RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
      RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
      RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
      RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
      RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
      RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
      RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
      RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
      RISCV::V20M2, RISCV::V22M2};

  static const MCPhysReg ArgIGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15,
                                       RISCV::X16, RISCV::X17};
  static const MCPhysReg ArgEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                       RISCV::X13, RISCV::X14, RISCV::X15};
  static const MCPhysReg FastCCIGPRs[] = {
      RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
      RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
      RISCV::X29, RISCV::X30, RISCV::X31};
  static const MCPhysReg FastCCEGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15,

  unsigned XLenInBytes = XLen / 8;
  Align StackAlign(XLenInBytes);
  if (!EABI || XLen != 32)
    std::optional<unsigned> FirstMaskArgument,
  if (RC == &RISCV::VRRegClass) {
    if (FirstMaskArgument && ValNo == *FirstMaskArgument)
  if (RC == &RISCV::VRM2RegClass)
  if (RC == &RISCV::VRM4RegClass)
  if (RC == &RISCV::VRM8RegClass)
    std::optional<unsigned> FirstMaskArgument) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
  if (ArgFlags.isNest()) {
    if (unsigned Reg = State.AllocateReg(RISCV::X7)) {
  if (!LocVT.isVector() && IsRet && ValNo > 1)
  bool UseGPRForF16_F32 = true;
  bool UseGPRForF64 = true;
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    UseGPRForF16_F32 = true;
    UseGPRForF64 = true;
  if (UseGPRForF16_F32 &&
      (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes &&
    if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
         "PendingLocs and PendingArgFlags out of sync");
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(PendingLocs.empty() && "Can't lower f64 if it is split");
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    PendingLocs.clear();
    PendingArgFlags.clear();
        XLen, State, VA, AF, ValNo, ValVT, LocVT, ArgFlags,
  unsigned StoreSizeBytes = XLen / 8;
  if ((ValVT == MVT::f16 || ValVT == MVT::bf16) && !UseGPRForF16_F32)
  else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
  else if (ValVT == MVT::f64 && !UseGPRForF64)
    Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
  if (!PendingLocs.empty()) {
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
    for (auto &It : PendingLocs) {
      It.convertToReg(Reg);
    PendingLocs.clear();
    PendingArgFlags.clear();
  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
         "Expected an XLenVT or vector types at this stage");
template <typename ArgTy>
  for (const auto &ArgIdx : enumerate(Args)) {
    MVT ArgVT = ArgIdx.value().VT;
      return ArgIdx.index();
  return std::nullopt;
void RISCVTargetLowering::analyzeInputArgs(
    RISCVCCAssignFn Fn) const {
  unsigned NumArgs = Ins.size();
  std::optional<unsigned> FirstMaskArgument;
  for (unsigned i = 0; i != NumArgs; ++i) {
    Type *ArgTy = nullptr;
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
            ArgFlags, CCInfo, true, IsRet, ArgTy, *this,
            FirstMaskArgument)) {
      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "

void RISCVTargetLowering::analyzeOutputArgs(
    CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
  unsigned NumArgs = Outs.size();
  std::optional<unsigned> FirstMaskArgument;
  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
            FirstMaskArgument)) {
      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
  if (In.isOrigArg()) {
    if ((BitWidth <= 32 && In.Flags.isSExt()) ||
        (BitWidth < 32 && In.Flags.isZExt())) {
  } else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32) {
      ExtType, DL, LocVT, Chain, FIN,
  Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
    unsigned ValNo, MVT ValVT, MVT LocVT,
    bool IsFixed, bool IsRet, Type *OrigTy,
    std::optional<unsigned> FirstMaskArgument) {
  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
  if (LocVT == MVT::f16 &&
      (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZfhmin())) {
    static const MCPhysReg FPR16List[] = {
        RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
        RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
        RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
        RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
    if (unsigned Reg = State.AllocateReg(FPR16List)) {
  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {
        RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
        RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
        RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
        RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
    if (unsigned Reg = State.AllocateReg(FPR32List)) {
  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {
        RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
        RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
        RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
        RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
    if (unsigned Reg = State.AllocateReg(FPR64List)) {
  if ((LocVT == MVT::f16 &&
       (Subtarget.hasStdExtZhinx() || Subtarget.hasStdExtZhinxmin())) ||
      (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.is64Bit() &&
       Subtarget.hasStdExtZdinx())) {
  if (LocVT == MVT::f16) {
  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {

  if (ArgFlags.isNest()) {
           "Attribute 'nest' is not supported in GHC calling convention");
      RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
      RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
    static const MCPhysReg FPR32List[] = {RISCV::F8_F,  RISCV::F9_F,
                                          RISCV::F18_F, RISCV::F19_F,
                                          RISCV::F20_F, RISCV::F21_F};
    if (unsigned Reg = State.AllocateReg(FPR32List)) {
  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
    static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
                                          RISCV::F24_D, RISCV::F25_D,
                                          RISCV::F26_D, RISCV::F27_D};
    if (unsigned Reg = State.AllocateReg(FPR64List)) {
  if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
      (LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
  switch (CallConv) {
  if (Subtarget.isRVE())
           "(Zdinx/D) instruction set extensions");
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.arg_empty())
          "Functions with the interrupt attribute cannot have arguments!");
    if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
          "Function interrupt attribute argument not supported!");
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  std::vector<SDValue> OutChains;
  analyzeInputArgs(MF, CCInfo, Ins, false,
  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
    unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
    unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
    while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
      unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
  int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
  if (VarArgsSaveSize == 0) {
    int VaArgOffset = -VarArgsSaveSize;
        XLenInBytes, VaArgOffset - static_cast<int>(XLenInBytes), true);
    VarArgsSaveSize += XLenInBytes;
    for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
          Chain, DL, ArgValue, FIN,
      OutChains.push_back(Store);
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
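// The vararg path above spills every unused integer argument register to a
// contiguous negative-offset save area so that va_arg can walk arguments in
// memory; OutChains collects the token factors for those stores.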
bool RISCVTargetLowering::isEligibleForTailCallOptimization(
  auto CalleeCC = CLI.CallConv;
  auto &Outs = CLI.Outs;
  auto CallerCC = Caller.getCallingConv();
  if (Caller.hasFnAttribute("interrupt"))
  for (auto &VA : ArgLocs)
  auto IsCallerStructRet = Caller.hasStructRetAttr();
  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (IsCallerStructRet || IsCalleeStructRet)
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
  for (auto &Arg : Outs)
    if (Arg.Flags.isByVal())
  if (Subtarget.isRVE())
  analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI,
    IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
          "site marked musttail");
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    if (!Flags.isByVal())
    unsigned Size = Flags.getByValSize();
    Align Alignment = Flags.getNonZeroByValAlign();
    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
  for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
    SDValue ArgValue = OutVals[OutIdx];
      if (!StackPtr.getNode())
      RegsToPass.push_back(std::make_pair(RegHigh, Hi));
      unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
      unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
      while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[OutIdx + 1];
        unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
      for (const auto &Part : Parts) {
        SDValue PartValue = Part.first;
        SDValue PartOffset = Part.second;
      ArgValue = SpillSlot;
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");
      if (!StackPtr.getNode())
  if (!MemOpChains.empty())
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    validateCCReservedRegs(RegsToPass, MF);
          "Return address register required, but has been reserved."});
  for (auto &Reg : RegsToPass)
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
           "Unexpected CFI type for a direct call");
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    auto &VA = RVLocs[i];
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.needsCustom());

  std::optional<unsigned> FirstMaskArgument;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
            ArgFlags, CCInfo, true, true, nullptr, *this, FirstMaskArgument))
  for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
    SDValue Val = OutVals[OutIdx];
          DAG.getVTList(MVT::i32, MVT::i32), Val);
      Register RegHi = RVLocs[++i].getLocReg();
            "Return value register required, but has been reserved."});
            "Return value register required, but has been reserved."});
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.getReturnType()->isVoidTy())
          "Functions with the interrupt attribute must have void return type!");
    if (Kind == "supervisor")
  return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
void RISCVTargetLowering::validateCCReservedRegs(
    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
          F, "Argument register required, but has been reserved."});

  if (N->getNumValues() != 1)
  if (!N->hasNUsesOfValue(1, 0))
  SDNode *Copy = *N->use_begin();
  if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
  bool HasRet = false;
  for (SDNode *Node : Copy->uses()) {
  Chain = Copy->getOperand(0);

#define NODE_NAME_CASE(NODE)                                                   \
  case RISCVISD::NODE:                                                         \
    return "RISCVISD::" #NODE;
#undef NODE_NAME_CASE
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  if (Constraint == "vr" || Constraint == "vm")

std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
        return std::make_pair(0U, &RISCV::GPRF16RegClass);
      if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
        return std::make_pair(0U, &RISCV::GPRF32RegClass);
      if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
        return std::make_pair(0U, &RISCV::GPRPairRegClass);
      return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
      if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
        return std::make_pair(0U, &RISCV::FPR16RegClass);
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
  } else if (Constraint == "vr") {
    for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
                           &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        return std::make_pair(0U, RC);
  } else if (Constraint == "vm") {
    if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
      return std::make_pair(0U, &RISCV::VMV0RegClass);
      .Case("{zero}", RISCV::X0)
      .Case("{ra}", RISCV::X1)
      .Case("{sp}", RISCV::X2)
      .Case("{gp}", RISCV::X3)
      .Case("{tp}", RISCV::X4)
      .Case("{t0}", RISCV::X5)
      .Case("{t1}", RISCV::X6)
      .Case("{t2}", RISCV::X7)
      .Cases("{s0}", "{fp}", RISCV::X8)
      .Case("{s1}", RISCV::X9)
      .Case("{a0}", RISCV::X10)
      .Case("{a1}", RISCV::X11)
      .Case("{a2}", RISCV::X12)
      .Case("{a3}", RISCV::X13)
      .Case("{a4}", RISCV::X14)
      .Case("{a5}", RISCV::X15)
      .Case("{a6}", RISCV::X16)
      .Case("{a7}", RISCV::X17)
      .Case("{s2}", RISCV::X18)
      .Case("{s3}", RISCV::X19)
      .Case("{s4}", RISCV::X20)
      .Case("{s5}", RISCV::X21)
      .Case("{s6}", RISCV::X22)
      .Case("{s7}", RISCV::X23)
      .Case("{s8}", RISCV::X24)
      .Case("{s9}", RISCV::X25)
      .Case("{s10}", RISCV::X26)
      .Case("{s11}", RISCV::X27)
      .Case("{t3}", RISCV::X28)
      .Case("{t4}", RISCV::X29)
      .Case("{t5}", RISCV::X30)
      .Case("{t6}", RISCV::X31)
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
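// The .Case chain above is an llvm::StringSwitch; the (elided) enclosing
// expression presumably reads:
//   Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
//                                .Case("{zero}", RISCV::X0)
//                                ...
//                                .Default(RISCV::NoRegister);
// mapping every ABI alias in an inline-asm constraint to its X-register.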
  if (Subtarget.hasStdExtF()) {
        .Cases("{f0}", "{ft0}", RISCV::F0_F)
        .Cases("{f1}", "{ft1}", RISCV::F1_F)
        .Cases("{f2}", "{ft2}", RISCV::F2_F)
        .Cases("{f3}", "{ft3}", RISCV::F3_F)
        .Cases("{f4}", "{ft4}", RISCV::F4_F)
        .Cases("{f5}", "{ft5}", RISCV::F5_F)
        .Cases("{f6}", "{ft6}", RISCV::F6_F)
        .Cases("{f7}", "{ft7}", RISCV::F7_F)
        .Cases("{f8}", "{fs0}", RISCV::F8_F)
        .Cases("{f9}", "{fs1}", RISCV::F9_F)
        .Cases("{f10}", "{fa0}", RISCV::F10_F)
        .Cases("{f11}", "{fa1}", RISCV::F11_F)
        .Cases("{f12}", "{fa2}", RISCV::F12_F)
        .Cases("{f13}", "{fa3}", RISCV::F13_F)
        .Cases("{f14}", "{fa4}", RISCV::F14_F)
        .Cases("{f15}", "{fa5}", RISCV::F15_F)
        .Cases("{f16}", "{fa6}", RISCV::F16_F)
        .Cases("{f17}", "{fa7}", RISCV::F17_F)
        .Cases("{f18}", "{fs2}", RISCV::F18_F)
        .Cases("{f19}", "{fs3}", RISCV::F19_F)
        .Cases("{f20}", "{fs4}", RISCV::F20_F)
        .Cases("{f21}", "{fs5}", RISCV::F21_F)
        .Cases("{f22}", "{fs6}", RISCV::F22_F)
        .Cases("{f23}", "{fs7}", RISCV::F23_F)
        .Cases("{f24}", "{fs8}", RISCV::F24_F)
        .Cases("{f25}", "{fs9}", RISCV::F25_F)
        .Cases("{f26}", "{fs10}", RISCV::F26_F)
        .Cases("{f27}", "{fs11}", RISCV::F27_F)
        .Cases("{f28}", "{ft8}", RISCV::F28_F)
        .Cases("{f29}", "{ft9}", RISCV::F29_F)
        .Cases("{f30}", "{ft10}", RISCV::F30_F)
        .Cases("{f31}", "{ft11}", RISCV::F31_F)
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      if (VT == MVT::f32 || VT == MVT::Other)
        return std::make_pair(FReg, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned HReg = RISCV::F0_H + RegNo;
        return std::make_pair(HReg, &RISCV::FPR16RegClass);
        .Case("{v0}", RISCV::V0)
        .Case("{v1}", RISCV::V1)
        .Case("{v2}", RISCV::V2)
        .Case("{v3}", RISCV::V3)
        .Case("{v4}", RISCV::V4)
        .Case("{v5}", RISCV::V5)
        .Case("{v6}", RISCV::V6)
        .Case("{v7}", RISCV::V7)
        .Case("{v8}", RISCV::V8)
        .Case("{v9}", RISCV::V9)
        .Case("{v10}", RISCV::V10)
        .Case("{v11}", RISCV::V11)
        .Case("{v12}", RISCV::V12)
        .Case("{v13}", RISCV::V13)
        .Case("{v14}", RISCV::V14)
        .Case("{v15}", RISCV::V15)
        .Case("{v16}", RISCV::V16)
        .Case("{v17}", RISCV::V17)
        .Case("{v18}", RISCV::V18)
        .Case("{v19}", RISCV::V19)
        .Case("{v20}", RISCV::V20)
        .Case("{v21}", RISCV::V21)
        .Case("{v22}", RISCV::V22)
        .Case("{v23}", RISCV::V23)
        .Case("{v24}", RISCV::V24)
        .Case("{v25}", RISCV::V25)
        .Case("{v26}", RISCV::V26)
        .Case("{v27}", RISCV::V27)
        .Case("{v28}", RISCV::V28)
        .Case("{v29}", RISCV::V29)
        .Case("{v30}", RISCV::V30)
        .Case("{v31}", RISCV::V31)
    if (VReg != RISCV::NoRegister) {
      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VMRegClass);
      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VRRegClass);
      for (const auto *RC :
           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
          return std::make_pair(VReg, RC);

  std::pair<Register, const TargetRegisterClass *> Res =
  if (Res.second == &RISCV::GPRF16RegClass ||
      Res.second == &RISCV::GPRF32RegClass ||
      Res.second == &RISCV::GPRPairRegClass)
    return std::make_pair(Res.first, &RISCV::GPRRegClass);
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        if (isInt<12>(CVal))
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        if (isUInt<5>(CVal))
  if (Subtarget.hasStdExtZtso()) {
  if (Subtarget.hasStdExtZtso()) {
  if (Subtarget.enableSeqCstTrailingFence() && isa<StoreInst>(Inst) &&

  if (Subtarget.hasForcedAtomics())
  if (Subtarget.hasStdExtZacas() &&
      (Size >= 32 || Subtarget.hasStdExtZabha()))
  if (Size < 32 && !Subtarget.hasStdExtZabha())
    return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    return Intrinsic::riscv_masked_atomicrmw_add_i32;
    return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    return Intrinsic::riscv_masked_atomicrmw_max_i32;
    return Intrinsic::riscv_masked_atomicrmw_min_i32;
    return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    return Intrinsic::riscv_masked_atomicrmw_add_i64;
    return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    return Intrinsic::riscv_masked_atomicrmw_max_i64;
    return Intrinsic::riscv_masked_atomicrmw_min_i64;
    return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    return Intrinsic::riscv_masked_atomicrmw_umin_i64;
  unsigned XLen = Subtarget.getXLen();
  unsigned ValWidth =
      {AlignedAddr, Incr, Mask, SextShamt, Ordering});
      Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  if (Subtarget.hasForcedAtomics())
  if (!(Subtarget.hasStdExtZabha() && Subtarget.hasStdExtZacas()) &&
  unsigned XLen = Subtarget.getXLen();
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
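// Sub-word cmpxchg is routed through the riscv_masked_cmpxchg intrinsics: the
// LR/SC loop operates on the aligned XLen word containing the value, compares
// only the bits selected by Mask, and conceptually merges the replacement as
//   NewWord = (OldWord & ~Mask) | (NewVal & Mask)
// before the SC attempt. (A sketch of the semantics; the exact IR is produced
// by AtomicExpand together with these hooks.)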
    EVT DataVT) const {
    return Subtarget.hasStdExtZfhmin();
    return Subtarget.hasStdExtF();
    return Subtarget.hasStdExtD();
                "RVVBitsPerBlock changed, audit needed");

  if (!Subtarget.hasVendorXTHeadMemIdx())
    Base = Op->getOperand(0);
    int64_t RHSC = RHS->getSExtValue();
    bool isLegalIndexedOffset = false;
    for (unsigned i = 0; i < 4; i++)
      if (isInt<5>(RHSC >> i) && ((RHSC % (1LL << i)) == 0)) {
        isLegalIndexedOffset = true;
    if (!isLegalIndexedOffset)
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    const Constant *PersonalityFn) const {
    const Constant *PersonalityFn) const {

  const bool HasExtMOrZmmul =
      Subtarget.hasStdExtM() || Subtarget.hasStdExtZmmul();
  if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
    const APInt &Imm = ConstNode->getAPIntValue();
    if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
        (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
    if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
        ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
         (Imm - 8).isPowerOf2()))
    if (!Imm.isSignedIntN(12) && Imm.countr_zero() < 12 &&
        ConstNode->hasOneUse()) {
      APInt ImmS = Imm.ashr(Imm.countr_zero());
      if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
          (1 - ImmS).isPowerOf2())
  if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))

    unsigned *Fast) const {
    *Fast = Subtarget.hasFastUnalignedAccess();
  return Subtarget.hasFastUnalignedAccess();
    *Fast = Subtarget.hasFastUnalignedAccess();
  return Subtarget.hasFastUnalignedAccess();

  if (FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat))
  if (Op.size() < MinVLenInBytes)
  MVT PreferredVT = (Op.isMemset() && !Op.isZeroMemset()) ? MVT::i8 : ELenVT;
  if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
    if (Op.isFixedDstAlign())
      RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
      RequiredAlign = std::min(RequiredAlign, Op.getSrcAlign());
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      if (ValueEltVT != PartEltVT) {
        if (PartVTBitSize > ValueVTBitSize) {
          assert(Count != 0 && "The number of element should not be zero.");
          EVT SameEltTypeVT =

    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
      PartVT == MVT::f32) {
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      EVT SameEltTypeVT = ValueVT;
      if (ValueEltVT != PartEltVT) {
        assert(Count != 0 && "The number of element should not be zero.");
  bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
  unsigned Opc = N->getOpcode();

    VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    if (FVTy->getNumElements() < 2)
  return Factor * LMUL <= 8;

    Align Alignment) const {
  if (!Subtarget.hasFastUnalignedAccess() &&
      Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
      Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
      Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
      Intrinsic::riscv_seg8_load};
  auto *VTy = cast<FixedVectorType>(Shuffles[0]->getType());
      {VTy, LI->getPointerOperandType(), XLenTy});
  Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
  for (unsigned i = 0; i < Shuffles.size(); i++) {
    Shuffles[i]->replaceAllUsesWith(SubVec);
      Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
      Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
      Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
      Intrinsic::riscv_seg8_store};
    unsigned Factor) const {
  auto *ShuffleVTy = cast<FixedVectorType>(SVI->getType());
      ShuffleVTy->getNumElements() / Factor);
      SI->getPointerAddressSpace(),
      SI->getModule()->getDataLayout()))
      {VTy, SI->getPointerOperandType(), XLenTy});
  for (unsigned i = 0; i < Factor; i++) {
  Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
  Ops.append({SI->getPointerOperand(), VL});
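// Interleaved accesses lower to the riscv_segN_load/store intrinsics (the
// vlsegN/vssegN instructions): a factor-N load yields N de-interleaved field
// vectors in one memory operation, and the store path passes the N field
// vectors plus the pointer and an element-count VL. The Factor * LMUL <= 8
// check above reflects the architectural limit NFIELDS * LMUL <= 8 on
// segment operations.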
  if (DI->getIntrinsicID() != Intrinsic::experimental_vector_deinterleave2)
  unsigned Factor = 2;
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
        {ResVTy, LI->getPointerOperandType(), XLenTy});
    VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
      Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
      Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
      Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
      Intrinsic::riscv_vlseg8};

  if (II->getIntrinsicID() != Intrinsic::experimental_vector_interleave2)
  unsigned Factor = 2;
      SI->getPointerAddressSpace(),
      SI->getModule()->getDataLayout()))
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
        {InVTy, SI->getPointerOperandType(), XLenTy});
    VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
      Intrinsic::riscv_vsseg2, Intrinsic::riscv_vsseg3,
      Intrinsic::riscv_vsseg4, Intrinsic::riscv_vsseg5,
      Intrinsic::riscv_vsseg6, Intrinsic::riscv_vsseg7,
      Intrinsic::riscv_vsseg8};
      SI->getPointerOperand(), VL});

         "Invalid call instruction for a KCFI check");
                      MBBI->getOpcode()));
  Target.setIsRenamable(false);
#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

  if (Reg == RISCV::NoRegister)
  if (Reg == RISCV::NoRegister)

  const MDNode *NontemporalInfo = I.getMetadata(LLVMContext::MD_nontemporal);
  if (NontemporalInfo == nullptr)
  int NontemporalLevel = 5;
  const MDNode *RISCVNontemporalInfo =
      I.getMetadata("riscv-nontemporal-domain");
  if (RISCVNontemporalInfo != nullptr)
        cast<ConstantAsMetadata>(RISCVNontemporalInfo->getOperand(0))
  assert((1 <= NontemporalLevel && NontemporalLevel <= 5) &&
         "RISC-V target doesn't support this non-temporal domain.");
  NontemporalLevel -= 2;
  if (NontemporalLevel & 0b1)
  if (NontemporalLevel & 0b10)
  return TargetFlags;
  return isTypeLegal(VT) && Subtarget.hasStdExtZvbb();
  return Subtarget.hasStdExtZbb() &&

  if (Op == Instruction::Add || Op == Instruction::Sub ||
      Op == Instruction::And || Op == Instruction::Or ||
      Op == Instruction::Xor || Op == Instruction::InsertElement ||
      Op == Instruction::Xor || Op == Instruction::ShuffleVector)
      !isa<ReturnInst>(&Inst))
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (AI->getAllocatedType()->isScalableTy())

RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
  if (!Subtarget.hasShortForwardBranchOpt())
  EVT VT = N->getValueType(0);
  if (!(VT == MVT::i32 || (VT == MVT::i64 && Subtarget.is64Bit())))
  if (Divisor.sgt(2048) || Divisor.slt(-2048))

bool RISCVTargetLowering::shouldFoldSelectWithSingleBitTest(
    EVT VT, const APInt &AndMask) const {
  if (Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps())
    return !Subtarget.hasStdExtZbs() && AndMask.ugt(1024);

unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"
static MachineBasicBlock * emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget)
static MachineBasicBlock * emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB, unsigned RelOpcode, unsigned EqOpcode, const RISCVSubtarget &Subtarget)
static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef< int > Mask)
Match shuffles that concatenate two vectors, rotate the concatenation, and then extract the original ...
static const Intrinsic::ID FixedVlsegIntrIds[]
static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static MVT getLMUL1VT(MVT VT)
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2, bool EABI)
static SDValue lowerVECTOR_SHUFFLEAsVSlide1(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
Match v(f)slide1up/down idioms.
static MachineBasicBlock * emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI, MachineBasicBlock *BB, unsigned CVTXOpc, unsigned CVTFOpc)
static const MCPhysReg ArgVRM2s[]
static bool isInterleaveShuffle(ArrayRef< int > Mask, MVT VT, int &EvenSrc, int &OddSrc, const RISCVSubtarget &Subtarget)
Is this shuffle interleaving contiguous elements from one vector into the even elements and contiguou...
static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG)
According to the property that indexed load/store instructions zero-extend their indices,...
static void promoteVCIXScalar(const SDValue &Op, SmallVectorImpl< SDValue > &Operands, SelectionDAG &DAG)
static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Scalar, SDValue VL, SelectionDAG &DAG)
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode)
static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru, SDValue Lo, SDValue Hi, SDValue VL, SelectionDAG &DAG)
static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue getAllOnesMask(MVT VecVT, SDValue VL, const SDLoc &DL, SelectionDAG &DAG)
Creates an all ones mask suitable for masking a vector of type VecTy with vector length VL.
static cl::opt< int > FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden, cl::desc("Give the maximum number of instructions that we will " "use for creating a floating-point immediate value"), cl::init(2))
static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performVWADDSUBW_VLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask, Align BaseAlign, const RISCVSubtarget &ST)
Match the index of a gather or scatter operation as an operation with twice the element width and hal...
static bool isLegalBitRotate(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, MVT &RotateVT, unsigned &RotateAmt)
static SDValue combineVFMADD_VLWithVFNEG_VL(SDNode *N, SelectionDAG &DAG)
static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1, SelectionDAG &DAG)
static SDValue useInversedSetcc(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second, MachineBasicBlock *ThisMBB, const RISCVSubtarget &Subtarget)
static SDValue performINSERT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue lowerFMAXIMUM_FMINIMUM(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue SplitStrictFPVectorOp(SDValue Op, SelectionDAG &DAG)
static std::optional< uint64_t > getExactInteger(const APFloat &APF, uint32_t BitWidth)
static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG)
static SDValue performMemPairCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG)
static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static unsigned getRVVReductionOp(unsigned ISDOpcode)
static std::optional< bool > matchSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue Val)
static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static MVT getMaskTypeFor(MVT VecVT)
Return the type of the mask type suitable for masking the provided vector type.
static SDValue getVCIXISDNodeVOID(SDValue &Op, SelectionDAG &DAG, unsigned Type)
static cl::opt< unsigned > NumRepeatedDivisors(DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden, cl::desc("Set the minimum number of repetitions of a divisor to allow " "transformation to multiplications by the reciprocal"), cl::init(2))
static SDValue foldSelectOfCTTZOrCTLZ(SDNode *N, SelectionDAG &DAG)
static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool hasMaskOp(unsigned Opcode)
Return true if a RISC-V target specified op has a mask operand.
static bool legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index, ISD::MemIndexType &IndexType, RISCVTargetLowering::DAGCombinerInfo &DCI)
static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static unsigned getRISCVVLOp(SDValue Op)
Get a RISC-V target specified VL op for a given SDNode.
static unsigned getVecReduceOpcode(unsigned Opc)
Given a binary operator, return the associative generic ISD::VECREDUCE_OP which corresponds to it.
static std::pair< SDValue, SDValue > getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performFP_TO_INT_SATCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT, SDValue StartValue, SDValue Vec, SDValue Mask, SDValue VL, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Helper to lower a reduction sequence of the form: scalar = reduce_op vec, scalar_start.
static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, std::optional< unsigned > FirstMaskArgument, CCState &State, const RISCVTargetLowering &TLI)
static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::pair< SDValue, SDValue > getDefaultScalableVLOps(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::optional< unsigned > preAssignMask(const ArgTy &Args)
static SDValue getVLOperand(SDValue Op)
static MachineBasicBlock * emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB, const RISCVSubtarget &Subtarget)
static cl::opt< bool > RV64LegalI32("riscv-experimental-rv64-legal-i32", cl::ReallyHidden, cl::desc("Make i32 a legal type for SelectionDAG on RV64."))
static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static cl::opt< unsigned > ExtensionMaxWebSize(DEBUG_TYPE "-ext-max-web-size", cl::Hidden, cl::desc("Give the maximum size (in number of nodes) of the web of " "instructions that we will consider for VW expansion"), cl::init(18))
static SDValue getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL, unsigned Policy=RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED)
static SDValue performCONCAT_VECTORSStridedLoadCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static bool isSelectPseudo(MachineInstr &MI)
static std::optional< MVT > getSmallestVTForIndex(MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool useRVVForFixedLengthVectorVT(MVT VT, const RISCVSubtarget &Subtarget)
static Value * useTpOffset(IRBuilderBase &IRB, unsigned Offset)
static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * emitSplitF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget)
static SDValue SplitVectorOp(SDValue Op, SelectionDAG &DAG)
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc)
static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerSMULO(SDValue Op, SelectionDAG &DAG)
static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG, SDValue TrueVal, SDValue FalseVal, bool Swapped)
static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask, SmallVector< int > &ShuffleMask)
Match the index vector of a scatter or gather node as the shuffle mask which performs the rearrangeme...
static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue SplitVPOp(SDValue Op, SelectionDAG &DAG)
static bool hasMergeOp(unsigned Opcode)
Return true if a RISC-V target specified op has a merge operand.
static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static void processVCIXOperands(SDValue &OrigOp, SmallVectorImpl< SDValue > &Operands, SelectionDAG &DAG)
static SDValue widenVectorOpsToi8(SDValue N, const SDLoc &DL, SelectionDAG &DAG)
static SDValue lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static std::optional< VIDSequence > isSimpleVIDSequence(SDValue Op, unsigned EltSizeInBits)
static SDValue getDeinterleaveViaVNSRL(const SDLoc &DL, MVT VT, SDValue Src, bool EvenElts, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue lowerUADDSAT_USUBSAT(SDValue Op, SelectionDAG &DAG)
static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC)
static SDValue lowerVECTOR_SHUFFLEAsRotate(ShuffleVectorSDNode *SVN, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc)
static SDValue lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performCONCAT_VECTORSSplitCombine(SDNode *N, SelectionDAG &DAG, const RISCVTargetLowering &TLI)
static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG)
static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isValidEGW(int EGS, EVT VT, const RISCVSubtarget &Subtarget)
static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static bool isNonZeroAVL(SDValue AVL)
static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef< int > Mask, const RISCVSubtarget &Subtarget, SelectionDAG &DAG)
static SDValue combineBinOp_VLToVWBinOp_VL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
Combine a binary operation to its equivalent VW or VW_W form.
static SDValue getVCIXISDNodeWCHAIN(SDValue &Op, SelectionDAG &DAG, unsigned Type)
static ArrayRef< MCPhysReg > getFastCCArgGPRs(const RISCVABI::ABI ABI)
static const MCPhysReg ArgVRM8s[]
static MachineBasicBlock * emitReadCounterWidePseudo(MachineInstr &MI, MachineBasicBlock *BB)
static const MCPhysReg ArgVRM4s[]
static cl::opt< bool > AllowSplatInVW_W(DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden, cl::desc("Allow the formation of VW_W operations (e.g., " "VWADD_W) with splat constants"), cl::init(false))
static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const CCValAssign &HiVA, const SDLoc &DL)
static SDValue lowerSADDSAT_SSUBSAT(SDValue Op, SelectionDAG &DAG)
static SDValue getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask, SDValue VL, unsigned Policy=RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED)
static SDValue tryMemPairCombine(SelectionDAG &DAG, LSBaseSDNode *LSNode1, LSBaseSDNode *LSNode2, SDValue BasePtr, uint64_t Imm)
static std::tuple< unsigned, SDValue, SDValue > getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT, const RISCVSubtarget &Subtarget)
static SDValue performFP_TO_INTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const RISCVSubtarget &Subtarget)
static const MCPhysReg ArgFPR16s[]
static SDValue combineBinOpOfExtractToReduceTree(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Perform two related transforms whose purpose is to incrementally recognize an explode_vector followed...
static SDValue performVFMADD_VLCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Try and optimize BUILD_VECTORs with "dominant values" - these are values which constitute a large pro...
static SDValue getVLOp(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, ISD::CondCode &CC, SelectionDAG &DAG)
static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
If we have a build_vector where each lane is binop X, C, where C is a constant (but not necessarily t...
static const Intrinsic::ID FixedVssegIntrIds[]
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isCommutative(Instruction *I)
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static constexpr int Concat[]
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
static APFloat getNaN(const fltSemantics &Sem, bool Negative=false, uint64_t payload=0)
Factory for NaN values.
Class for arbitrary precision integers.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
void clearAllBits()
Set every bit to 0.
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
APInt sext(unsigned width) const
Sign extend to a new width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
int64_t getSExtValue() const
Get sign extended value.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
An arbitrary precision integer that knows its signedness.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
StringRef getValueAsString() const
Return the attribute's value as a string.
static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG)
Parses tree in N for base, index, offset addresses.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
bool test(unsigned Idx) const
bool all() const
all - Returns true if all bits are set.
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
SmallVectorImpl< CCValAssign > & getPendingLocs()
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
This class represents a function call, abstracting a target machine's calling convention.
bool isExactlyValue(double V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getAPIntValue() const
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
Diagnostic information for unsupported feature in backend.
static constexpr ElementCount getScalable(ScalarTy MinVal)
static constexpr ElementCount getFixed(ScalarTy MinVal)
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Argument * getArg(unsigned i) const
bool hasExternalWeakLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Store the specified register of the given register class to the specified stack frame index.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
Load the specified register of the given register class from the specified stack frame index.
Common base class shared among various IRBuilders.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
FenceInst * CreateFence(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, const Twine &Name="")
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static InstructionCost getInvalid(CostType Val=0)
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Value * getPointerOperand()
Align getAlign() const
Return the alignment of the access that is being performed.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
const MDOperand & getOperand(unsigned I) const
static MVT getFloatingPointVT(unsigned BitWidth)
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
bool bitsLE(MVT VT) const
Return true if this has no more bits than VT.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static MVT getScalableVectorVT(MVT VT, unsigned NumElements)
MVT changeTypeToInteger()
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
uint64_t getScalarStoreSize() const
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool bitsGT(MVT VT) const
Return true if this has more bits than VT.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
bool isValid() const
Return true if this is a valid simple valuetype.
static MVT getIntegerVT(unsigned BitWidth)
MVT getDoubleNumVectorElementsVT() const
MVT getHalfNumVectorElementsVT() const
Return a VT for a vector type with the same element type but half the number of elements.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto integer_scalable_vector_valuetypes()
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
static auto fp_fixedlen_vector_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
void push_back(MachineInstr *MI)
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
Instructions::iterator instr_iterator
instr_iterator instr_end()
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
void setFlag(MIFlag Flag)
Set a MI flag.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
const MachinePointerInfo & getPointerInfo() const
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
This is an abstract virtual class for memory operations.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
void setVarArgsFrameIndex(int Index)
int getVarArgsFrameIndex() const
void setVarArgsSaveSize(int Size)
void addSExt32Register(Register Reg)
RISCVABI::ABI getTargetABI() const
unsigned getMinimumJumpTableEntries() const
bool hasStdExtCOrZca() const
unsigned getMaxLMULForFixedLengthVectors() const
bool hasVInstructionsI64() const
bool hasVInstructionsF64() const
bool hasStdExtDOrZdinx() const
bool hasStdExtZfhOrZhinx() const
unsigned getRealMinVLen() const
bool useRVVForFixedLengthVectors() const
bool isTargetFuchsia() const
unsigned getDLenFactor() const
bool hasVInstructionsF16Minimal() const
bool hasConditionalMoveFusion() const
bool isRegisterReservedByUser(Register i) const
bool hasVInstructionsF16() const
bool hasVInstructionsBF16() const
unsigned getMaxBuildIntsCost() const
Align getPrefLoopAlignment() const
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
bool useConstantPoolForLargeInts() const
Align getPrefFunctionAlignment() const
bool hasStdExtZfhminOrZhinxmin() const
unsigned getRealMaxVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVInstrInfo * getInstrInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
bool hasVInstructionsF32() const
bool hasStdExtFOrZfinx() const
static std::pair< unsigned, unsigned > computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget)
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
InstructionCost getVRGatherVVCost(MVT VT) const
Return the cost of a vrgather.vv instruction for the type VT.
bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const override
Return true if pulling a binary operation into a select with an identity constant is profitable.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
std::pair< int, bool > getLegalZfaFPImm(const APFloat &Imm, EVT VT) const
RISCVTargetLowering(const TargetMachine &TM, const RISCVSubtarget &STI)
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const override
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const override
This method returns the constant pool value that will be loaded by LD.
const RISCVSubtarget & getSubtarget() const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, StoreInst *SI) const override
Lower an interleave intrinsic to a target specific store intrinsic.
bool preferScalarizeSplat(SDNode *N) const override
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool canSplatOperand(Instruction *I, int Operand) const
Return true if the (vector) instruction I will be lowered to an instruction with a scalar splat opera...
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
InstructionCost getVRGatherVICost(MVT VT) const
Return the cost of a vrgather.vi (or vx) instruction for the type VT.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Return true if it is beneficial to convert a load of a constant to just the constant itself.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the register type for a given MVT, ensuring vectors are treated as a series of gpr sized integ...
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
bool shouldScalarizeBinop(SDValue VecOp) const override
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
bool areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX, const MemSDNode &NodeY) const override
Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize, unsigned MinSize)
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
InstructionCost getLMULCost(MVT VT) const
Return the cost of LMUL for linear operations.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const override
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
InstructionCost getVSlideVICost(MVT VT) const
Return the cost of a vslidedown.vi or vslideup.vi instruction for the type VT.
bool fallBackToDAGISel(const Instruction &Inst) const override
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vlsegN intrinsic.
bool isCtpopFast(EVT VT) const override
Return true if ctpop instruction is fast.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
MVT getContainerForFixedLengthVector(MVT VT) const
static unsigned getRegClassIDForVecVT(MVT VT)
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Returns the register with the specified architectural or ABI name.
InstructionCost getVSlideVXCost(MVT VT) const
Return the cost of a vslidedown.vx or vslideup.vx instruction for the type VT.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul)
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const override
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vssegN intrinsic.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool isLegalElementTypeForRVV(EVT ScalarTy) const
bool isVScaleKnownToBeAPowerOfTwo() const override
Return true only if vscale must be a power of two.
bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *II, LoadInst *LI) const override
Lower a deinterleave intrinsic to a target specific load intrinsic.
static RISCVII::VLMUL getLMUL(MVT VT)
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Return the number of registers for a given MVT, ensuring vectors are treated as a series of gpr sized...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override
bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace, const DataLayout &) const
Returns whether or not generating an interleaved load/store intrinsic for this type will be legal.
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node can be combined with a load/store to form a post-indexed load/store.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's address can be legally represented as a pre-indexed load/store address.
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like (%and = and %val, #mask; %cmp = icmp eq %and, 0; br %cmp, ...) into a single test-and-branch operation.
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
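A sketch of the usual 64-bit RISC rationale, assuming a hypothetical MyTargetLowering on a target that keeps i32 values sign-extended in 64-bit registers (as RV64 does):

  bool MyTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
    // i32 -> i64 sign extension is a no-op here, while zero extension
    // needs extra shifts or a dedicated zero-extend sequence.
    return SrcVT == MVT::i32 && DstVT == MVT::i64;
  }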
bool isLegalStridedLoadStore(EVT DataType, Align Alignment) const
Return true if a strided load/store of the given result type and alignment is legal.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicitly zero-extended, such as ARM ldrb / ldrh, or because it's folded, such as X86 zero-extending loads).
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setCFIType(uint32_t Type)
bool isUndef() const
Return true if the type of the node is undefined.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
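This is the complement of ComputeNumSignBits (listed below); a sketch of the relationship for a value of width W:

  // Redundant sign bits and significant bits partition the word:
  //   ComputeMaxSignificantBits(Op) == W - ComputeNumSignBits(Op) + 1
  // e.g. an i32 value known to fit in i16 has at least 17 sign bits,
  // hence at most 16 significant bits.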
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT, SDNodeFlags Flags)
Get the (commutative) neutral element for the given opcode, if it exists.
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new memory node's chain.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool isSafeToSpeculativelyExecute(unsigned Opcode) const
Some opcodes may create immediate undefined behavior when used with some values (integer division-by-zero, for example).
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value AND a token chain.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
bool shouldOptForSize() const
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
bool NewNodesMustHaveLegalTypes
When true, additional steps are taken to ensure that getConstant() and similar functions return DAG nodes that have legal types.
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not necessarily identical pieces.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
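Usage sketch, with DAG, DL, VT, and an SDValue X assumed in scope:

  // Two equivalent ways to build -X:
  SDValue Neg = DAG.getNegative(X, DL, VT);
  SDValue Neg2 = DAG.getNode(ISD::SUB, DL, VT,
                             DAG.getConstant(0, DL, VT), X);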
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
std::pair< SDValue, SDValue > SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the vector with EXTRACT_SUBVECTOR using the provided VTs and return the low/high part.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame, in which InSize bytes are set up inside the CALLSEQ_START..CALLSEQ_END pair.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
static const fltSemantics & EVTToAPFloatSemantics(EVT VT)
Returns an APFloat semantics tag appropriate for the given type.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending or rounding (by truncation).
std::pair< SDValue, SDValue > SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL)
Split the explicit vector length parameter of a VP operation.
SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step, Step * 2, Step * 3, ...>.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by truncation).
bool isKnownNeverNaN(SDValue Op, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDValue > Ops)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
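In terms of computeKnownBits above, this amounts to a subset test; a sketch with Op and an APInt Mask assumed in scope:

  KnownBits Known = DAG.computeKnownBits(Op);
  // 'Op & Mask' is provably zero iff every set bit of Mask is a
  // known-zero bit of Op.
  bool IsZero = Mask.isSubsetOf(Known.Zero);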
LLVMContext * getContext() const
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
This instruction constructs a fixed permutation of two input vectors.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction.
static bool isSplatMask(const int *Mask, EVT VT)
int getSplatIndex() const
ArrayRef< int > getMask() const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
constexpr size_t size() const
size - Get the string size.
std::string lower() const
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scalars in some contexts.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
virtual bool shouldFoldSelectWithSingleBitTest(EVT VT, const APInt &AndMask) const
virtual Value * getIRStackGuard(IRBuilderBase &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that location.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combine.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal'), or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').
std::vector< ArgListEntry > ArgListTy
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions. Ref: "Hacker's Delight" by Henry Warren, 10-1.
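The underlying branch-free idea, sketched in plain C++ for x / 2^k with signed x, rounding toward zero: bias negative dividends by 2^k - 1 before the arithmetic shift, and let a conditional move select the biased value:

  int64_t sdiv_pow2(int64_t x, unsigned k) {
    int64_t Biased = x + ((int64_t(1) << k) - 1); // bias for negatives
    int64_t T = (x < 0) ? Biased : x;             // lowered as a CMov
    return T >> k;                                // arithmetic shift
  }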
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op. At this point, we know that only the DemandedBits bits of the result of Op are ever used downstream.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
Type * getStructElementType(unsigned N) const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getContainedType(unsigned i) const
This method is used to implement the type iterator (defined at the end of the file).
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
Base class of all SIMD vector types.
constexpr ScalarTy getFixedValue() const
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr bool isZero() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ C
The default llvm calling convention, compatible with C.
bool isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are ~0 or undef.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value.
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm.maximum semantics.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16-bit) floating-point numbers.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type; the other elements are undefined.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same type.
@ STEP_VECTOR
STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised of a linear sequence of unsigned values starting from 0 with a step of IMM.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width (W).
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns the current rounding mode: -1 Undefined; 0 Round to 0; 1 Round to nearest, ties to even; 2 Round to +inf; 3 Round to -inf; 4 Round to nearest, ties away from zero.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a scalable vector.
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi, swapLo, swapHi).
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
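For example, sign-extending the low 8 bits of an i32 value X expands to the shift pair below (sketch; DAG and DL assumed in scope):

  // (sext_inreg X, i8) on i32 is (sra (shl X, 24), 24).
  SDValue Amt = DAG.getConstant(24, DL, MVT::i32);
  SDValue Ext = DAG.getNode(ISD::SRA, DL, MVT::i32,
                            DAG.getNode(ISD::SHL, DL, MVT::i32, X, Amt), Amt);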
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM parameter.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt).
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W).
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same type.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable elements.
bool isBuildVectorOfConstantSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantSDNode or undef.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 or undef.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
std::optional< unsigned > getVPMaskIdx(unsigned Opcode)
The operand position of the vector mask.
std::optional< unsigned > getVPExplicitVectorLengthIdx(unsigned Opcode)
The operand position of the explicit vector length parameter.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
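For example (the EVT argument of getSetCCInverse distinguishes integer from floating-point inversion):

  ISD::CondCode Swapped = ISD::getSetCCSwappedOperands(ISD::SETLT); // SETGT
  ISD::CondCode Inverse = ISD::getSetCCInverse(ISD::SETLT, MVT::i32); // SETGE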
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calculating addresses.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR node of all ConstantFPSDNode or undef.
static const int FIRST_TARGET_STRICTFP_OPCODE
FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations which cannot raise FP exceptions s...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode)
Get underlying scalar opcode for VECREDUCE opcode.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isVPOpcode(unsigned Opcode)
Whether this is a vector-predicated Opcode.
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with integer operands.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
ABI getTargetABI(StringRef ABIName)
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_Undef()
Match an arbitrary undef constant.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
@ TAIL_UNDISTURBED_MASK_UNDISTURBED
static int getFRMOpNum(const MCInstrDesc &Desc)
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #3 and #4) ...
@ STRICT_VFCVT_RTZ_XU_F_VL
@ LAST_RISCV_STRICTFP_OPCODE
@ STRICT_VFROUND_NOEXCEPT_VL
@ SPLAT_VECTOR_SPLIT_I64_VL
@ STRICT_VFCVT_RTZ_X_F_VL
int getLoadFPImm(APFloat FPImm)
getLoadFPImm - Return a 5-bit binary encoding of the floating-point immediate value.
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
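Usage sketch, assuming these helpers live in the RISCVMatInt namespace and STI is the target's MCSubtargetInfo. Materializing 0x12345678 typically becomes a lui+addiw pair, since the low 12 bits (0x678) fit the signed addiw immediate:

  //   lui   a0, 0x12345
  //   addiw a0, a0, 0x678
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(0x12345678, STI);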
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
static unsigned decodeVSEW(unsigned VSEW)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional)
static unsigned encodeSEW(unsigned SEW)
static constexpr unsigned FPMASK_Negative_Zero
static constexpr unsigned FPMASK_Positive_Subnormal
static constexpr unsigned FPMASK_Positive_Normal
static constexpr unsigned FPMASK_Negative_Subnormal
static constexpr unsigned FPMASK_Negative_Normal
static constexpr unsigned FPMASK_Positive_Infinity
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
static constexpr unsigned FPMASK_Negative_Infinity
static constexpr unsigned FPMASK_Quiet_NaN
ArrayRef< MCPhysReg > getArgGPRs(const RISCVABI::ABI ABI)
bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static constexpr unsigned FPMASK_Signaling_NaN
static constexpr unsigned FPMASK_Positive_Zero
static constexpr unsigned RVVBitsPerBlock
bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, std::optional< unsigned > FirstMaskArgument)
bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, std::optional< unsigned > FirstMaskArgument)
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ Kill
The last use of a register.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
IterT next_nodbg(IterT It, IterT End, bool SkipPseudoOp=true)
Increment It, then continue incrementing it while it points to a debug instruction.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
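For instance:

  // Plain integer division rounds down (7 / 2 == 3); divideCeil rounds up.
  uint64_t Groups = llvm::divideCeil(7, 2); // == 4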
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
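A sketch of the classic bit trick such a helper implements (not necessarily the exact upstream source):

  // A power of two has exactly one set bit, so clearing the lowest set
  // bit must leave zero; zero itself is excluded explicitly.
  bool IsPow2 = Value != 0 && (Value & (Value - 1)) == 0;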
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with no undefs by default).
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
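Equivalent to locating the highest set bit; a sketch using the counting helper from llvm/ADT/bit.h:

  // For Value != 0, floor(log2(Value)) is 63 minus the leading-zero count.
  unsigned FloorLog2 = 63 - llvm::countl_zero(Value);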
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
bool isReleaseOrStronger(AtomicOrdering AO)
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit edition).
bool isOneOrOneSplat(SDValue V, bool AllowUndefs=false)
Return true if the value is a constant 1 integer or a splatted vector of a constant 1 integer (with no undefs by default).
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
RoundingMode
Rounding mode.
@ TowardZero
roundTowardZero.
@ NearestTiesToEven
roundTiesToEven.
@ TowardPositive
roundTowardPositive.
@ NearestTiesToAway
roundTiesToAway.
@ TowardNegative
roundTowardNegative.
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
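A sketch of the standard shift trick, assuming 0 < B <= 64 and an unsigned x:

  // Move the B-bit field to the top, then arithmetic-shift back down so
  // bit B-1 is replicated; e.g. SignExtend64<12>(0xFFF) == -1.
  int64_t Ext = int64_t(x << (64 - B)) >> (64 - B);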
unsigned Log2(Align A)
Returns the log2 of the alignment.
llvm::SmallVector< int, 16 > createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
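Usage sketch; undef lanes are encoded as -1:

  // Produces <0, 1, 2, 3, -1, -1>.
  llvm::SmallVector<int, 16> Mask =
      llvm::createSequentialMask(/*Start=*/0, /*NumInts=*/4, /*NumUndefs=*/2);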
bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V, unsigned OperandNo)
Returns true if V is a neutral element of Opc with Flags.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static unsigned int semanticsPrecision(const fltSemantics &)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
bool isInteger() const
Return true if this is an integer or a vector integer type.
Helper struct to store a base, index and offset that forms an address.
Align getNonZeroOrigAlign() const
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
bool isUnknown() const
Returns true if we don't know any bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
BitVector getReservedRegs(const MachineFunction &MF) const override
Register getFrameRegister(const MachineFunction &MF) const override
bool hasScalarOperand() const
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg. If BaseGV is null, there is no BaseGV.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
const ConstantInt * CFIType
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isAfterLegalizeDAG() const
void AddToWorklist(SDNode *N)
bool recursivelyDeleteUnusedNodes(SDNode *N)
bool isBeforeLegalize() const
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.
bool CombineTo(SDValue O, SDValue N)