19#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
29#define PASS_NAME "RISCV DAG->DAG Pattern Instruction Selection"
32#define GET_RISCVVSSEGTable_IMPL
33#define GET_RISCVVLSEGTable_IMPL
34#define GET_RISCVVLXSEGTable_IMPL
35#define GET_RISCVVSXSEGTable_IMPL
36#define GET_RISCVVLETable_IMPL
37#define GET_RISCVVSETable_IMPL
38#define GET_RISCVVLXTable_IMPL
39#define GET_RISCVVSXTable_IMPL
40#define GET_RISCVMaskedPseudosTable_IMPL
41#include "RISCVGenSearchableTables.inc"
45 assert(
Node->getNumOperands() > 0 &&
"Node with no operands");
46 unsigned LastOpIdx =
Node->getNumOperands() - 1;
63 bool MadeChange =
false;
70 switch (
N->getOpcode()) {
74 MVT VT =
N->getSimpleValueType(0);
80 N->getOperand(0), VL);
87 assert(
N->getNumOperands() == 4 &&
"Unexpected number of operands");
88 MVT VT =
N->getSimpleValueType(0);
137 LLVM_DEBUG(
dbgs() <<
"RISCV DAG preprocessing replacing:\nOld: ");
156 bool MadeChange =
false;
160 if (
N->use_empty() || !
N->isMachineOpcode())
163 MadeChange |= doPeepholeSExtW(
N);
164 MadeChange |= doPeepholeMaskedRVV(
N);
169 MadeChange |= doPeepholeMergeVVMFold();
181 switch (Inst.getOpndKind()) {
// NOTE(review): this region is a partial extract — original source lines
// are missing between the numbered statements below; comments describe
// only what is visible here.
//
// Register-class IDs for NF-element tuples of LMUL=1 vector registers,
// indexed by NF - 2 (i.e. entry 0 is the 2-register tuple class) — see
// the `M1TupleRegClassIDs[NF - 2]` lookup below.
214 static const unsigned M1TupleRegClassIDs[] = {
215 RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
216 RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
217 RISCV::VRN8M1RegClassID};
// Same idea for tuples of LMUL=2 registers (2..4 registers per tuple).
218 static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
219 RISCV::VRN3M2RegClassID,
220 RISCV::VRN4M2RegClassID};
// The static_asserts guarantee the sub_vrmN_* subregister indices are
// consecutive, so a tuple member can be addressed as SubReg0 + offset.
233 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
234 "Unexpected subreg numbering");
235 SubReg0 = RISCV::sub_vrm1_0;
236 RegClassID = M1TupleRegClassIDs[NF - 2];
239 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
240 "Unexpected subreg numbering");
241 SubReg0 = RISCV::sub_vrm2_0;
242 RegClassID = M2TupleRegClassIDs[NF - 2];
245 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
246 "Unexpected subreg numbering");
247 SubReg0 = RISCV::sub_vrm4_0;
// LMUL=4 tuples only come in one size (2 registers), so no table is needed.
248 RegClassID = RISCV::VRN2M4RegClassID;
257 for (
unsigned I = 0;
I < Regs.
size(); ++
I) {
269 bool IsLoad,
MVT *IndexVT) {
270 SDValue Chain = Node->getOperand(0);
273 Operands.push_back(Node->getOperand(CurOp++));
275 if (IsStridedOrIndexed) {
276 Operands.push_back(Node->getOperand(CurOp++));
278 *IndexVT =
Operands.back()->getSimpleValueType(0);
283 SDValue Mask = Node->getOperand(CurOp++);
297 if (IsMasked && IsLoad) {
299 uint64_t Policy = Node->getConstantOperandVal(CurOp++);
316 unsigned NF = Node->getNumValues() - 1;
317 MVT VT = Node->getSimpleValueType(0);
325 Node->op_begin() + CurOp + NF);
337 RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided,
false,
Log2SEW,
338 static_cast<unsigned>(LMUL));
342 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
346 for (
unsigned I = 0;
I < NF; ++
I) {
358 unsigned NF = Node->getNumValues() - 2;
359 MVT VT = Node->getSimpleValueType(0);
368 Node->op_begin() + CurOp + NF);
381 RISCV::getVLSEGPseudo(NF, IsMasked, IsTU,
false,
true,
382 Log2SEW,
static_cast<unsigned>(LMUL));
386 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
390 for (
unsigned I = 0;
I < NF; ++
I) {
404 unsigned NF = Node->getNumValues() - 1;
405 MVT VT = Node->getSimpleValueType(0);
413 Node->op_begin() + CurOp + NF);
427 "Element count mismatch");
431 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
433 "values when XLEN=32");
436 NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
437 static_cast<unsigned>(IndexLMUL));
441 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
445 for (
unsigned I = 0;
I < NF; ++
I) {
458 unsigned NF = Node->getNumOperands() - 4;
463 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
471 unsigned CurOp = 2 + NF;
477 NF, IsMasked, IsStrided,
Log2SEW,
static_cast<unsigned>(LMUL));
481 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
490 unsigned NF = Node->getNumOperands() - 5;
493 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
501 unsigned CurOp = 2 + NF;
509 "Element count mismatch");
513 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
515 "values when XLEN=32");
518 NF, IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
519 static_cast<unsigned>(IndexLMUL));
523 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
538 unsigned IntNo = Node->getConstantOperandVal(0);
540 assert((IntNo == Intrinsic::riscv_vsetvli ||
541 IntNo == Intrinsic::riscv_vsetvlimax) &&
542 "Unexpected vsetvli intrinsic");
544 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
545 unsigned Offset = (VLMax ? 1 : 2);
548 "Unexpected number of operands");
553 Node->getConstantOperandVal(
Offset + 1) & 0x7);
562 unsigned Opcode = RISCV::PseudoVSETVLI;
565 Opcode = RISCV::PseudoVSETVLIX0;
567 VLOperand = Node->getOperand(1);
569 if (
auto *
C = dyn_cast<ConstantSDNode>(VLOperand)) {
571 if (isUInt<5>(AVL)) {
587 MVT VT = Node->getSimpleValueType(0);
588 unsigned Opcode = Node->getOpcode();
590 "Unexpected opcode");
595 SDValue N0 = Node->getOperand(0);
596 SDValue N1 = Node->getOperand(1);
613 bool SignExt =
false;
631 uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
632 if (Opcode !=
ISD::AND && (Val & RemovedBitsMask) != 0)
635 int64_t ShiftedVal = Val >> ShAmt;
636 if (!isInt<12>(ShiftedVal))
640 if (SignExt && ShAmt >= 32)
647 case ISD::AND: BinOpc = RISCV::ANDI;
break;
648 case ISD::OR: BinOpc = RISCV::ORI;
break;
649 case ISD::XOR: BinOpc = RISCV::XORI;
break;
652 unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
666 if (!Subtarget->hasVendorXTHeadBb())
669 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
673 SDValue N0 = Node->getOperand(0);
677 auto BitfieldExtract = [&](
SDValue N0,
unsigned Msb,
unsigned Lsb,
SDLoc DL,
685 MVT VT = Node->getSimpleValueType(0);
686 const unsigned RightShAmt = N1C->getZExtValue();
691 auto *N01C = dyn_cast<ConstantSDNode>(N0->
getOperand(1));
695 const unsigned LeftShAmt = N01C->getZExtValue();
698 if (LeftShAmt > RightShAmt)
702 const unsigned Msb = MsbPlusOne - 1;
703 const unsigned Lsb = RightShAmt - LeftShAmt;
705 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb,
DL, VT);
714 cast<VTSDNode>(N0.
getOperand(1))->getVT().getSizeInBits();
720 const unsigned Msb = ExtSize - 1;
721 const unsigned Lsb = RightShAmt;
723 SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb,
DL, VT);
733 if (!Subtarget->hasVendorXTHeadMemIdx())
748 int64_t
Offset =
C->getSExtValue();
757 for (Shift = 0; Shift < 4; Shift++)
758 if (isInt<5>(
Offset >> Shift) && ((
Offset % (1LL << Shift)) == 0))
767 if (LoadVT ==
MVT::i8 && IsPre)
768 Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
769 else if (LoadVT ==
MVT::i8 && IsPost)
770 Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
771 else if (LoadVT ==
MVT::i16 && IsPre)
772 Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
773 else if (LoadVT ==
MVT::i16 && IsPost)
774 Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
775 else if (LoadVT ==
MVT::i32 && IsPre)
776 Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
777 else if (LoadVT ==
MVT::i32 && IsPost)
778 Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
779 else if (LoadVT ==
MVT::i64 && IsPre)
780 Opcode = RISCV::TH_LDIB;
781 else if (LoadVT ==
MVT::i64 && IsPost)
782 Opcode = RISCV::TH_LDIA;
804 if (Node->isMachineOpcode()) {
812 unsigned Opcode = Node->getOpcode();
815 MVT VT = Node->getSimpleValueType(0);
817 bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
822 auto *ConstNode = cast<ConstantSDNode>(Node);
823 if (ConstNode->isZero()) {
829 int64_t Imm = ConstNode->getSExtValue();
832 if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
834 Imm = SignExtend64<16>(Imm);
837 if (!isInt<32>(Imm) && isUInt<32>(Imm) &&
hasAllWUsers(Node))
838 Imm = SignExtend64<32>(Imm);
844 const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
884 Opc = RISCV::FMV_H_X;
887 Opc = RISCV::FMV_W_X;
893 Opc = Subtarget->
is64Bit() ? RISCV::FMV_D_X : RISCV::FCVT_D_W;
908 if (!Subtarget->hasStdExtZfa())
911 "Unexpected subtarget");
914 if (!
SDValue(Node, 0).use_empty()) {
916 Node->getOperand(0));
919 if (!
SDValue(Node, 1).use_empty()) {
921 Node->getOperand(0));
929 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
932 SDValue N0 = Node->getOperand(0);
936 unsigned ShAmt = N1C->getZExtValue();
942 unsigned XLen = Subtarget->
getXLen();
945 if (TrailingZeros > 0 && LeadingZeros == 32) {
959 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
962 SDValue N0 = Node->getOperand(0);
965 unsigned ShAmt = N1C->getZExtValue();
971 unsigned XLen = Subtarget->
getXLen();
974 if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
993 Mask |= maskTrailingOnes<uint64_t>(ShAmt);
997 if (ShAmt >= TrailingOnes)
1000 if (TrailingOnes == 32) {
1013 if (HasBitTest && ShAmt + 1 == TrailingOnes) {
1015 Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST,
DL, VT,
1021 unsigned LShAmt = Subtarget->
getXLen() - TrailingOnes;
1043 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1046 SDValue N0 = Node->getOperand(0);
1049 unsigned ShAmt = N1C->getZExtValue();
1051 cast<VTSDNode>(N0.
getOperand(1))->getVT().getSizeInBits();
1053 if (ExtSize >= 32 || ShAmt >= ExtSize)
1055 unsigned LShAmt = Subtarget->
getXLen() - ExtSize;
1072 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1077 const bool isC1ANDI = isInt<12>(C1);
1079 SDValue N0 = Node->getOperand(0);
1084 if (!Subtarget->hasVendorXTHeadBb())
1096 auto *
C = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
1099 unsigned C2 =
C->getZExtValue();
1100 unsigned XLen = Subtarget->
getXLen();
1101 assert((C2 > 0 && C2 < XLen) &&
"Unexpected shift amount!");
1109 bool IsCANDI = isInt<6>(N1C->getSExtValue());
1113 C1 &= maskTrailingZeros<uint64_t>(C2);
1115 C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
1119 bool OneUseOrZExtW = N0.
hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
1125 if (!LeftShift && isC1Mask) {
1129 if (C2 + 32 == Leading) {
1141 if (C2 >= 32 && (Leading - C2) == 1 && N0.
hasOneUse() &&
1143 cast<VTSDNode>(
X.getOperand(1))->getVT() ==
MVT::i32) {
1148 RISCV::SRLIW,
DL, VT,
SDValue(SRAIW, 0),
1162 const unsigned Lsb = C2;
1163 if (tryUnsignedBitfieldExtract(Node,
DL, VT,
X, Msb, Lsb))
1168 bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
1170 cast<VTSDNode>(
X.getOperand(1))->getVT() ==
MVT::i32;
1172 Skip |= HasBitTest && Leading == XLen - 1;
1173 if (OneUseOrZExtW && !Skip) {
1175 RISCV::SLLI,
DL, VT,
X,
1191 if (C2 + Leading < XLen &&
1192 C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
1194 if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
1203 if (OneUseOrZExtW && !IsCANDI) {
1205 RISCV::SLLI,
DL, VT,
X,
1221 if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
1223 unsigned SrliOpc = RISCV::SRLI;
1226 isa<ConstantSDNode>(
X.getOperand(1)) &&
1227 X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
1228 SrliOpc = RISCV::SRLIW;
1229 X =
X.getOperand(0);
1241 if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
1242 OneUseOrZExtW && !IsCANDI) {
1244 RISCV::SRLIW,
DL, VT,
X,
1259 if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1261 RISCV::SRLI,
DL, VT,
X,
1270 if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1272 RISCV::SRLIW,
DL, VT,
X,
1288 if (isC1Mask && !isC1ANDI) {
1290 if (tryUnsignedBitfieldExtract(Node,
DL, VT, N0, Msb, 0))
1307 auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
1308 if (!N1C || !N1C->hasOneUse())
1312 SDValue N0 = Node->getOperand(0);
1329 (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
1331 IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
1332 if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.
hasOneUse()))
1336 bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
1338 IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
1339 if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.
hasOneUse()))
1345 unsigned XLen = Subtarget->
getXLen();
1351 unsigned ConstantShift = XLen - LeadingZeros;
1355 uint64_t ShiftedC1 = C1 << ConstantShift;
1358 ShiftedC1 = SignExtend64<32>(ShiftedC1);
1376 unsigned IntNo = Node->getConstantOperandVal(0);
1381 case Intrinsic::riscv_vmsgeu:
1382 case Intrinsic::riscv_vmsge: {
1383 SDValue Src1 = Node->getOperand(1);
1384 SDValue Src2 = Node->getOperand(2);
1385 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
1386 bool IsCmpUnsignedZero =
false;
1391 if (
auto *
C = dyn_cast<ConstantSDNode>(Src2)) {
1392 int64_t CVal =
C->getSExtValue();
1393 if (CVal >= -15 && CVal <= 16) {
1394 if (!IsUnsigned || CVal != 0)
1396 IsCmpUnsignedZero =
true;
1400 unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
1404#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
1405 case RISCVII::VLMUL::lmulenum: \
1406 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1407 : RISCV::PseudoVMSLT_VX_##suffix; \
1408 VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
1409 VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
1418#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
1426 if (IsCmpUnsignedZero) {
1437 {Cmp, Cmp, VL, SEW}));
1440 case Intrinsic::riscv_vmsgeu_mask:
1441 case Intrinsic::riscv_vmsge_mask: {
1442 SDValue Src1 = Node->getOperand(2);
1443 SDValue Src2 = Node->getOperand(3);
1444 bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
1445 bool IsCmpUnsignedZero =
false;
1450 if (
auto *
C = dyn_cast<ConstantSDNode>(Src2)) {
1451 int64_t CVal =
C->getSExtValue();
1452 if (CVal >= -15 && CVal <= 16) {
1453 if (!IsUnsigned || CVal != 0)
1455 IsCmpUnsignedZero =
true;
1459 unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
1464#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
1465 case RISCVII::VLMUL::lmulenum: \
1466 VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
1467 : RISCV::PseudoVMSLT_VX_##suffix; \
1468 VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
1469 : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
1478#undef CASE_VMSLT_OPCODES
1484#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix) \
1485 case RISCVII::VLMUL::lmulenum: \
1486 VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
1487 VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
1488 VMOROpcode = RISCV::PseudoVMOR_MM_##suffix; \
1497#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
1504 SDValue MaskedOff = Node->getOperand(1);
1505 SDValue Mask = Node->getOperand(4);
1508 if (IsCmpUnsignedZero) {
1511 if (Mask == MaskedOff) {
1517 {Mask, MaskedOff, VL, MaskSEW}));
1524 if (Mask == MaskedOff) {
1529 {Mask, Cmp, VL, MaskSEW}));
1546 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
1550 {Cmp, Mask, VL, MaskSEW}));
1553 case Intrinsic::riscv_vsetvli:
1554 case Intrinsic::riscv_vsetvlimax:
1560 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1565 case Intrinsic::riscv_vlseg2:
1566 case Intrinsic::riscv_vlseg3:
1567 case Intrinsic::riscv_vlseg4:
1568 case Intrinsic::riscv_vlseg5:
1569 case Intrinsic::riscv_vlseg6:
1570 case Intrinsic::riscv_vlseg7:
1571 case Intrinsic::riscv_vlseg8: {
1575 case Intrinsic::riscv_vlseg2_mask:
1576 case Intrinsic::riscv_vlseg3_mask:
1577 case Intrinsic::riscv_vlseg4_mask:
1578 case Intrinsic::riscv_vlseg5_mask:
1579 case Intrinsic::riscv_vlseg6_mask:
1580 case Intrinsic::riscv_vlseg7_mask:
1581 case Intrinsic::riscv_vlseg8_mask: {
1585 case Intrinsic::riscv_vlsseg2:
1586 case Intrinsic::riscv_vlsseg3:
1587 case Intrinsic::riscv_vlsseg4:
1588 case Intrinsic::riscv_vlsseg5:
1589 case Intrinsic::riscv_vlsseg6:
1590 case Intrinsic::riscv_vlsseg7:
1591 case Intrinsic::riscv_vlsseg8: {
1595 case Intrinsic::riscv_vlsseg2_mask:
1596 case Intrinsic::riscv_vlsseg3_mask:
1597 case Intrinsic::riscv_vlsseg4_mask:
1598 case Intrinsic::riscv_vlsseg5_mask:
1599 case Intrinsic::riscv_vlsseg6_mask:
1600 case Intrinsic::riscv_vlsseg7_mask:
1601 case Intrinsic::riscv_vlsseg8_mask: {
1605 case Intrinsic::riscv_vloxseg2:
1606 case Intrinsic::riscv_vloxseg3:
1607 case Intrinsic::riscv_vloxseg4:
1608 case Intrinsic::riscv_vloxseg5:
1609 case Intrinsic::riscv_vloxseg6:
1610 case Intrinsic::riscv_vloxseg7:
1611 case Intrinsic::riscv_vloxseg8:
1614 case Intrinsic::riscv_vluxseg2:
1615 case Intrinsic::riscv_vluxseg3:
1616 case Intrinsic::riscv_vluxseg4:
1617 case Intrinsic::riscv_vluxseg5:
1618 case Intrinsic::riscv_vluxseg6:
1619 case Intrinsic::riscv_vluxseg7:
1620 case Intrinsic::riscv_vluxseg8:
1623 case Intrinsic::riscv_vloxseg2_mask:
1624 case Intrinsic::riscv_vloxseg3_mask:
1625 case Intrinsic::riscv_vloxseg4_mask:
1626 case Intrinsic::riscv_vloxseg5_mask:
1627 case Intrinsic::riscv_vloxseg6_mask:
1628 case Intrinsic::riscv_vloxseg7_mask:
1629 case Intrinsic::riscv_vloxseg8_mask:
1632 case Intrinsic::riscv_vluxseg2_mask:
1633 case Intrinsic::riscv_vluxseg3_mask:
1634 case Intrinsic::riscv_vluxseg4_mask:
1635 case Intrinsic::riscv_vluxseg5_mask:
1636 case Intrinsic::riscv_vluxseg6_mask:
1637 case Intrinsic::riscv_vluxseg7_mask:
1638 case Intrinsic::riscv_vluxseg8_mask:
1641 case Intrinsic::riscv_vlseg8ff:
1642 case Intrinsic::riscv_vlseg7ff:
1643 case Intrinsic::riscv_vlseg6ff:
1644 case Intrinsic::riscv_vlseg5ff:
1645 case Intrinsic::riscv_vlseg4ff:
1646 case Intrinsic::riscv_vlseg3ff:
1647 case Intrinsic::riscv_vlseg2ff: {
1651 case Intrinsic::riscv_vlseg8ff_mask:
1652 case Intrinsic::riscv_vlseg7ff_mask:
1653 case Intrinsic::riscv_vlseg6ff_mask:
1654 case Intrinsic::riscv_vlseg5ff_mask:
1655 case Intrinsic::riscv_vlseg4ff_mask:
1656 case Intrinsic::riscv_vlseg3ff_mask:
1657 case Intrinsic::riscv_vlseg2ff_mask: {
1661 case Intrinsic::riscv_vloxei:
1662 case Intrinsic::riscv_vloxei_mask:
1663 case Intrinsic::riscv_vluxei:
1664 case Intrinsic::riscv_vluxei_mask: {
1665 bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1666 IntNo == Intrinsic::riscv_vluxei_mask;
1667 bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1668 IntNo == Intrinsic::riscv_vloxei_mask;
1670 MVT VT = Node->getSimpleValueType(0);
1675 bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1678 Operands.push_back(Node->getOperand(CurOp++));
1689 "Element count mismatch");
1694 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
1696 "values when XLEN=32");
1699 IsMasked, IsTU, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
1700 static_cast<unsigned>(IndexLMUL));
1704 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1710 case Intrinsic::riscv_vlm:
1711 case Intrinsic::riscv_vle:
1712 case Intrinsic::riscv_vle_mask:
1713 case Intrinsic::riscv_vlse:
1714 case Intrinsic::riscv_vlse_mask: {
1715 bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1716 IntNo == Intrinsic::riscv_vlse_mask;
1718 IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1720 MVT VT = Node->getSimpleValueType(0);
1725 bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
1727 bool IsTU = HasPassthruOperand &&
1728 (IsMasked || !Node->getOperand(CurOp).isUndef());
1731 Operands.push_back(Node->getOperand(CurOp++));
1732 else if (HasPassthruOperand)
1741 RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided,
false,
Log2SEW,
1742 static_cast<unsigned>(LMUL));
1746 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1752 case Intrinsic::riscv_vleff:
1753 case Intrinsic::riscv_vleff_mask: {
1754 bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1756 MVT VT = Node->getSimpleValueType(0);
1761 bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1764 Operands.push_back(Node->getOperand(CurOp++));
1775 RISCV::getVLEPseudo(IsMasked, IsTU,
false,
true,
1776 Log2SEW,
static_cast<unsigned>(LMUL));
1779 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1789 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1791 case Intrinsic::riscv_vsseg2:
1792 case Intrinsic::riscv_vsseg3:
1793 case Intrinsic::riscv_vsseg4:
1794 case Intrinsic::riscv_vsseg5:
1795 case Intrinsic::riscv_vsseg6:
1796 case Intrinsic::riscv_vsseg7:
1797 case Intrinsic::riscv_vsseg8: {
1801 case Intrinsic::riscv_vsseg2_mask:
1802 case Intrinsic::riscv_vsseg3_mask:
1803 case Intrinsic::riscv_vsseg4_mask:
1804 case Intrinsic::riscv_vsseg5_mask:
1805 case Intrinsic::riscv_vsseg6_mask:
1806 case Intrinsic::riscv_vsseg7_mask:
1807 case Intrinsic::riscv_vsseg8_mask: {
1811 case Intrinsic::riscv_vssseg2:
1812 case Intrinsic::riscv_vssseg3:
1813 case Intrinsic::riscv_vssseg4:
1814 case Intrinsic::riscv_vssseg5:
1815 case Intrinsic::riscv_vssseg6:
1816 case Intrinsic::riscv_vssseg7:
1817 case Intrinsic::riscv_vssseg8: {
1821 case Intrinsic::riscv_vssseg2_mask:
1822 case Intrinsic::riscv_vssseg3_mask:
1823 case Intrinsic::riscv_vssseg4_mask:
1824 case Intrinsic::riscv_vssseg5_mask:
1825 case Intrinsic::riscv_vssseg6_mask:
1826 case Intrinsic::riscv_vssseg7_mask:
1827 case Intrinsic::riscv_vssseg8_mask: {
1831 case Intrinsic::riscv_vsoxseg2:
1832 case Intrinsic::riscv_vsoxseg3:
1833 case Intrinsic::riscv_vsoxseg4:
1834 case Intrinsic::riscv_vsoxseg5:
1835 case Intrinsic::riscv_vsoxseg6:
1836 case Intrinsic::riscv_vsoxseg7:
1837 case Intrinsic::riscv_vsoxseg8:
1840 case Intrinsic::riscv_vsuxseg2:
1841 case Intrinsic::riscv_vsuxseg3:
1842 case Intrinsic::riscv_vsuxseg4:
1843 case Intrinsic::riscv_vsuxseg5:
1844 case Intrinsic::riscv_vsuxseg6:
1845 case Intrinsic::riscv_vsuxseg7:
1846 case Intrinsic::riscv_vsuxseg8:
1849 case Intrinsic::riscv_vsoxseg2_mask:
1850 case Intrinsic::riscv_vsoxseg3_mask:
1851 case Intrinsic::riscv_vsoxseg4_mask:
1852 case Intrinsic::riscv_vsoxseg5_mask:
1853 case Intrinsic::riscv_vsoxseg6_mask:
1854 case Intrinsic::riscv_vsoxseg7_mask:
1855 case Intrinsic::riscv_vsoxseg8_mask:
1858 case Intrinsic::riscv_vsuxseg2_mask:
1859 case Intrinsic::riscv_vsuxseg3_mask:
1860 case Intrinsic::riscv_vsuxseg4_mask:
1861 case Intrinsic::riscv_vsuxseg5_mask:
1862 case Intrinsic::riscv_vsuxseg6_mask:
1863 case Intrinsic::riscv_vsuxseg7_mask:
1864 case Intrinsic::riscv_vsuxseg8_mask:
1867 case Intrinsic::riscv_vsoxei:
1868 case Intrinsic::riscv_vsoxei_mask:
1869 case Intrinsic::riscv_vsuxei:
1870 case Intrinsic::riscv_vsuxei_mask: {
1871 bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1872 IntNo == Intrinsic::riscv_vsuxei_mask;
1873 bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1874 IntNo == Intrinsic::riscv_vsoxei_mask;
1876 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1881 Operands.push_back(Node->getOperand(CurOp++));
1889 "Element count mismatch");
1894 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
1896 "values when XLEN=32");
1899 IsMasked,
false, IsOrdered, IndexLog2EEW,
1900 static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
1904 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1910 case Intrinsic::riscv_vsm:
1911 case Intrinsic::riscv_vse:
1912 case Intrinsic::riscv_vse_mask:
1913 case Intrinsic::riscv_vsse:
1914 case Intrinsic::riscv_vsse_mask: {
1915 bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1916 IntNo == Intrinsic::riscv_vsse_mask;
1918 IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1920 MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1925 Operands.push_back(Node->getOperand(CurOp++));
1932 IsMasked, IsStrided,
Log2SEW,
static_cast<unsigned>(LMUL));
1935 if (
auto *
MemOp = dyn_cast<MemSDNode>(Node))
1945 MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1957 SDValue V = Node->getOperand(0);
1958 SDValue SubV = Node->getOperand(1);
1960 auto Idx = Node->getConstantOperandVal(2);
1964 MVT SubVecContainerVT = SubVecVT;
1967 SubVecContainerVT =
TLI.getContainerForFixedLengthVector(SubVecVT);
1969 VT =
TLI.getContainerForFixedLengthVector(VT);
1973 std::tie(SubRegIdx,
Idx) =
1975 VT, SubVecContainerVT,
Idx,
TRI);
1987 (void)IsSubVecPartReg;
1988 assert((!IsSubVecPartReg || V.isUndef()) &&
1989 "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1990 "the subvector is smaller than a full-sized register");
1994 if (SubRegIdx == RISCV::NoSubRegister) {
1998 "Unexpected subvector extraction");
2011 SDValue V = Node->getOperand(0);
2012 auto Idx = Node->getConstantOperandVal(1);
2013 MVT InVT = V.getSimpleValueType();
2017 MVT SubVecContainerVT = VT;
2020 SubVecContainerVT =
TLI.getContainerForFixedLengthVector(VT);
2022 InVT =
TLI.getContainerForFixedLengthVector(InVT);
2026 std::tie(SubRegIdx,
Idx) =
2028 InVT, SubVecContainerVT,
Idx,
TRI);
2038 if (SubRegIdx == RISCV::NoSubRegister) {
2042 "Unexpected subvector extraction");
2059 if (!Subtarget->hasOptimizedZeroStrideLoad())
2065 if (!Node->getOperand(0).isUndef())
2067 SDValue Src = Node->getOperand(1);
2068 auto *Ld = dyn_cast<LoadSDNode>(Src);
2071 EVT MemVT = Ld->getMemoryVT();
2098 false,
false,
true,
false,
2099 Log2SEW,
static_cast<unsigned>(LMUL));
2117 const SDValue &Op,
unsigned ConstraintID, std::vector<SDValue> &OutOps) {
2118 switch (ConstraintID) {
2122 OutOps.push_back(Op);
2125 OutOps.push_back(Op);
2136 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Addr)) {
2154 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Addr.getOperand(0))) {
2155 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2156 if (isInt<12>(CVal)) {
2172 if (!isa<ConstantSDNode>(
Addr))
2175 int64_t CVal = cast<ConstantSDNode>(
Addr)->getSExtValue();
2180 int64_t Lo12 = SignExtend64<12>(CVal);
2182 if (!Subtarget->
is64Bit() || isInt<32>(
Hi)) {
2184 int64_t Hi20 = (
Hi >> 12) & 0xfffff;
2202 if (Seq.
back().getOpcode() != RISCV::ADDI)
2204 Lo12 = Seq.
back().getImm();
2208 assert(!Seq.
empty() &&
"Expected more instructions in sequence");
2218 for (
auto *
Use :
Add->uses()) {
2223 EVT VT = cast<MemSDNode>(
Use)->getMemoryVT();
2229 cast<StoreSDNode>(
Use)->getValue() ==
Add)
2232 cast<AtomicSDNode>(
Use)->getVal() ==
Add)
2240 unsigned MaxShiftAmount,
2243 EVT VT =
Addr.getSimpleValueType();
2249 if (
N.getOpcode() ==
ISD::SHL && isa<ConstantSDNode>(
N.getOperand(1))) {
2251 if (
N.getConstantOperandVal(1) <= MaxShiftAmount) {
2253 ShiftAmt =
N.getConstantOperandVal(1);
2258 return ShiftAmt != 0;
2262 if (
auto *C1 = dyn_cast<ConstantSDNode>(
Addr.getOperand(1))) {
2267 isInt<12>(C1->getSExtValue())) {
2276 }
else if (UnwrapShl(
Addr.getOperand(0),
Index, Scale)) {
2280 UnwrapShl(
Addr.getOperand(1),
Index, Scale);
2284 }
else if (UnwrapShl(
Addr,
Index, Scale)) {
2299 MVT VT =
Addr.getSimpleValueType();
2308 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2309 if (isInt<12>(CVal)) {
2313 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
2321 GA->getGlobal()->getPointerAlignment(
DL), GA->getOffset());
2322 if (CVal == 0 || Alignment > CVal) {
2323 int64_t CombinedOffset = CVal + GA->getOffset();
2327 CombinedOffset, GA->getTargetFlags());
2333 if (
auto *FIN = dyn_cast<FrameIndexSDNode>(
Base))
2341 if (
Addr.getOpcode() ==
ISD::ADD && isa<ConstantSDNode>(
Addr.getOperand(1))) {
2342 int64_t CVal = cast<ConstantSDNode>(
Addr.getOperand(1))->getSExtValue();
2343 assert(!isInt<12>(CVal) &&
"simm12 not already handled?");
2348 if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
2349 int64_t Adj = CVal < 0 ? -2048 : 2047;
2415 if (Imm != 0 && Imm % ShiftWidth == 0) {
2424 if (Imm != 0 && Imm % ShiftWidth == 0) {
2428 unsigned NegOpc = VT ==
MVT::i64 ? RISCV::SUBW : RISCV::SUB;
2436 if (Imm % ShiftWidth == ShiftWidth - 1) {
2458 "Unexpected condition code!");
2465 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
2466 if (CCVal != ExpectedCCVal)
2472 if (!
LHS.getValueType().isInteger())
2483 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
2484 int64_t CVal =
C->getSExtValue();
2487 if (CVal == -2048) {
2490 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
2497 if (isInt<12>(CVal) || CVal == 2048) {
2500 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
2516 cast<VTSDNode>(
N.getOperand(1))->getVT().getSizeInBits() == Bits) {
2517 Val =
N.getOperand(0);
2521 auto UnwrapShlSra = [](
SDValue N,
unsigned ShiftAmt) {
2522 if (
N.getOpcode() !=
ISD::SRA || !isa<ConstantSDNode>(
N.getOperand(1)))
2527 N.getConstantOperandVal(1) == ShiftAmt &&
2534 MVT VT =
N.getSimpleValueType();
2545 auto *
C = dyn_cast<ConstantSDNode>(
N.getOperand(1));
2546 if (
C &&
C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
2547 Val =
N.getOperand(0);
2551 MVT VT =
N.getSimpleValueType();
2566 if (
N.getOpcode() ==
ISD::AND && isa<ConstantSDNode>(
N.getOperand(1))) {
2572 uint64_t Mask =
N.getConstantOperandVal(1);
2575 unsigned XLen = Subtarget->
getXLen();
2577 Mask &= maskTrailingZeros<uint64_t>(C2);
2579 Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2587 if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
2589 EVT VT =
N.getValueType();
2599 if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
2601 EVT VT =
N.getValueType();
2613 bool LeftShift =
N.getOpcode() ==
ISD::SHL;
2614 if ((LeftShift ||
N.getOpcode() ==
ISD::SRL) &&
2615 isa<ConstantSDNode>(
N.getOperand(1))) {
2621 unsigned C1 =
N.getConstantOperandVal(1);
2622 unsigned XLen = Subtarget->
getXLen();
2627 if (LeftShift && Leading == 32 && Trailing > 0 &&
2628 (Trailing + C1) == ShAmt) {
2630 EVT VT =
N.getValueType();
2639 if (!LeftShift && Leading == 32 && Trailing > C1 &&
2640 (Trailing - C1) == ShAmt) {
2642 EVT VT =
N.getValueType();
2661 if (
N.getOpcode() ==
ISD::AND && isa<ConstantSDNode>(
N.getOperand(1)) &&
2666 uint64_t Mask =
N.getConstantOperandVal(1);
2669 Mask &= maskTrailingZeros<uint64_t>(C2);
2677 if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
2679 EVT VT =
N.getValueType();
2703 const unsigned Depth)
const {
2709 isa<ConstantSDNode>(Node) ||
Depth != 0) &&
2710 "Unexpected opcode");
2715 for (
auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
2718 if (!
User->isMachineOpcode())
2722 switch (
User->getMachineOpcode()) {
2745 case RISCV::SLLI_UW:
2746 case RISCV::FMV_W_X:
2747 case RISCV::FCVT_H_W:
2748 case RISCV::FCVT_H_WU:
2749 case RISCV::FCVT_S_W:
2750 case RISCV::FCVT_S_WU:
2751 case RISCV::FCVT_D_W:
2752 case RISCV::FCVT_D_WU:
2753 case RISCV::TH_REVW:
2754 case RISCV::TH_SRRIW:
2767 if (UI.getOperandNo() != 1 || Bits <
Log2_32(Subtarget->
getXLen()))
2772 if (Bits < Subtarget->getXLen() -
User->getConstantOperandVal(1))
2781 if (Bits >= (
unsigned)llvm::bit_width<uint64_t>(~Imm))
2800 unsigned ShAmt =
User->getConstantOperandVal(1);
2814 case RISCV::FMV_H_X:
2815 case RISCV::ZEXT_H_RV32:
2816 case RISCV::ZEXT_H_RV64:
2822 if (Bits < (Subtarget->
getXLen() / 2))
2826 case RISCV::SH1ADD_UW:
2827 case RISCV::SH2ADD_UW:
2828 case RISCV::SH3ADD_UW:
2831 if (UI.getOperandNo() != 0 || Bits < 32)
2835 if (UI.getOperandNo() != 0 || Bits < 8)
2839 if (UI.getOperandNo() != 0 || Bits < 16)
2843 if (UI.getOperandNo() != 0 || Bits < 32)
2855 if (
auto *
C = dyn_cast<ConstantSDNode>(
N)) {
2856 int64_t
Offset =
C->getSExtValue();
2858 for (Shift = 0; Shift < 4; Shift++)
2859 if (isInt<5>(
Offset >> Shift) && ((
Offset % (1LL << Shift)) == 0))
2866 EVT Ty =
N->getValueType(0);
2878 auto *
C = dyn_cast<ConstantSDNode>(
N);
2879 if (
C && isUInt<5>(
C->getZExtValue())) {
2881 N->getValueType(0));
2882 }
else if (
C &&
C->isAllOnes()) {
2885 N->getValueType(0));
2886 }
else if (isa<RegisterSDNode>(
N) &&
2887 cast<RegisterSDNode>(
N)->
getReg() == RISCV::X0) {
2893 N->getValueType(0));
2904 assert(
N.getNumOperands() == 3 &&
"Unexpected number of operands");
2905 SplatVal =
N.getOperand(1);
2916 !isa<ConstantSDNode>(
N.getOperand(1)))
2918 assert(
N.getNumOperands() == 3 &&
"Unexpected number of operands");
2921 cast<ConstantSDNode>(
N.getOperand(1))->getSExtValue();
2931 assert(XLenVT ==
N.getOperand(1).getSimpleValueType() &&
2932 "Unexpected splat operand type");
2933 MVT EltVT =
N.getSimpleValueType().getVectorElementType();
2934 if (EltVT.
bitsLT(XLenVT))
2937 if (!ValidateImm(SplatImm))
2946 [](int64_t Imm) {
return isInt<5>(Imm); });
2951 N, SplatVal, *
CurDAG, *Subtarget,
2952 [](int64_t Imm) {
return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2958 N, SplatVal, *
CurDAG, *Subtarget, [](int64_t Imm) {
2959 return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2965 !isa<ConstantSDNode>(
N.getOperand(1)))
2969 cast<ConstantSDNode>(
N.getOperand(1))->getSExtValue();
2971 if (!isUInt<5>(SplatImm))
3008 if (
auto *
C = dyn_cast<ConstantSDNode>(
N)) {
3011 if (!isInt<5>(ImmVal))
3023bool RISCVDAGToDAGISel::doPeepholeSExtW(
SDNode *
N) {
3025 if (
N->getMachineOpcode() != RISCV::ADDIW ||
3047 case RISCV::ADD: Opc = RISCV::ADDW;
break;
3048 case RISCV::ADDI: Opc = RISCV::ADDIW;
break;
3049 case RISCV::SUB: Opc = RISCV::SUBW;
break;
3050 case RISCV::MUL: Opc = RISCV::MULW;
break;
3051 case RISCV::SLLI: Opc = RISCV::SLLIW;
break;
3059 !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
3074 case RISCV::TH_MULAW:
3075 case RISCV::TH_MULAH:
3076 case RISCV::TH_MULSW:
3077 case RISCV::TH_MULSH:
3090 if (!isa<RegisterSDNode>(
N->getOperand(MaskOpIdx)) ||
3091 cast<RegisterSDNode>(
N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
3095 const auto *Glued =
N->getGluedNode();
3101 if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
3102 cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
3108 const auto IsVMSet = [](
unsigned Opc) {
3109 return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
3110 Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
3111 Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
3112 Opc == RISCV::PseudoVMSET_M_B8;
3126bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(
SDNode *
N) {
3128 RISCV::getMaskedPseudoInfo(
N->getMachineOpcode());
3132 unsigned MaskOpIdx =
I->MaskOpIdx;
3138 std::optional<unsigned> TailPolicyOpIdx;
3145 if (!(
N->getConstantOperandVal(*TailPolicyOpIdx) &
3149 if (
I->UnmaskedTUPseudo ==
I->MaskedPseudo && !
N->getOperand(0).isUndef())
3152 if (!
N->getOperand(0).isUndef())
3157 unsigned Opc = IsTA ?
I->UnmaskedPseudo :
I->UnmaskedTUPseudo;
3166 "Unexpected pseudo to transform to");
3171 for (
unsigned I = IsTA,
E =
N->getNumOperands();
I !=
E;
I++) {
3174 if (
I == MaskOpIdx ||
I == TailPolicyOpIdx ||
3181 const auto *Glued =
N->getGluedNode();
3182 if (
auto *TGlued = Glued->getGluedNode())
3186 Result->setFlags(
N->getFlags());
3201bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(
SDNode *
N,
bool IsTA) {
3202 unsigned Offset = IsTA ? 0 : 1;
3211 "Expect True is the first output of an instruction.");
3230 SDValue MergeOpN =
N->getOperand(0);
3247 HasMergeOp ? RISCV::getMaskedPseudoInfo(TrueOpc)
3248 : RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
3270 if (
SDNode *Glued =
N->getGluedNode())
3278 unsigned TrueVLIndex =
3279 True.
getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
3282 auto IsNoFPExcept = [
this](
SDValue N) {
3284 N->getFlags().hasNoFPExcept();
3294 unsigned MaskedOpc =
Info->MaskedPseudo;
3296 "Expected instructions with mask have policy operand.");
3298 "Expected instructions with mask have merge operand.");
3318 if (
N->getGluedNode())
3319 Ops.
push_back(
N->getOperand(
N->getNumOperands() - 1));
3334 doPeepholeMaskedRVV(Result);
3340bool RISCVDAGToDAGISel::performVMergeToVAdd(
SDNode *
N) {
3342 switch (
N->getMachineOpcode()) {
3345 case RISCV::PseudoVMERGE_VVM_MF8_TU:
3346 NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
3348 case RISCV::PseudoVMERGE_VVM_MF4_TU:
3349 NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
3351 case RISCV::PseudoVMERGE_VVM_MF2_TU:
3352 NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
3354 case RISCV::PseudoVMERGE_VVM_M1_TU:
3355 NewOpc = RISCV::PseudoVADD_VI_M1_TU;
3357 case RISCV::PseudoVMERGE_VVM_M2_TU:
3358 NewOpc = RISCV::PseudoVADD_VI_M2_TU;
3360 case RISCV::PseudoVMERGE_VVM_M4_TU:
3361 NewOpc = RISCV::PseudoVADD_VI_M4_TU;
3363 case RISCV::PseudoVMERGE_VVM_M8_TU:
3364 NewOpc = RISCV::PseudoVADD_VI_M8_TU;
3372 EVT VT =
N->getValueType(0);
3373 SDValue Ops[] = {
N->getOperand(1),
N->getOperand(2),
3375 N->getOperand(4),
N->getOperand(5)};
3381bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
3382 bool MadeChange =
false;
3387 if (
N->use_empty() || !
N->isMachineOpcode())
3390 auto IsVMergeTU = [](
unsigned Opcode) {
3391 return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
3392 Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
3393 Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
3394 Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
3395 Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
3396 Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
3397 Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
3400 auto IsVMergeTA = [](
unsigned Opcode) {
3401 return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
3402 Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
3403 Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
3404 Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
3405 Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
3406 Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
3407 Opcode == RISCV::PseudoVMERGE_VVM_M8;
3410 unsigned Opc =
N->getMachineOpcode();
3413 if ((IsVMergeTU(Opc) &&
N->getOperand(0) ==
N->getOperand(1)) ||
3415 MadeChange |= performCombineVMergeAndVOps(
N, IsVMergeTA(Opc));
3416 if (IsVMergeTU(Opc) &&
N->getOperand(0) ==
N->getOperand(1))
3417 MadeChange |= performVMergeToVAdd(
N);
static Register createTuple(ArrayRef< Register > Regs, const unsigned RegClassIDs[], const unsigned SubRegs[], MachineIRBuilder &MIB)
Create a REG_SEQUENCE instruction using the registers in Regs.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
mir Rename Register Operands
unsigned const TargetRegisterInfo * TRI
typename CallsiteContextGraph< DerivedCCG, FuncTy, CallTy >::FuncInfo FuncInfo
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx)
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)
static bool isAllUndef(ArrayRef< SDValue > Values)
static bool isWorthFoldingAdd(SDValue Add)
static unsigned getLastNonGlueOrChainOpIdx(const SDNode *Node)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static unsigned getVecPolicyOpIdx(const SDNode *Node, const MCInstrDesc &MCID)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, ValidateFn ValidateImm)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset)
bool(*)(int64_t) ValidateFn
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by other flags.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
bool bitsLT(MVT VT) const
Return true if this has less bits than VT.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
MVT getVectorElementType() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal)
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVLSEGFF(SDNode *Node, bool IsMasked)
bool selectFPImm(SDValue N, SDValue &Imm)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
bool hasAllHUsers(SDNode *Node) const
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVLOp(SDValue N, SDValue &VL)
bool trySignedBitfieldExtract(SDNode *Node)
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
bool hasVInstructions() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVInstrInfo * getInstrInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVII::VLMUL getLMUL(MVT VT)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
op_iterator op_end() const
op_iterator op_begin() const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
unsigned getNumOperands() const
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
bool mayRaiseFPException(SDNode *Node) const
Return whether the node may raise an FP exception.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOpt::Level OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getRegister(unsigned Reg, EVT VT)
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize Fixed(ScalarTy ExactSize)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
Iterator for intrusive lists based on ilist_node.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Level
Code generation optimization level.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with intege...
static bool hasDummyMaskOp(uint64_t TSFlags)
static bool hasMergeOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
@ SPLAT_VECTOR_SPLIT_I64_VL
InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures)
static unsigned decodeVSEW(unsigned VSEW)
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
static constexpr int64_t VLMaxSentinel
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOpt::Level OptLevel)
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands,...
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.