#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
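// The GET_*_IMPL macros above pull in the TableGen-generated searchable
// tables from RISCVGenSearchableTables.inc; they are used below to look up
// RVV load/store pseudo instructions by NF, LMUL, SEW, and masking.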
MVT VT = N->getSimpleValueType(0);

N->getOperand(0), VL);

assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
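// Post-isel peephole driver: skip dead or non-machine nodes, then run each
// local optimization and record whether anything changed.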
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
MadeChange |= doPeepholeSExtW(N);
MadeChange |= doPeepholeLoadStoreADDI(N);
MadeChange |= doPeepholeMaskedRVV(N);

unsigned &OffsetOpIdx) {
switch (N->getMachineOpcode()) {

switch (Inst.getOpndKind()) {
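// Register classes for NF-register vector tuples at LMUL=1 and LMUL=2,
// indexed by NF - 2.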
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm1_0;
RegClassID = M1TupleRegClassIDs[NF - 2];

static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm2_0;
RegClassID = M2TupleRegClassIDs[NF - 2];

static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm4_0;
RegClassID = RISCV::VRN2M4RegClassID;

for (unsigned I = 0; I < Regs.size(); ++I) {
  Ops.push_back(Regs[I]);
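// Helper that appends the operands shared by the RVV memory pseudos: base
// pointer, optional stride or index operand, mask, VL, SEW, the policy
// immediate for masked loads, and the chain.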
SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
bool IsLoad, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);

if (IsStridedOrIndexed) {
  Operands.push_back(Node->getOperand(CurOp++));
  *IndexVT = Operands.back()->getSimpleValueType(0);

if (IsMasked && IsLoad) {
  uint64_t Policy = Node->getConstantOperandVal(CurOp++);
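// selectVLSEG: unit-stride/strided segment load. The node produces NF vector
// results plus a chain; the selected pseudo writes a register tuple that is
// then split into NF subregister extracts.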
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);

Node->op_begin() + CurOp + NF);

RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                      static_cast<unsigned>(LMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))

for (unsigned I = 0; I < NF; ++I) {
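// selectVLSEGFF: fault-only-first segment load. NF excludes both the chain
// and the extra output-VL result, and the pseudo is looked up with FF set.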
unsigned NF = Node->getNumValues() - 2;
MVT VT = Node->getSimpleValueType(0);

Node->op_begin() + CurOp + NF);

RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                      Log2SEW, static_cast<unsigned>(LMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))

for (unsigned I = 0; I < NF; ++I) {
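// selectVLXSEG: ordered/unordered indexed segment load. EEW=64 index vectors
// are rejected when XLEN=32.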
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);

Node->op_begin() + CurOp + NF);

"Element count mismatch");

if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  "values when XLEN=32");

NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))

for (unsigned I = 0; I < NF; ++I) {
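// selectVSSEG / selectVSXSEG: segment stores. The NF store-data operands
// start at operand 2 and are packed into a register tuple before the store.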
unsigned NF = Node->getNumOperands() - 4;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned CurOp = 2 + NF;

NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))

unsigned NF = Node->getNumOperands() - 5;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned CurOp = 2 + NF;

"Element count mismatch");

if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  "values when XLEN=32");

NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));

if (auto *MemOp = dyn_cast<MemSDNode>(Node))
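// selectVSETVLI: lower the vsetvli/vsetvlimax (and *_opt) intrinsics. VLMAX
// requests use PseudoVSETVLIX0; an AVL that fits in 5 bits can instead use
// the immediate (vsetivli) form.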
548 "Unexpected opcode");
554 unsigned IntNoOffset = HasChain ? 1 : 0;
555 unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
557 assert((IntNo == Intrinsic::riscv_vsetvli ||
558 IntNo == Intrinsic::riscv_vsetvlimax ||
559 IntNo == Intrinsic::riscv_vsetvli_opt ||
560 IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
561 "Unexpected vsetvli intrinsic");
563 bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
564 IntNo == Intrinsic::riscv_vsetvlimax_opt;
565 unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
567 assert(Node->getNumOperands() == Offset + 2 &&
568 "Unexpected number of operands");
573 Node->getConstantOperandVal(Offset + 1) & 0x7);
584 unsigned Opcode = RISCV::PseudoVSETVLI;
587 Opcode = RISCV::PseudoVSETVLIX0;
589 VLOperand = Node->getOperand(IntNoOffset + 1);
591 if (
auto *
C = dyn_cast<ConstantSDNode>(VLOperand)) {
593 if (isUInt<5>(AVL)) {
597 Ops.push_back(Node->getOperand(0));
607 Ops.push_back(Node->getOperand(0));
614 if (Node->isMachineOpcode()) {
622 unsigned Opcode = Node->getOpcode();
625 MVT VT = Node->getSimpleValueType(0);
629 auto *ConstNode = cast<ConstantSDNode>(Node);
630 if (VT == XLenVT && ConstNode->isZero()) {
636 int64_t
Imm = ConstNode->getSExtValue();
641 Imm = SignExtend64<16>(
Imm);
645 Imm = SignExtend64<32>(
Imm);
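// An ADD of a large constant whose users are all memory operations can be
// split so that the low 12 bits fold into each load/store immediate.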
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
int64_t Offset = N1C->getSExtValue();
int64_t Lo12 = SignExtend64<12>(Offset);
if (Lo12 == 0 || isInt<12>(Offset))
if (isInt<12>(Offset / 2) && isInt<12>(Offset - Offset / 2))

bool AllPointerUses = true;
for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
  unsigned BaseOpIdx, OffsetOpIdx;
  if (!User->isMachineOpcode() ||
      UI.getOperandNo() != BaseOpIdx) {
    AllPointerUses = false;

cast<ConstantSDNode>(User->getOperand(OffsetOpIdx))->getSExtValue();
if (!isInt<12>(MemOffs)) {
  AllPointerUses = false;

Offset = SignExtend64<32>(Offset);

auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;

auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
if (ExtSize >= 32 || ShAmt >= ExtSize)
unsigned LShAmt = Subtarget->getXLen() - ExtSize;

auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);

auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
unsigned XLen = Subtarget->getXLen();
if (!C2 || C2 >= XLen)

bool IsCANDI = isInt<6>(N1C->getSExtValue());
C1 &= maskTrailingZeros<uint64_t>(C2);
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {

RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),

cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
if (OneUseOrZExtW && !Skip) {

RISCV::SLLI, DL, XLenVT, X,

if (C2 + C3 < XLen &&
    C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
  if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
  if (OneUseOrZExtW && !IsCANDI) {

RISCV::SLLI, DL, XLenVT, X,

if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !IsCANDI) {
  unsigned SrliOpc = RISCV::SRLI;
  if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
      X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
    SrliOpc = RISCV::SRLIW;

SrliOpc, DL, XLenVT, X,

if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
    OneUseOrZExtW && !IsCANDI) {

RISCV::SRLIW, DL, XLenVT, X,

if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !IsCANDI) {

RISCV::SRLI, DL, XLenVT, X,

if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {

RISCV::SRLIW, DL, XLenVT, X,
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())

SDValue N0 = Node->getOperand(0);
if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))

(C2 == UINT64_C(0xFFFF) &&
(C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))

unsigned XLen = Subtarget->getXLen();
unsigned ConstantShift = XLen - LeadingZeros;
ShiftedC1 = SignExtend64<32>(ShiftedC1);
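// INTRINSIC_WO_CHAIN: vmsge(u).vx has no hardware encoding, so it is
// expanded to vmslt(u).vx followed by mask arithmetic (vmnand here, or
// vmxor/vmandn/vmor in the masked form); an unsigned compare against zero
// degenerates to vmset.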
unsigned IntNo = Node->getConstantOperandVal(0);

case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
  SDValue Src1 = Node->getOperand(1);
  SDValue Src2 = Node->getOperand(2);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
  bool IsCmpUnsignedZero = false;

  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
      IsCmpUnsignedZero = true;

  unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;

#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES

  if (IsCmpUnsignedZero) {
  {Cmp, Cmp, VL, SEW}));
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
  SDValue Src1 = Node->getOperand(2);
  SDValue Src2 = Node->getOperand(3);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
  bool IsCmpUnsignedZero = false;

  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
      IsCmpUnsignedZero = true;

  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,

#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
#undef CASE_VMSLT_OPCODES

#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES

  SDValue MaskedOff = Node->getOperand(1);

  if (IsCmpUnsignedZero) {
    if (Mask == MaskedOff) {
    {Mask, MaskedOff, VL, MaskSEW}));

  if (Mask == MaskedOff) {
  {Mask, Cmp, VL, MaskSEW}));

  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
  {Cmp, Mask, VL, MaskSEW}));
case Intrinsic::riscv_vsetvli_opt:
case Intrinsic::riscv_vsetvlimax_opt:

unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();

case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
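// INTRINSIC_W_CHAIN: dispatch each vlseg / vlsseg / vloxseg / vluxseg family
// (unmasked, masked, and fault-only-first) to the matching select helper.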
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {

case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {

case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {

case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {

case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:

case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:

case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {

case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                  IntNo == Intrinsic::riscv_vluxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                   IntNo == Intrinsic::riscv_vloxei_mask;

  MVT VT = Node->getSimpleValueType(0);

  bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
  Operands.push_back(Node->getOperand(CurOp++));

  "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    "values when XLEN=32");

  IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  static_cast<unsigned>(IndexLMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                  IntNo == Intrinsic::riscv_vlse_mask;
  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

  MVT VT = Node->getSimpleValueType(0);

  bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
  HasPassthruOperand &&
      ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked);
  Operands.push_back(Node->getOperand(CurOp++));
  else if (HasPassthruOperand)

  RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                      static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

  MVT VT = Node->getSimpleValueType(0);

  bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef());
  Operands.push_back(Node->getOperand(CurOp++));

  RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                      Log2SEW, static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();

case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {

case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {

case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {

case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {

case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:

case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                  IntNo == Intrinsic::riscv_vsuxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                   IntNo == Intrinsic::riscv_vsoxei_mask;

  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  Operands.push_back(Node->getOperand(CurOp++));

  "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    "values when XLEN=32");

  IsMasked, false, IsOrdered, IndexLog2EEW,
  static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                  IntNo == Intrinsic::riscv_vsse_mask;
  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  Operands.push_back(Node->getOperand(CurOp++));

  IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
MVT SrcVT = Node->getOperand(0).getSimpleValueType();

SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
auto Idx = Node->getConstantOperandVal(2);

MVT SubVecContainerVT = SubVecVT;
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
VT = TLI.getContainerForFixedLengthVector(VT);

std::tie(SubRegIdx, Idx) =
    VT, SubVecContainerVT, Idx, TRI);

(void)IsSubVecPartReg;
"Expecting lowering to have created legal INSERT_SUBVECTORs when "
"the subvector is smaller than a full-sized register");

if (SubRegIdx == RISCV::NoSubRegister) {
  "Unexpected subvector extraction");
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);

MVT SubVecContainerVT = VT;
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
InVT = TLI.getContainerForFixedLengthVector(InVT);

std::tie(SubRegIdx, Idx) =
    InVT, SubVecContainerVT, Idx, TRI);

if (SubRegIdx == RISCV::NoSubRegister) {
  "Unexpected subvector extraction");
if (HasPassthruOperand && !Node->getOperand(0).isUndef())
SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
auto *Ld = dyn_cast<LoadSDNode>(Src);

EVT MemVT = Ld->getMemoryVT();
else if (IsScalarMove) {

/*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
Log2SEW, static_cast<unsigned>(LMUL));
const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
switch (ConstraintID) {
OutOps.push_back(Op);
OutOps.push_back(Op);

if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {

if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {

if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))

int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal)) {
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
  const APInt &AndMask = N->getConstantOperandAPInt(1);

ShAmt = N.getOperand(0);
ShAmt = N.getOperand(0);

isa<ConstantSDNode>(N.getOperand(0))) {
if (Imm != 0 && Imm % ShiftWidth == 0) {
  EVT VT = N.getValueType();
cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
  Val = N.getOperand(0);

MVT VT = N.getSimpleValueType();

auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
  Val = N.getOperand(0);

MVT VT = N.getSimpleValueType();
isa<ConstantSDNode>(Node)) &&
"Unexpected opcode");

for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
  if (!User->isMachineOpcode())

  switch (User->getMachineOpcode()) {
  case RISCV::SLLI_UW:
  case RISCV::FMV_W_X:
  case RISCV::FCVT_H_W:
  case RISCV::FCVT_H_WU:
  case RISCV::FCVT_S_W:
  case RISCV::FCVT_S_WU:
  case RISCV::FCVT_D_W:
  case RISCV::FCVT_D_WU:

    if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (UI.getOperandNo() != 0 || Bits < 32)
    if (UI.getOperandNo() != 0 || Bits < 8)
    if (UI.getOperandNo() != 0 || Bits < 16)
    if (UI.getOperandNo() != 0 || Bits < 32)
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
  N->getValueType(0));
} else if (C && C->isAllOnesValue()) {
  N->getValueType(0));
} else if (isa<RegisterSDNode>(N) &&
           cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
  N->getValueType(0));
SplatVal = N.getOperand(1);

!isa<ConstantSDNode>(N.getOperand(1)))

cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();

assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
       "Unexpected splat operand type");
MVT EltVT = N.getSimpleValueType().getVectorElementType();
if (EltVT.bitsLT(XLenVT))

if (!ValidateImm(SplatImm))

[](int64_t Imm) { return isInt<5>(Imm); });

N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });

N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
!isa<ConstantSDNode>(N.getOperand(1)))

cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();

if (!isUInt<5>(SplatImm))

if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  if (!isInt<5>(ImmVal))
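// doPeepholeLoadStoreADDI: fold (load/store (ADDI base, imm1), imm2) into
// (load/store base, imm1 + imm2) when the combined offset still fits in 12
// bits; LUI+ADDIW pairs and global/constant-pool bases are handled as well.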
bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
  unsigned OffsetOpIdx, BaseOpIdx;

  if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))

  if (!Base.isMachineOpcode())

  if (Base.getMachineOpcode() == RISCV::ADDI) {
  } else if (Base.getMachineOpcode() == RISCV::ADDIW &&
             isa<ConstantSDNode>(Base.getOperand(1)) &&
             Base.getOperand(0).isMachineOpcode() &&
             Base.getOperand(0).getMachineOpcode() == RISCV::LUI &&
             isa<ConstantSDNode>(Base.getOperand(0).getOperand(0))) {

  SignExtend64<32>(Base.getOperand(0).getConstantOperandVal(0) << 12);
  Offset += cast<ConstantSDNode>(Base.getOperand(1))->getSExtValue();

  uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

  if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
    int64_t Offset1 = Const->getSExtValue();
    int64_t CombinedOffset = Offset1 + Offset2;
    if (!isInt<12>(CombinedOffset))
  } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
    if (Offset2 != 0 && Alignment <= Offset2)
    int64_t Offset1 = GA->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    CombinedOffset, GA->getTargetFlags());
  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
    if (Offset2 != 0 && Alignment <= Offset2)
    int64_t Offset1 = CP->getOffset();
    int64_t CombinedOffset = Offset1 + Offset2;
    CombinedOffset, CP->getTargetFlags());

  LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");

  if (BaseOpIdx == 0) {
    ImmOperand, N->getOperand(3));
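// doPeepholeSExtW: an ADDIW rd, rs, 0 (sext.w) fed by a W-convertible
// instruction (ADDI -> ADDIW, SLLI -> SLLIW, ...) can be removed by rewriting
// that instruction to its W form, which already sign-extends the result.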
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  if (N->getMachineOpcode() != RISCV::ADDIW ||

  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;

  !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
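// doPeepholeMaskedRVV: if the V0 mask operand of a masked pseudo is produced
// by a vmset (all-ones mask), switch to the unmasked pseudo (tail-agnostic or
// tail-undisturbed form) and drop the mask and policy operands.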
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
  RISCV::getMaskedPseudoInfo(N->getMachineOpcode());

  unsigned MaskOpIdx = I->MaskOpIdx;

  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)

  const auto *Glued = N->getGluedNode();

  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)

  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  TailPolicyOpIdx = N->getNumOperands() - 1;
  if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
    (*TailPolicyOpIdx)--;
  if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
    (*TailPolicyOpIdx)--;

  if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &

  if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
  if (!N->getOperand(0).isUndef())

  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;

  "Unexpected pseudo to transform to");

  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||

  if (auto *TGlued = Glued->getGluedNode())
    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));