20#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
29#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
32 "riscv-use-rematerializable-movimm",
cl::Hidden,
33 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
34 "constant materialization"),
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
  bool MadeChange = false;

  switch (N->getOpcode()) {

    MVT VT = N->getSimpleValueType(0);

    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);

           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&

    int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();

                       MVT::i64, MPI, Align(8),

    LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");

  bool MadeChange = false;

    if (N->use_empty() || !N->isMachineOpcode())

    MadeChange |= doPeepholeSExtW(N);

    MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));

  MadeChange |= doPeepholeMergeVVMFold();

  MadeChange |= doPeepholeNoRegPassThru();

  switch (Inst.getOpndKind()) {

  if (Seq.size() > 3) {
    unsigned ShiftAmt, AddOpc;
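
// Register class IDs for the NF-element vector register tuples (NF = 2..8)
// used by segment load/store selection, indexed by NF - 2 for LMUL=1 and
// LMUL=2; the single LMUL=4 pair has its own class below.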
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};

    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm1_0;
    RegClassID = M1TupleRegClassIDs[NF - 2];

    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm2_0;
    RegClassID = M2TupleRegClassIDs[NF - 2];

    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm4_0;
    RegClassID = RISCV::VRN2M4RegClassID;
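
  // Assemble the NF registers into one tuple value: a REG_SEQUENCE pairs each
  // register with its consecutive sub_vrmX_I subregister index.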
  for (unsigned I = 0; I < Regs.size(); ++I) {

void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);

  Operands.push_back(Node->getOperand(CurOp++));

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++));
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);

    SDValue Mask = Node->getOperand(CurOp++);

    Policy = Node->getConstantOperandVal(CurOp++);
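
// selectVLSEG: each segment load produces NF vector results plus a chain; the
// concrete pseudo is looked up by (NF, masked, strided, SEW, LMUL).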
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);

                               Node->op_begin() + CurOp + NF);

      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))

  for (unsigned I = 0; I < NF; ++I) {
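
// selectVLSEGFF: fault-only-first segment loads additionally define the
// trailing VL output, so NF excludes both the VL result and the chain.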
  unsigned NF = Node->getNumValues() - 2;
  MVT VT = Node->getSimpleValueType(0);

                               Node->op_begin() + CurOp + NF);

      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))

  for (unsigned I = 0; I < NF; ++I) {
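
// selectVLXSEG: indexed segment loads carry a separate index EEW/LMUL; an
// EEW=64 index type is rejected when XLEN=32.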
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);

                               Node->op_begin() + CurOp + NF);

         "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");

      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))

  for (unsigned I = 0; I < NF; ++I) {
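
// selectVSSEG: for segment stores NF is derived from the operand count by
// subtracting the fixed operands (chain, intrinsic ID, pointer, VL); the
// masked and strided forms each carry one extra operand.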
  unsigned NF = Node->getNumOperands() - 4;

  MVT VT = Node->getOperand(2)->getSimpleValueType(0);

  unsigned CurOp = 2 + NF;

      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))

  unsigned NF = Node->getNumOperands() - 5;

  MVT VT = Node->getOperand(2)->getSimpleValueType(0);

  unsigned CurOp = 2 + NF;

         "Element count mismatch");

  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");

      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
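
// selectVSETVLI: vsetvlimax has no AVL operand, so the SEW/LMUL immediates
// sit one operand earlier (Offset 1 vs 2). An AVL that fits in uimm5 selects
// PseudoVSETIVLI; VLMAX uses PseudoVSETVLIX0 (rs1 = x0).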
  unsigned IntNo = Node->getConstantOperandVal(0);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
  unsigned Offset = (VLMax ? 1 : 2);

         "Unexpected number of operands");

      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {

      Opcode = RISCV::PseudoVSETVLIX0;

    VLOperand = Node->getOperand(1);

    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      if (isUInt<5>(AVL)) {

                                                 XLenVT, VLImm, VTypeIOp));
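
// tryShrinkShlLogicImm: push a logic op with a large constant below its
// shift so the constant shrinks to a simm12:
// (and/or/xor (shl X, S), C) -> (shl (opi X, C >> S), S).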
  MVT VT = Node->getSimpleValueType(0);
  unsigned Opcode = Node->getOpcode();

         "Unexpected opcode");

  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);

  bool SignExt = false;

  uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)

  int64_t ShiftedVal = Val >> ShAmt;
  if (!isInt<12>(ShiftedVal))

  if (SignExt && ShAmt >= 32)

  case ISD::AND: BinOpc = RISCV::ANDI; break;
  case ISD::OR:  BinOpc = RISCV::ORI;  break;
  case ISD::XOR: BinOpc = RISCV::XORI; break;

  unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
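
// trySignedBitfieldExtract: with XTheadBb, a sign-extending right shift of a
// left-shifted or sign-extended value folds into TH.EXT rd, rs1, msb, lsb.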
  if (!Subtarget->hasVendorXTHeadBb())

  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

  SDValue N0 = Node->getOperand(0);

  auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
                             MVT VT) {

  MVT VT = Node->getSimpleValueType(0);
  const unsigned RightShAmt = N1C->getZExtValue();

    auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));

    const unsigned LeftShAmt = N01C->getZExtValue();

    if (LeftShAmt > RightShAmt)

    const unsigned Msb = MsbPlusOne - 1;
    const unsigned Lsb = RightShAmt - LeftShAmt;

    SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);

        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();

    const unsigned Msb = ExtSize - 1;
    const unsigned Lsb = RightShAmt;

    SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
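
// tryIndexedLoad: select pre/post-increment loads from XTheadMemIdx; the
// increment must be a simm5 shifted left by 0-3.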
  if (!Subtarget->hasVendorXTHeadMemIdx())

         "Unexpected addressing mode");

  int64_t Offset = C->getSExtValue();

  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))

  if (LoadVT == MVT::i8 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
  else if (LoadVT == MVT::i8 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
  else if (LoadVT == MVT::i16 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
  else if (LoadVT == MVT::i16 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
  else if (LoadVT == MVT::i32 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
  else if (LoadVT == MVT::i32 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
  else if (LoadVT == MVT::i64 && IsPre)
    Opcode = RISCV::TH_LDIB;
  else if (LoadVT == MVT::i64 && IsPost)
    Opcode = RISCV::TH_LDIA;
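
// selectSF_VC_X_SE: lower the SiFive sf.vc.x/sf.vc.i intrinsics to
// LMUL-specific side-effecting pseudos so they stay ordered.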
  unsigned IntNo = Node->getConstantOperandVal(1);

  assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
          IntNo == Intrinsic::riscv_sf_vc_i_se) &&
         "Unexpected SiFive vector coprocessor intrinsic");

  unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));

                        Node->getOperand(4), Node->getOperand(5),
                        Node->getOperand(8), SEWOp,
                        Node->getOperand(0)};

  auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
  switch (LMulSDNode->getSExtValue()) {

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
                                                  : RISCV::PseudoVC_I_SE_MF8;

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
                                                  : RISCV::PseudoVC_I_SE_MF4;

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
                                                  : RISCV::PseudoVC_I_SE_MF2;

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
                                                  : RISCV::PseudoVC_I_SE_M1;

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
                                                  : RISCV::PseudoVC_I_SE_M2;

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
                                                  : RISCV::PseudoVC_I_SE_M4;

    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
                                                  : RISCV::PseudoVC_I_SE_M8;

      Opcode, DL, Node->getSimpleValueType(0), Operands));
  if (Node->isMachineOpcode()) {

  unsigned Opcode = Node->getOpcode();

  MVT VT = Node->getSimpleValueType(0);

  bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();

    assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isZero()) {

    int64_t Imm = ConstNode->getSExtValue();

    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&

      Imm = SignExtend64<16>(Imm);

    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);
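
    // ConstantFP: try the Zfa FLI encoding first; when only the negated
    // value is encodable, emit FLI of -value followed by FSGNJN to restore
    // the sign.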
    const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
    auto [FPImm, NeedsFNeg] =

        FNegOpc = RISCV::FSGNJN_H;

        FNegOpc = RISCV::FSGNJN_S;

        FNegOpc = RISCV::FSGNJN_D;

    bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;

    bool HasZdinx = Subtarget->hasStdExtZdinx();
    bool Is64Bit = Subtarget->is64Bit();

      assert(Subtarget->hasStdExtZfbfmin());
      Opc = RISCV::FMV_H_X;

      Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;

      Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;

        Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;

        Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;

    if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)

        Opc = RISCV::FSGNJN_D;

        Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;

    if (!Subtarget->hasStdExtZdinx())

                                   Node->getOperand(0),

                                   Node->getOperand(1),

    if (Subtarget->hasStdExtZdinx()) {

      if (!SDValue(Node, 0).use_empty()) {

                                       Node->getOperand(0));

      if (!SDValue(Node, 1).use_empty()) {

                                       Node->getOperand(0));

    if (!Subtarget->hasStdExtZfa())

           "Unexpected subtarget");

    if (!SDValue(Node, 0).use_empty()) {

                                     Node->getOperand(0));

    if (!SDValue(Node, 1).use_empty()) {

                                     Node->getOperand(0));

    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    unsigned ShAmt = N1C->getZExtValue();

    unsigned XLen = Subtarget->getXLen();
    if (TrailingZeros > 0 && LeadingZeros == 32) {

    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    unsigned ShAmt = N1C->getZExtValue();

    unsigned XLen = Subtarget->getXLen();

    if (LeadingZeros == 32 && TrailingZeros > ShAmt) {

      Mask |= maskTrailingOnes<uint64_t>(ShAmt);

    if (ShAmt >= TrailingOnes)

    if (TrailingOnes == 32) {
          Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,

    if (HasBitTest && ShAmt + 1 == TrailingOnes) {
          Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,

    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;

    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    SDValue N0 = Node->getOperand(0);

    unsigned ShAmt = N1C->getZExtValue();
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();

    if (ExtSize >= 32 || ShAmt >= ExtSize)

    unsigned LShAmt = Subtarget->getXLen() - ExtSize;

    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));

    const bool isC1ANDI = isInt<12>(C1);

    SDValue N0 = Node->getOperand(0);

      if (!Subtarget->hasVendorXTHeadBb())

      auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));

      unsigned C2 = C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

    bool IsCANDI = isInt<6>(N1C->getSExtValue());

      C1 &= maskTrailingZeros<uint64_t>(C2);

      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    if (!LeftShift && isC1Mask) {

      if (C2 + 32 == Leading) {

        if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {

              RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),

        const unsigned Lsb = C2;
        if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))

        bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
                    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        Skip |= HasBitTest && Leading == XLen - 1;
        if (OneUseOrZExtW && !Skip) {
              RISCV::SLLI, DL, VT, X,

      if (C2 + Leading < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {

        if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {

        if (OneUseOrZExtW && !IsCANDI) {
              RISCV::SLLI, DL, VT, X,

      if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
          !IsCANDI) {
        unsigned SrliOpc = RISCV::SRLI;
        if (X.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
          SrliOpc = RISCV::SRLIW;
          X = X.getOperand(0);

      if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
          OneUseOrZExtW && !IsCANDI) {
            RISCV::SRLIW, DL, VT, X,

      if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
            RISCV::SRLI, DL, VT, X,

      if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
            RISCV::SRLIW, DL, VT, X,

    if (isC1Mask && !isC1ANDI) {
      if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))

    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())

    SDValue N0 = Node->getOperand(0);

        (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
    IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))

    bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
    IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))

    unsigned XLen = Subtarget->getXLen();

    unsigned ConstantShift = XLen - LeadingZeros;

    uint64_t ShiftedC1 = C1 << ConstantShift;

      ShiftedC1 = SignExtend64<32>(ShiftedC1);

    unsigned IntNo = Node->getConstantOperandVal(0);

    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
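
      // vmsge(u).vx has no machine instruction: expand to vmslt(u).vx
      // followed by vmnand, or to vmset when comparing unsigned >= 0.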
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)

          IsCmpUnsignedZero = true;

      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;

#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)           \
  case RISCVII::VLMUL::lmulenum:                                              \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                \
                             : RISCV::PseudoVMSLT_VX_##suffix;                \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                           \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                            \

#undef CASE_VMSLT_VMNAND_VMSET_OPCODES

      if (IsCmpUnsignedZero) {

                                               {Cmp, Cmp, VL, SEW}));

    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;

      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)

          IsCmpUnsignedZero = true;

      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,

#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                        \
  case RISCVII::VLMUL::lmulenum:                                              \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                \
                             : RISCV::PseudoVMSLT_VX_##suffix;                \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK     \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;     \

#undef CASE_VMSLT_OPCODES

#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                      \
  case RISCVII::VLMUL::lmulenum:                                              \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                             \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                           \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                               \

#undef CASE_VMXOR_VMANDN_VMOR_OPCODES

      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      if (IsCmpUnsignedZero) {

        if (Mask == MaskedOff) {

                              {Mask, MaskedOff, VL, MaskSEW}));

        if (Mask == MaskedOff) {

                              {Mask, Cmp, VL, MaskSEW}));

                            {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),

                                               {Cmp, Mask, VL, MaskSEW}));

    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
  unsigned IntNo = Node->getConstantOperandVal(1);

  case Intrinsic::riscv_vlseg2:
  case Intrinsic::riscv_vlseg3:
  case Intrinsic::riscv_vlseg4:
  case Intrinsic::riscv_vlseg5:
  case Intrinsic::riscv_vlseg6:
  case Intrinsic::riscv_vlseg7:
  case Intrinsic::riscv_vlseg8: {

  case Intrinsic::riscv_vlseg2_mask:
  case Intrinsic::riscv_vlseg3_mask:
  case Intrinsic::riscv_vlseg4_mask:
  case Intrinsic::riscv_vlseg5_mask:
  case Intrinsic::riscv_vlseg6_mask:
  case Intrinsic::riscv_vlseg7_mask:
  case Intrinsic::riscv_vlseg8_mask: {

  case Intrinsic::riscv_vlsseg2:
  case Intrinsic::riscv_vlsseg3:
  case Intrinsic::riscv_vlsseg4:
  case Intrinsic::riscv_vlsseg5:
  case Intrinsic::riscv_vlsseg6:
  case Intrinsic::riscv_vlsseg7:
  case Intrinsic::riscv_vlsseg8: {

  case Intrinsic::riscv_vlsseg2_mask:
  case Intrinsic::riscv_vlsseg3_mask:
  case Intrinsic::riscv_vlsseg4_mask:
  case Intrinsic::riscv_vlsseg5_mask:
  case Intrinsic::riscv_vlsseg6_mask:
  case Intrinsic::riscv_vlsseg7_mask:
  case Intrinsic::riscv_vlsseg8_mask: {

  case Intrinsic::riscv_vloxseg2:
  case Intrinsic::riscv_vloxseg3:
  case Intrinsic::riscv_vloxseg4:
  case Intrinsic::riscv_vloxseg5:
  case Intrinsic::riscv_vloxseg6:
  case Intrinsic::riscv_vloxseg7:
  case Intrinsic::riscv_vloxseg8:

  case Intrinsic::riscv_vluxseg2:
  case Intrinsic::riscv_vluxseg3:
  case Intrinsic::riscv_vluxseg4:
  case Intrinsic::riscv_vluxseg5:
  case Intrinsic::riscv_vluxseg6:
  case Intrinsic::riscv_vluxseg7:
  case Intrinsic::riscv_vluxseg8:

  case Intrinsic::riscv_vloxseg2_mask:
  case Intrinsic::riscv_vloxseg3_mask:
  case Intrinsic::riscv_vloxseg4_mask:
  case Intrinsic::riscv_vloxseg5_mask:
  case Intrinsic::riscv_vloxseg6_mask:
  case Intrinsic::riscv_vloxseg7_mask:
  case Intrinsic::riscv_vloxseg8_mask:

  case Intrinsic::riscv_vluxseg2_mask:
  case Intrinsic::riscv_vluxseg3_mask:
  case Intrinsic::riscv_vluxseg4_mask:
  case Intrinsic::riscv_vluxseg5_mask:
  case Intrinsic::riscv_vluxseg6_mask:
  case Intrinsic::riscv_vluxseg7_mask:
  case Intrinsic::riscv_vluxseg8_mask:

  case Intrinsic::riscv_vlseg8ff:
  case Intrinsic::riscv_vlseg7ff:
  case Intrinsic::riscv_vlseg6ff:
  case Intrinsic::riscv_vlseg5ff:
  case Intrinsic::riscv_vlseg4ff:
  case Intrinsic::riscv_vlseg3ff:
  case Intrinsic::riscv_vlseg2ff: {

  case Intrinsic::riscv_vlseg8ff_mask:
  case Intrinsic::riscv_vlseg7ff_mask:
  case Intrinsic::riscv_vlseg6ff_mask:
  case Intrinsic::riscv_vlseg5ff_mask:
  case Intrinsic::riscv_vlseg4ff_mask:
  case Intrinsic::riscv_vlseg3ff_mask:
  case Intrinsic::riscv_vlseg2ff_mask: {

  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                    IntNo == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                     IntNo == Intrinsic::riscv_vloxei_mask;

    MVT VT = Node->getSimpleValueType(0);

    Operands.push_back(Node->getOperand(CurOp++));

           "Element count mismatch");

    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      report_fatal_error("The V extension does not support EEW=64 for index "
                         "values when XLEN=32");

        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));
    if (auto *MemOp = dyn_cast<MemSDNode>(Node))

  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                    IntNo == Intrinsic::riscv_vlse_mask;
    bool IsStrided =
        IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

    MVT VT = Node->getSimpleValueType(0);

    bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;

    if (HasPassthruOperand)
      Operands.push_back(Node->getOperand(CurOp++));

        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    if (auto *MemOp = dyn_cast<MemSDNode>(Node))

  case Intrinsic::riscv_vleff:
  case Intrinsic::riscv_vleff_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

    MVT VT = Node->getSimpleValueType(0);

    Operands.push_back(Node->getOperand(CurOp++));

        RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));

    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  unsigned IntNo = Node->getConstantOperandVal(1);

  case Intrinsic::riscv_vsseg2:
  case Intrinsic::riscv_vsseg3:
  case Intrinsic::riscv_vsseg4:
  case Intrinsic::riscv_vsseg5:
  case Intrinsic::riscv_vsseg6:
  case Intrinsic::riscv_vsseg7:
  case Intrinsic::riscv_vsseg8: {

  case Intrinsic::riscv_vsseg2_mask:
  case Intrinsic::riscv_vsseg3_mask:
  case Intrinsic::riscv_vsseg4_mask:
  case Intrinsic::riscv_vsseg5_mask:
  case Intrinsic::riscv_vsseg6_mask:
  case Intrinsic::riscv_vsseg7_mask:
  case Intrinsic::riscv_vsseg8_mask: {

  case Intrinsic::riscv_vssseg2:
  case Intrinsic::riscv_vssseg3:
  case Intrinsic::riscv_vssseg4:
  case Intrinsic::riscv_vssseg5:
  case Intrinsic::riscv_vssseg6:
  case Intrinsic::riscv_vssseg7:
  case Intrinsic::riscv_vssseg8: {

  case Intrinsic::riscv_vssseg2_mask:
  case Intrinsic::riscv_vssseg3_mask:
  case Intrinsic::riscv_vssseg4_mask:
  case Intrinsic::riscv_vssseg5_mask:
  case Intrinsic::riscv_vssseg6_mask:
  case Intrinsic::riscv_vssseg7_mask:
  case Intrinsic::riscv_vssseg8_mask: {

  case Intrinsic::riscv_vsoxseg2:
  case Intrinsic::riscv_vsoxseg3:
  case Intrinsic::riscv_vsoxseg4:
  case Intrinsic::riscv_vsoxseg5:
  case Intrinsic::riscv_vsoxseg6:
  case Intrinsic::riscv_vsoxseg7:
  case Intrinsic::riscv_vsoxseg8:

  case Intrinsic::riscv_vsuxseg2:
  case Intrinsic::riscv_vsuxseg3:
  case Intrinsic::riscv_vsuxseg4:
  case Intrinsic::riscv_vsuxseg5:
  case Intrinsic::riscv_vsuxseg6:
  case Intrinsic::riscv_vsuxseg7:
  case Intrinsic::riscv_vsuxseg8:

  case Intrinsic::riscv_vsoxseg2_mask:
  case Intrinsic::riscv_vsoxseg3_mask:
  case Intrinsic::riscv_vsoxseg4_mask:
  case Intrinsic::riscv_vsoxseg5_mask:
  case Intrinsic::riscv_vsoxseg6_mask:
  case Intrinsic::riscv_vsoxseg7_mask:
  case Intrinsic::riscv_vsoxseg8_mask:

  case Intrinsic::riscv_vsuxseg2_mask:
  case Intrinsic::riscv_vsuxseg3_mask:
  case Intrinsic::riscv_vsuxseg4_mask:
  case Intrinsic::riscv_vsuxseg5_mask:
  case Intrinsic::riscv_vsuxseg6_mask:
  case Intrinsic::riscv_vsuxseg7_mask:
  case Intrinsic::riscv_vsuxseg8_mask:

  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                    IntNo == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                     IntNo == Intrinsic::riscv_vsoxei_mask;

    MVT VT = Node->getOperand(2)->getSimpleValueType(0);

    Operands.push_back(Node->getOperand(CurOp++));

           "Element count mismatch");

    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      report_fatal_error("The V extension does not support EEW=64 for index "
                         "values when XLEN=32");

        IsMasked, IsOrdered, IndexLog2EEW,
        static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));

    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                    IntNo == Intrinsic::riscv_vsse_mask;
    bool IsStrided =
        IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

    MVT VT = Node->getOperand(2)->getSimpleValueType(0);

    Operands.push_back(Node->getOperand(CurOp++));

        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  case Intrinsic::riscv_sf_vc_x_se:
  case Intrinsic::riscv_sf_vc_i_se:

    MVT SrcVT = Node->getOperand(0).getSimpleValueType();

    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);

    auto Idx = Node->getConstantOperandVal(2);

    MVT SubVecContainerVT = SubVecVT;

      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);

    MVT ContainerVT = VT;

      ContainerVT = TLI.getContainerForFixedLengthVector(VT);

    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            ContainerVT, SubVecContainerVT, Idx, TRI);

    [[maybe_unused]] bool IsSubVecPartReg =

    assert((!IsSubVecPartReg || V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");
    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID =

             "Unexpected subvector extraction");

    SDValue V = Node->getOperand(0);
    auto Idx = Node->getConstantOperandVal(1);
    MVT InVT = V.getSimpleValueType();

    MVT SubVecContainerVT = VT;

      SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);

      InVT = TLI.getContainerForFixedLengthVector(InVT);

    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            InVT, SubVecContainerVT, Idx, TRI);

    if (SubRegIdx == RISCV::NoSubRegister) {

             "Unexpected subvector extraction");

    if (!Node->getOperand(0).isUndef())

    SDValue Src = Node->getOperand(1);
    auto *Ld = dyn_cast<LoadSDNode>(Src);

    if (!Ld || Ld->isIndexed())

    EVT MemVT = Ld->getMemoryVT();

    if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())

    Operands.append({VL, SEW, PolicyOp, Ld->getChain()});

        RISCV::getVLEPseudo(/*IsMasked*/ false, IsStrided, /*FF*/ false,
                            Log2SEW, static_cast<unsigned>(LMUL));
    unsigned Locality = Node->getConstantOperandVal(3);

    if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {

      int NontemporalLevel = 0;

        NontemporalLevel = 3;

        NontemporalLevel = 1;

        NontemporalLevel = 0;

      if (NontemporalLevel & 0b1)

      if (NontemporalLevel & 0b10)

    std::vector<SDValue> &OutOps) {

  switch (ConstraintID) {

    assert(Found && "SelectAddrRegImm should always succeed");
    OutOps.push_back(Op0);
    OutOps.push_back(Op1);

    OutOps.push_back(Op);
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {

    if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
      int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
      if (isInt<12>(CVal)) {

                              bool IsPrefetch = false) {
  if (!isa<ConstantSDNode>(Addr))

  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();

  int64_t Lo12 = SignExtend64<12>(CVal);

  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (IsPrefetch && (Lo12 & 0b11111) != 0)

    int64_t Hi20 = (Hi >> 12) & 0xfffff;

  if (Seq.back().getOpcode() != RISCV::ADDI)

  Lo12 = Seq.back().getImm();
  if (IsPrefetch && (Lo12 & 0b11111) != 0)

  assert(!Seq.empty() && "Expected more instructions in sequence");
  for (auto *Use : Add->uses()) {

    EVT VT = cast<MemSDNode>(Use)->getMemoryVT();

        cast<StoreSDNode>(Use)->getValue() == Add)

        cast<AtomicSDNode>(Use)->getVal() == Add)

                                              unsigned MaxShiftAmount,

  EVT VT = Addr.getSimpleValueType();

    if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {

      if (N.getConstantOperandVal(1) <= MaxShiftAmount) {

        ShiftAmt = N.getConstantOperandVal(1);

    return ShiftAmt != 0;

    if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {

          isInt<12>(C1->getSExtValue())) {

    } else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {

      UnwrapShl(Addr.getOperand(1), Index, Scale);

  } else if (UnwrapShl(Addr, Index, Scale)) {
  MVT VT = Addr.getSimpleValueType();

  int64_t RV32ZdinxRange = IsINX ? 4 : 0;

    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {

      if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {

            GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
        if (CVal == 0 || Alignment > CVal) {
          int64_t CombinedOffset = CVal + GA->getOffset();

              CombinedOffset, GA->getTargetFlags());

      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))

  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
           "simm12 not already handled?");

    if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
      int64_t Adj = CVal < 0 ? -2048 : 2047;

  MVT VT = Addr.getSimpleValueType();

    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {

      if ((CVal & 0b11111) != 0) {

      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))

  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!isInt<12>(CVal) && "simm12 not already handled?");

    if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
      int64_t Adj = CVal < 0 ? -2048 : 2016;
      int64_t AdjustedOffset = CVal - Adj;
          RISCV::ADDI, DL, VT, Addr.getOperand(0),
  if (Imm != 0 && Imm % ShiftWidth == 0) {

  if (Imm != 0 && Imm % ShiftWidth == 0) {

    unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;

  if (Imm % ShiftWidth == ShiftWidth - 1) {

         "Unexpected condition code!");

  ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CCVal != ExpectedCCVal)

  if (!LHS.getValueType().isScalarInteger())

  if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
    int64_t CVal = C->getSExtValue();

    if (CVal == -2048) {
          RISCV::XORI, DL, N->getValueType(0), LHS,

    if (isInt<12>(CVal) || CVal == 2048) {
          RISCV::ADDI, DL, N->getValueType(0), LHS,

      cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
    Val = N.getOperand(0);

  auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
    if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))

        N.getConstantOperandVal(1) == ShiftAmt &&

  MVT VT = N.getSimpleValueType();

  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
    Val = N.getOperand(0);

    MVT VT = N.getSimpleValueType();
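
// selectSHXADDOp: fold a constant shift-and-mask of the index operand into
// the built-in scale of SH1ADD/SH2ADD/SH3ADD by matching (shl/srl X, C)
// under an AND mask.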
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {

      uint64_t Mask = N.getConstantOperandVal(1);

      unsigned XLen = Subtarget->getXLen();

        Mask &= maskTrailingZeros<uint64_t>(C2);

        Mask &= maskTrailingOnes<uint64_t>(XLen - C2);

      if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {

        EVT VT = N.getValueType();

      if (!LeftShift && Leading == C2 && Trailing == ShAmt) {

        EVT VT = N.getValueType();

  bool LeftShift = N.getOpcode() == ISD::SHL;
  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N.getOperand(1))) {

      unsigned C1 = N.getConstantOperandVal(1);
      unsigned XLen = Subtarget->getXLen();

      if (LeftShift && Leading == 32 && Trailing > 0 &&
          (Trailing + C1) == ShAmt) {

        EVT VT = N.getValueType();

      if (!LeftShift && Leading == 32 && Trailing > C1 &&
          (Trailing - C1) == ShAmt) {

        EVT VT = N.getValueType();

  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&

    uint64_t Mask = N.getConstantOperandVal(1);

      Mask &= maskTrailingZeros<uint64_t>(C2);

    if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {

      EVT VT = N.getValueType();
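
// vectorPseudoHasAllNBitUsers: a vector pseudo user demands only the low
// bits of a scalar operand when getVectorLowDemandedScalarBits reports a
// bound no larger than Bits.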
  bool HasGlueOp = User->getGluedNode() != nullptr;

  bool HasChainOp =
      User->getOperand(ChainOpIdx).getValueType() == MVT::Other;

  const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);

  if (UserOpNo == VLIdx)

  auto NumDemandedBits =
      RISCV::getVectorLowDemandedScalarBits(MCOpcode, Log2SEW);
  return NumDemandedBits && Bits >= *NumDemandedBits;
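
// hasAllNBitUsers: returns true when every transitive user reads only the
// low Bits bits of Node, enabling W-form and immediate-shrinking rewrites.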
                                         const unsigned Depth) const {

          isa<ConstantSDNode>(Node) || Depth != 0) &&
         "Unexpected opcode");

  if (Depth == 0 && !Node->getValueType(0).isScalarInteger())

  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {

    if (!User->isMachineOpcode())

    switch (User->getMachineOpcode()) {

    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
    case RISCV::TH_REVW:
    case RISCV::TH_SRRIW:

      if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))

      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))

      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))

      unsigned ShAmt = User->getConstantOperandVal(1);

    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:

      if (Bits < (Subtarget->getXLen() / 2))

    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:

      if (UI.getOperandNo() != 0 || Bits < 32)

      if (UI.getOperandNo() != 0 || Bits < 8)

      if (UI.getOperandNo() != 0 || Bits < 16)

      if (UI.getOperandNo() != 0 || Bits < 32)
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    int64_t Offset = C->getSExtValue();

    for (Shift = 0; Shift < 4; Shift++)
      if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))

  EVT Ty = N->getValueType(0);

  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue())) {
                                   N->getValueType(0));
  } else if (C && C->isAllOnes()) {
                                   N->getValueType(0));
  } else if (isa<RegisterSDNode>(N) &&
             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
                                   N->getValueType(0));

  if (!N.getOperand(0).isUndef())

  N = N.getOperand(1);

      !Splat.getOperand(0).isUndef())

  assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");

  SplatVal = Splat.getOperand(1);

                                std::function<bool(int64_t)> ValidateImm) {

  if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))

  const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();

         "Unexpected splat operand type");

  APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);

  if (!ValidateImm(SplatImm))

                               [](int64_t Imm) { return isInt<5>(Imm); });

      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });

      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);

      N, SplatVal, *CurDAG, *Subtarget,
      [Bits](int64_t Imm) { return isUIntN(Bits, Imm); });

      isa<ConstantSDNode>(VL) &&

  if (!N.hasOneUse() ||
      N.getValueType().getSizeInBits().getKnownMinValue() < 8)

  N = N->getOperand(0);

          ->getLegalZfaFPImm(APF, VT)

  if (VT == MVT::f64 && !Subtarget->is64Bit()) {

  if (auto *C = dyn_cast<ConstantSDNode>(N)) {

    if (!isInt<5>(ImmVal))
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {

  if (N->getMachineOpcode() != RISCV::ADDIW ||

  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;

        !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))

  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:

  if (!isa<RegisterSDNode>(MaskOp) ||
      cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)

  const auto *Glued = GlueOp.getNode();

  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)

  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;

                     N->getOperand(N->getNumOperands() - 1));

  return V.isMachineOpcode() &&
         V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
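
// doPeepholeMaskedRVV: when the mask feeding V0 is a VMSET (all ones),
// switch the masked pseudo to its unmasked form and drop the mask operand.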
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());

  unsigned MaskOpIdx = I->MaskOpIdx;

  const unsigned Opc = I->UnmaskedPseudo;

         "Masked and unmasked pseudos are inconsistent");

  assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");

  for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {

    if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)

  const auto *Glued = N->getGluedNode();
  if (auto *TGlued = Glued->getGluedNode())

  if (!N->memoperands_empty())

  Result->setFlags(N->getFlags());

    return RISCV::PseudoVMSET_M_B1;

    return RISCV::PseudoVMSET_M_B2;

    return RISCV::PseudoVMSET_M_B4;

    return RISCV::PseudoVMSET_M_B8;

    return RISCV::PseudoVMSET_M_B16;

    return RISCV::PseudoVMSET_M_B32;

    return RISCV::PseudoVMSET_M_B64;
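
// performCombineVMergeAndVOps: fold a vmerge into the pseudo defining its
// true operand by switching that pseudo to its masked form, taking the
// smaller of the two VLs.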
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {

    Merge = N->getOperand(0);
    False = N->getOperand(0);
    True = N->getOperand(1);
    VL = N->getOperand(2);

    Merge = N->getOperand(0);
    False = N->getOperand(1);
    True = N->getOperand(2);
    Mask = N->getOperand(3);
    VL = N->getOperand(4);

    Glue = N->getOperand(N->getNumOperands() - 1);

  assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);

         "Expected True to be the first output of an instruction.");

  bool IsMasked = false;

      RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
  if (!Info && HasTiedDest) {
    Info = RISCV::getMaskedPseudoInfo(TrueOpc);

    if (False != MergeOpTrue)

    assert(HasTiedDest && "Expected tied dest");

  unsigned TrueVLIndex =
      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;

    auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
    auto *CRHS = dyn_cast<ConstantSDNode>(RHS);

    return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;

  VL = GetMinVL(TrueVL, VL);

  if (TrueVL != VL || !IsMasked)

                                   RISCV::V0, AllOnesMask, SDValue());

  unsigned MaskedOpc = Info->MaskedPseudo;

         "Expected instructions with mask to have a policy operand.");

         "Expected instructions with mask to have a tied dest.");

  bool MergeVLShrunk = VL != OrigVL;

  const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
  assert(!IsMasked || NormalOpsEnd == Info->MaskOpIdx);

  if (HasRoundingMode)

  Ops.append({VL, SEW, PolicyOp});

  if (!cast<MachineSDNode>(True)->memoperands_empty())
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;

    if (N->use_empty() || !N->isMachineOpcode())

    MadeChange |= performCombineVMergeAndVOps(N);

bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
  bool MadeChange = false;

    if (N->use_empty() || !N->isMachineOpcode())

    const unsigned Opc = N->getMachineOpcode();
    if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||

    for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {

    Result->setFlags(N->getFlags());