#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"
32 "riscv-use-rematerializable-movimm",
cl::Hidden,
33 cl::desc(
"Use a rematerializable pseudoinstruction for 2 instruction "
34 "constant materialization"),
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
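// The GET_*_IMPL macros above instantiate the tablegen-generated searchable
// tables (behind lookups such as RISCV::getVLSEGPseudo and
// RISCV::getVLEPseudo) that the selection code below uses to map
// {NF, masked, strided, SEW, LMUL} tuples to concrete vector pseudo opcodes.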
  bool MadeChange = false;
  switch (N->getOpcode()) {
    MVT VT = N->getSimpleValueType(0);
    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
    int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
        MVT::i64, MPI, Align(8),
    LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
  bool MadeChange = false;
    if (N->use_empty() || !N->isMachineOpcode())
    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
  MadeChange |= doPeepholeMergeVVMFold();
  MadeChange |= doPeepholeNoRegPassThru();
  switch (Inst.getOpndKind()) {
  if (Seq.size() > 3) {
    unsigned ShiftAmt, AddOpc;
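// For sequences longer than 3 instructions, selectImm tries to form the
// constant as (ADD (SLLI X, ShiftAmt), X), where X itself is at worst an
// LUI+ADDIW pair: one extra temporary register in exchange for a shorter
// materialization sequence.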
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm1_0;
    RegClassID = M1TupleRegClassIDs[NF - 2];
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm2_0;
    RegClassID = M2TupleRegClassIDs[NF - 2];
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm4_0;
    RegClassID = RISCV::VRN2M4RegClassID;
  for (unsigned I = 0; I < Regs.size(); ++I) {
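// createTuple glues the NF input vector registers into a single tuple value
// with a REG_SEQUENCE, pairing register I with subregister index SubReg0 + I
// in the tuple register class chosen above.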
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    *IndexVT = Operands.back()->getSimpleValueType(0);
  SDValue Mask = Node->getOperand(CurOp++);
  Policy = Node->getConstantOperandVal(CurOp++);
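// The operand order built here mirrors the vector pseudo definitions: base
// pointer, optional stride/index, mask (for masked forms), VL, SEW, a policy
// immediate for loads, and finally the chain.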
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
             Node->op_begin() + CurOp + NF);
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  for (unsigned I = 0; I < NF; ++I) {
  unsigned NF = Node->getNumValues() - 2;
  MVT VT = Node->getSimpleValueType(0);
             Node->op_begin() + CurOp + NF);
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  for (unsigned I = 0; I < NF; ++I) {
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
             Node->op_begin() + CurOp + NF);
         "Element count mismatch");
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  for (unsigned I = 0; I < NF; ++I) {
  unsigned NF = Node->getNumOperands() - 4;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned CurOp = 2 + NF;
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  unsigned NF = Node->getNumOperands() - 5;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned CurOp = 2 + NF;
         "Element count mismatch");
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  unsigned IntNo = Node->getConstantOperandVal(0);
  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax) &&
         "Unexpected vsetvli intrinsic");
  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
  unsigned Offset = (VLMax ? 1 : 2);
         "Unexpected number of operands");
      Node->getConstantOperandVal(Offset + 1) & 0x7);
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
    Opcode = RISCV::PseudoVSETVLIX0;
  VLOperand = Node->getOperand(1);
  if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
    if (isUInt<5>(AVL)) {
                                 XLenVT, VLImm, VTypeIOp));
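// vsetvli selection in brief: a VLMAX request becomes PseudoVSETVLIX0 (x0
// source register), a constant AVL that fits in 5 bits becomes the immediate
// form (vsetivli), and anything else uses the plain register form.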
  MVT VT = Node->getSimpleValueType(0);
  unsigned Opcode = Node->getOpcode();
         "Unexpected opcode");
  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);
  bool SignExt = false;
  uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
  int64_t ShiftedVal = Val >> ShAmt;
  if (!isInt<12>(ShiftedVal))
  if (SignExt && ShAmt >= 32)
  case ISD::AND: BinOpc = RISCV::ANDI; break;
  case ISD::OR:  BinOpc = RISCV::ORI;  break;
  case ISD::XOR: BinOpc = RISCV::XORI; break;
  unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
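// tryShrinkShlLogicImm rewrites ((x << C1) op C2) as ((x op (C2 >> C1)) << C1)
// when the shifted-down constant fits the 12-bit immediate of ANDI/ORI/XORI,
// e.g. (and (shl x, 8), 0xff00) -> (slli (andi x, 0xff), 8), which saves a
// constant materialization.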
  if (!Subtarget->hasVendorXTHeadBb())
  auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  SDValue N0 = Node->getOperand(0);
  auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
  MVT VT = Node->getSimpleValueType(0);
  const unsigned RightShAmt = N1C->getZExtValue();
    auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    const unsigned LeftShAmt = N01C->getZExtValue();
    if (LeftShAmt > RightShAmt)
    const unsigned Msb = MsbPlusOne - 1;
    const unsigned Lsb = RightShAmt - LeftShAmt;
    SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    const unsigned Msb = ExtSize - 1;
    const unsigned Lsb = RightShAmt;
    SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
  if (!Subtarget->hasVendorXTHeadMemIdx())
         "Unexpected addressing mode");
  int64_t Offset = C->getSExtValue();
  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
  if (LoadVT == MVT::i8 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
  else if (LoadVT == MVT::i8 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
  else if (LoadVT == MVT::i16 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
  else if (LoadVT == MVT::i16 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
  else if (LoadVT == MVT::i32 && IsPre)
    Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
  else if (LoadVT == MVT::i32 && IsPost)
    Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
  else if (LoadVT == MVT::i64 && IsPre)
    Opcode = RISCV::TH_LDIB;
  else if (LoadVT == MVT::i64 && IsPost)
    Opcode = RISCV::TH_LDIA;
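// XTHeadMemIdx encodes the offset as a 5-bit signed immediate scaled by
// 1 << Shift with Shift in [0, 3]; the loop above picks the smallest shift
// that makes the offset representable. The IB/IA opcode suffixes select the
// pre-increment and post-increment addressing forms respectively.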
  unsigned IntNo = Node->getConstantOperandVal(1);
  assert((IntNo == Intrinsic::riscv_sf_vc_x_se ||
          IntNo == Intrinsic::riscv_sf_vc_i_se) &&
         "Unexpected SiFive VC intrinsic");
  unsigned Log2SEW = Log2_32(Node->getConstantOperandVal(6));
                      Node->getOperand(4), Node->getOperand(5),
                      Node->getOperand(8), SEWOp,
                      Node->getOperand(0)};
  auto *LMulSDNode = cast<ConstantSDNode>(Node->getOperand(7));
  switch (LMulSDNode->getSExtValue()) {
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF8
                                                  : RISCV::PseudoVC_I_SE_MF8;
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF4
                                                  : RISCV::PseudoVC_I_SE_MF4;
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_MF2
                                                  : RISCV::PseudoVC_I_SE_MF2;
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M1
                                                  : RISCV::PseudoVC_I_SE_M1;
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M2
                                                  : RISCV::PseudoVC_I_SE_M2;
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M4
                                                  : RISCV::PseudoVC_I_SE_M4;
    Opcode = IntNo == Intrinsic::riscv_sf_vc_x_se ? RISCV::PseudoVC_X_SE_M8
                                                  : RISCV::PseudoVC_I_SE_M8;
      Opcode, DL, Node->getSimpleValueType(0), Operands));
  if (Node->isMachineOpcode()) {
  unsigned Opcode = Node->getOpcode();
  MVT VT = Node->getSimpleValueType(0);
  bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
    assert((VT == Subtarget->getXLenVT() || VT == MVT::i32) && "Unexpected VT");
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isZero()) {
    int64_t Imm = ConstNode->getSExtValue();
    if (isUInt<8>(Imm) && isInt<6>(SignExtend64<8>(Imm)) && hasAllBUsers(Node))
      Imm = SignExtend64<8>(Imm);
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
      Imm = SignExtend64<16>(Imm);
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);
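// Narrowing trick: when every user only reads the low N bits
// (hasAllBUsers/hasAllHUsers/hasAllWUsers), the constant may be replaced by
// its sign extension from those bits, which is often cheaper to materialize;
// e.g. 0xFF with byte-only users becomes -1, a single ADDI from x0.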
    const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
    auto [FPImm, NeedsFNeg] =
        FNegOpc = RISCV::FSGNJN_H;
        FNegOpc = RISCV::FSGNJN_S;
        FNegOpc = RISCV::FSGNJN_D;
    bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
    bool HasZdinx = Subtarget->hasStdExtZdinx();
    bool Is64Bit = Subtarget->is64Bit();
      assert(Subtarget->hasStdExtZfbfmin());
      Opc = RISCV::FMV_H_X;
      Opc = Subtarget->hasStdExtZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
      Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
        Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
        Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
    if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
        Opc = RISCV::FSGNJN_D;
        Opc = Is64Bit ? RISCV::FSGNJN_D_INX : RISCV::FSGNJN_D_IN32X;
    if (!Subtarget->hasStdExtZdinx())
                       Node->getOperand(0),
                       Node->getOperand(1),
    if (Subtarget->hasStdExtZdinx()) {
      if (!SDValue(Node, 0).use_empty()) {
                            Node->getOperand(0));
      if (!SDValue(Node, 1).use_empty()) {
                            Node->getOperand(0));
    if (!Subtarget->hasStdExtZfa())
           "Unexpected subtarget");
    if (!SDValue(Node, 0).use_empty()) {
                          Node->getOperand(0));
    if (!SDValue(Node, 1).use_empty()) {
                          Node->getOperand(0));
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    SDValue N0 = Node->getOperand(0);
    unsigned ShAmt = N1C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      if (TrailingZeros > 0 && LeadingZeros == 32) {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    SDValue N0 = Node->getOperand(0);
    unsigned ShAmt = N1C->getZExtValue();
      unsigned XLen = Subtarget->getXLen();
      if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
      Mask |= maskTrailingOnes<uint64_t>(ShAmt);
      if (ShAmt >= TrailingOnes)
      if (TrailingOnes == 32) {
            Subtarget->is64Bit() ? RISCV::SRLIW : RISCV::SRLI, DL, VT,
      if (HasBitTest && ShAmt + 1 == TrailingOnes) {
            Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
      unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    SDValue N0 = Node->getOperand(0);
    unsigned ShAmt = N1C->getZExtValue();
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    if (ExtSize >= 32 || ShAmt >= ExtSize)
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    const bool isC1ANDI = isInt<12>(C1);
    SDValue N0 = Node->getOperand(0);
      if (!Subtarget->hasVendorXTHeadBb())
    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    unsigned C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
    bool IsCANDI = isInt<6>(N1C->getSExtValue());
      C1 &= maskTrailingZeros<uint64_t>(C2);
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
    if (!LeftShift && isC1Mask) {
      if (C2 + 32 == Leading) {
      if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
          cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
            RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
      const unsigned Lsb = C2;
      if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
      bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
                  cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
      Skip |= HasBitTest && Leading == XLen - 1;
      if (OneUseOrZExtW && !Skip) {
            RISCV::SLLI, DL, VT, X,
    if (C2 + Leading < XLen &&
        C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
      if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
      if (OneUseOrZExtW && !IsCANDI) {
            RISCV::SLLI, DL, VT, X,
    if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
      unsigned SrliOpc = RISCV::SRLI;
          isa<ConstantSDNode>(X.getOperand(1)) &&
          X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
        SrliOpc = RISCV::SRLIW;
        X = X.getOperand(0);
    if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
        OneUseOrZExtW && !IsCANDI) {
          RISCV::SRLIW, DL, VT, X,
    if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
          RISCV::SRLI, DL, VT, X,
    if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
          RISCV::SRLIW, DL, VT, X,
    if (C2 < Trailing && Leading + Trailing == 32 && OneUseOrZExtW &&
        Subtarget->hasStdExtZba()) {
          RISCV::SRLI, DL, VT, X,
          RISCV::SLLI_UW, DL, VT, SDValue(SRLI, 0),
    if (isC1Mask && !isC1ANDI) {
      if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
    SDValue N0 = Node->getOperand(0);
        (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
    IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
    bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
    IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
    if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
    unsigned XLen = Subtarget->getXLen();
    unsigned ConstantShift = XLen - LeadingZeros;
    uint64_t ShiftedC1 = C1 << ConstantShift;
      ShiftedC1 = SignExtend64<32>(ShiftedC1);
  unsigned IntNo = Node->getConstantOperandVal(0);
  case Intrinsic::riscv_vmsgeu:
  case Intrinsic::riscv_vmsge: {
    SDValue Src1 = Node->getOperand(1);
    SDValue Src2 = Node->getOperand(2);
    bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
    bool IsCmpUnsignedZero = false;
    if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
      int64_t CVal = C->getSExtValue();
      if (CVal >= -15 && CVal <= 16) {
        if (!IsUnsigned || CVal != 0)
        IsCmpUnsignedZero = true;
    unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
    if (IsCmpUnsignedZero) {
                                   {Cmp, Cmp, VL, SEW}));
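// There is no native vmsge(u).vx instruction, so the comparison is
// synthesized:
//   vmsge.vx v0, v8, a0  ->  vmslt.vx v0, v8, a0; vmnand.mm v0, v0, v0
// and the degenerate unsigned "x >= 0" case folds to a plain vmset.m.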
  case Intrinsic::riscv_vmsgeu_mask:
  case Intrinsic::riscv_vmsge_mask: {
    SDValue Src1 = Node->getOperand(2);
    SDValue Src2 = Node->getOperand(3);
    bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
    bool IsCmpUnsignedZero = false;
    if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
      int64_t CVal = C->getSExtValue();
      if (CVal >= -15 && CVal <= 16) {
        if (!IsUnsigned || CVal != 0)
        IsCmpUnsignedZero = true;
    unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
#undef CASE_VMSLT_OPCODES
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
    SDValue MaskedOff = Node->getOperand(1);
    SDValue Mask = Node->getOperand(4);
    if (IsCmpUnsignedZero) {
      if (Mask == MaskedOff) {
                                     {Mask, MaskedOff, VL, MaskSEW}));
      if (Mask == MaskedOff) {
                                     {Mask, Cmp, VL, MaskSEW}));
                            {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
                                   {Cmp, Mask, VL, MaskSEW}));
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax:
  unsigned IntNo = Node->getConstantOperandVal(1);
  case Intrinsic::riscv_vlseg2:
  case Intrinsic::riscv_vlseg3:
  case Intrinsic::riscv_vlseg4:
  case Intrinsic::riscv_vlseg5:
  case Intrinsic::riscv_vlseg6:
  case Intrinsic::riscv_vlseg7:
  case Intrinsic::riscv_vlseg8: {
  case Intrinsic::riscv_vlseg2_mask:
  case Intrinsic::riscv_vlseg3_mask:
  case Intrinsic::riscv_vlseg4_mask:
  case Intrinsic::riscv_vlseg5_mask:
  case Intrinsic::riscv_vlseg6_mask:
  case Intrinsic::riscv_vlseg7_mask:
  case Intrinsic::riscv_vlseg8_mask: {
  case Intrinsic::riscv_vlsseg2:
  case Intrinsic::riscv_vlsseg3:
  case Intrinsic::riscv_vlsseg4:
  case Intrinsic::riscv_vlsseg5:
  case Intrinsic::riscv_vlsseg6:
  case Intrinsic::riscv_vlsseg7:
  case Intrinsic::riscv_vlsseg8: {
  case Intrinsic::riscv_vlsseg2_mask:
  case Intrinsic::riscv_vlsseg3_mask:
  case Intrinsic::riscv_vlsseg4_mask:
  case Intrinsic::riscv_vlsseg5_mask:
  case Intrinsic::riscv_vlsseg6_mask:
  case Intrinsic::riscv_vlsseg7_mask:
  case Intrinsic::riscv_vlsseg8_mask: {
  case Intrinsic::riscv_vloxseg2:
  case Intrinsic::riscv_vloxseg3:
  case Intrinsic::riscv_vloxseg4:
  case Intrinsic::riscv_vloxseg5:
  case Intrinsic::riscv_vloxseg6:
  case Intrinsic::riscv_vloxseg7:
  case Intrinsic::riscv_vloxseg8:
  case Intrinsic::riscv_vluxseg2:
  case Intrinsic::riscv_vluxseg3:
  case Intrinsic::riscv_vluxseg4:
  case Intrinsic::riscv_vluxseg5:
  case Intrinsic::riscv_vluxseg6:
  case Intrinsic::riscv_vluxseg7:
  case Intrinsic::riscv_vluxseg8:
  case Intrinsic::riscv_vloxseg2_mask:
  case Intrinsic::riscv_vloxseg3_mask:
  case Intrinsic::riscv_vloxseg4_mask:
  case Intrinsic::riscv_vloxseg5_mask:
  case Intrinsic::riscv_vloxseg6_mask:
  case Intrinsic::riscv_vloxseg7_mask:
  case Intrinsic::riscv_vloxseg8_mask:
  case Intrinsic::riscv_vluxseg2_mask:
  case Intrinsic::riscv_vluxseg3_mask:
  case Intrinsic::riscv_vluxseg4_mask:
  case Intrinsic::riscv_vluxseg5_mask:
  case Intrinsic::riscv_vluxseg6_mask:
  case Intrinsic::riscv_vluxseg7_mask:
  case Intrinsic::riscv_vluxseg8_mask:
  case Intrinsic::riscv_vlseg8ff:
  case Intrinsic::riscv_vlseg7ff:
  case Intrinsic::riscv_vlseg6ff:
  case Intrinsic::riscv_vlseg5ff:
  case Intrinsic::riscv_vlseg4ff:
  case Intrinsic::riscv_vlseg3ff:
  case Intrinsic::riscv_vlseg2ff: {
  case Intrinsic::riscv_vlseg8ff_mask:
  case Intrinsic::riscv_vlseg7ff_mask:
  case Intrinsic::riscv_vlseg6ff_mask:
  case Intrinsic::riscv_vlseg5ff_mask:
  case Intrinsic::riscv_vlseg4ff_mask:
  case Intrinsic::riscv_vlseg3ff_mask:
  case Intrinsic::riscv_vlseg2ff_mask: {
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                    IntNo == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                     IntNo == Intrinsic::riscv_vloxei_mask;
    MVT VT = Node->getSimpleValueType(0);
    Operands.push_back(Node->getOperand(CurOp++));
           "Element count mismatch");
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      report_fatal_error("The V extension does not support EEW=64 for index "
                         "values when XLEN=32");
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));
    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                    IntNo == Intrinsic::riscv_vlse_mask;
        IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
    MVT VT = Node->getSimpleValueType(0);
    bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
    if (HasPassthruOperand)
      Operands.push_back(Node->getOperand(CurOp++));
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  case Intrinsic::riscv_vleff:
  case Intrinsic::riscv_vleff_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
    MVT VT = Node->getSimpleValueType(0);
    Operands.push_back(Node->getOperand(CurOp++));
        RISCV::getVLEPseudo(IsMasked, /*IsStrided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  unsigned IntNo = Node->getConstantOperandVal(1);
  case Intrinsic::riscv_vsseg2:
  case Intrinsic::riscv_vsseg3:
  case Intrinsic::riscv_vsseg4:
  case Intrinsic::riscv_vsseg5:
  case Intrinsic::riscv_vsseg6:
  case Intrinsic::riscv_vsseg7:
  case Intrinsic::riscv_vsseg8: {
  case Intrinsic::riscv_vsseg2_mask:
  case Intrinsic::riscv_vsseg3_mask:
  case Intrinsic::riscv_vsseg4_mask:
  case Intrinsic::riscv_vsseg5_mask:
  case Intrinsic::riscv_vsseg6_mask:
  case Intrinsic::riscv_vsseg7_mask:
  case Intrinsic::riscv_vsseg8_mask: {
  case Intrinsic::riscv_vssseg2:
  case Intrinsic::riscv_vssseg3:
  case Intrinsic::riscv_vssseg4:
  case Intrinsic::riscv_vssseg5:
  case Intrinsic::riscv_vssseg6:
  case Intrinsic::riscv_vssseg7:
  case Intrinsic::riscv_vssseg8: {
  case Intrinsic::riscv_vssseg2_mask:
  case Intrinsic::riscv_vssseg3_mask:
  case Intrinsic::riscv_vssseg4_mask:
  case Intrinsic::riscv_vssseg5_mask:
  case Intrinsic::riscv_vssseg6_mask:
  case Intrinsic::riscv_vssseg7_mask:
  case Intrinsic::riscv_vssseg8_mask: {
  case Intrinsic::riscv_vsoxseg2:
  case Intrinsic::riscv_vsoxseg3:
  case Intrinsic::riscv_vsoxseg4:
  case Intrinsic::riscv_vsoxseg5:
  case Intrinsic::riscv_vsoxseg6:
  case Intrinsic::riscv_vsoxseg7:
  case Intrinsic::riscv_vsoxseg8:
  case Intrinsic::riscv_vsuxseg2:
  case Intrinsic::riscv_vsuxseg3:
  case Intrinsic::riscv_vsuxseg4:
  case Intrinsic::riscv_vsuxseg5:
  case Intrinsic::riscv_vsuxseg6:
  case Intrinsic::riscv_vsuxseg7:
  case Intrinsic::riscv_vsuxseg8:
  case Intrinsic::riscv_vsoxseg2_mask:
  case Intrinsic::riscv_vsoxseg3_mask:
  case Intrinsic::riscv_vsoxseg4_mask:
  case Intrinsic::riscv_vsoxseg5_mask:
  case Intrinsic::riscv_vsoxseg6_mask:
  case Intrinsic::riscv_vsoxseg7_mask:
  case Intrinsic::riscv_vsoxseg8_mask:
  case Intrinsic::riscv_vsuxseg2_mask:
  case Intrinsic::riscv_vsuxseg3_mask:
  case Intrinsic::riscv_vsuxseg4_mask:
  case Intrinsic::riscv_vsuxseg5_mask:
  case Intrinsic::riscv_vsuxseg6_mask:
  case Intrinsic::riscv_vsuxseg7_mask:
  case Intrinsic::riscv_vsuxseg8_mask:
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                    IntNo == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                     IntNo == Intrinsic::riscv_vsoxei_mask;
    MVT VT = Node->getOperand(2)->getSimpleValueType(0);
    Operands.push_back(Node->getOperand(CurOp++));
           "Element count mismatch");
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      report_fatal_error("The V extension does not support EEW=64 for index "
                         "values when XLEN=32");
        IsMasked, IsOrdered, IndexLog2EEW,
        static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                    IntNo == Intrinsic::riscv_vsse_mask;
        IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
    MVT VT = Node->getOperand(2)->getSimpleValueType(0);
    Operands.push_back(Node->getOperand(CurOp++));
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
    if (auto *MemOp = dyn_cast<MemSDNode>(Node))
  case Intrinsic::riscv_sf_vc_x_se:
  case Intrinsic::riscv_sf_vc_i_se:
    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);
    auto Idx = Node->getConstantOperandVal(2);
    MVT SubVecContainerVT = SubVecVT;
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
    [[maybe_unused]] bool ExactlyVecRegSized =
            .isKnownMultipleOf(Subtarget->expandVScale(VecRegSize));
                .getKnownMinValue()));
    assert(Idx == 0 && (ExactlyVecRegSized || V.isUndef()));
    MVT ContainerVT = VT;
      ContainerVT = TLI.getContainerForFixedLengthVector(VT);
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            ContainerVT, SubVecContainerVT, Idx, TRI);
    [[maybe_unused]] bool IsSubVecPartReg =
    assert((!IsSubVecPartReg || V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");
    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID =
             "Unexpected subvector extraction");
    SDValue V = Node->getOperand(0);
    auto Idx = Node->getConstantOperandVal(1);
    MVT InVT = V.getSimpleValueType();
    MVT SubVecContainerVT = VT;
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
      InVT = TLI.getContainerForFixedLengthVector(InVT);
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            InVT, SubVecContainerVT, Idx, TRI);
    if (SubRegIdx == RISCV::NoSubRegister) {
             "Unexpected subvector extraction");
    if (!Node->getOperand(0).isUndef())
    SDValue Src = Node->getOperand(1);
    auto *Ld = dyn_cast<LoadSDNode>(Src);
    if (!Ld || Ld->isIndexed())
    EVT MemVT = Ld->getMemoryVT();
    if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
    Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
        /*IsMasked*/ false, IsStrided, /*FF*/ false,
        Log2SEW, static_cast<unsigned>(LMUL));
    unsigned Locality = Node->getConstantOperandVal(3);
    if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
    int NontemporalLevel = 0;
      NontemporalLevel = 3;
      NontemporalLevel = 1;
      NontemporalLevel = 0;
    if (NontemporalLevel & 0b1)
    if (NontemporalLevel & 0b10)
    std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
    assert(Found && "SelectAddrRegImm should always succeed");
    OutOps.push_back(Op0);
    OutOps.push_back(Op1);
    OutOps.push_back(Op);
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {
    bool IsPrefetch = false) {
  if (!isa<ConstantSDNode>(Addr))
  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
  int64_t Lo12 = SignExtend64<12>(CVal);
  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (IsPrefetch && (Lo12 & 0b11111) != 0)
    int64_t Hi20 = (Hi >> 12) & 0xfffff;
  if (Seq.back().getOpcode() != RISCV::ADDI)
  Lo12 = Seq.back().getImm();
  if (IsPrefetch && (Lo12 & 0b11111) != 0)
  assert(!Seq.empty() && "Expected more instructions in sequence");
  for (auto *Use : Add->uses()) {
    EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
        cast<StoreSDNode>(Use)->getValue() == Add)
        cast<AtomicSDNode>(Use)->getVal() == Add)
    unsigned MaxShiftAmount,
  EVT VT = Addr.getSimpleValueType();
    if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
      if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
        ShiftAmt = N.getConstantOperandVal(1);
    return ShiftAmt != 0;
    if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
          isInt<12>(C1->getSExtValue())) {
    } else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
      UnwrapShl(Addr.getOperand(1), Index, Scale);
  } else if (UnwrapShl(Addr, Index, Scale)) {
  MVT VT = Addr.getSimpleValueType();
  int64_t RV32ZdinxRange = IsINX ? 4 : 0;
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
      if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
            GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
        if (CVal == 0 || Alignment > CVal) {
          int64_t CombinedOffset = CVal + GA->getOffset();
              CombinedOffset, GA->getTargetFlags());
      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
           "simm12 not already handled?");
    if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
      int64_t Adj = CVal < 0 ? -2048 : 2047;
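// Offsets in [-4096, -2049] or [2048, 4094] are handled with an ADDI of
// Adj (+/-2047/2048) plus the remainder folded into the memory operand's
// immediate: e.g. a +4000 offset becomes (addi base, 2047) with offset 1953
// left on the load/store.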
  MVT VT = Addr.getSimpleValueType();
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {
      if ((CVal & 0b11111) != 0) {
      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
  if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!isInt<12>(CVal) && "simm12 not already handled?");
    if ((-2049 >= CVal && CVal >= -4096) || (4065 >= CVal && CVal >= 2017)) {
      int64_t Adj = CVal < 0 ? -2048 : 2016;
      int64_t AdjustedOffset = CVal - Adj;
          RISCV::ADDI, DL, VT, Addr.getOperand(0),
  if (Imm != 0 && Imm % ShiftWidth == 0) {
  if (Imm != 0 && Imm % ShiftWidth == 0) {
    unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
  if (Imm % ShiftWidth == ShiftWidth - 1) {
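// Shift amounts are read modulo ShiftWidth, so arithmetic on the amount that
// cannot change its low bits is peeled off: (add y, k*ShiftWidth) -> y,
// (sub k*ShiftWidth, y) -> (neg y) via NegOpc, and a subtrahend congruent to
// ShiftWidth - 1 turns into (not y).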
2752 "Unexpected condition code!");
2759 ISD::CondCode CCVal = cast<CondCodeSDNode>(
N->getOperand(2))->get();
2760 if (CCVal != ExpectedCCVal)
2766 if (!
LHS.getValueType().isScalarInteger())
2777 if (
auto *
C = dyn_cast<ConstantSDNode>(
RHS)) {
2778 int64_t CVal =
C->getSExtValue();
2781 if (CVal == -2048) {
2784 RISCV::XORI,
DL,
N->getValueType(0),
LHS,
2791 if (isInt<12>(CVal) || CVal == 2048) {
2794 RISCV::ADDI,
DL,
N->getValueType(0),
LHS,
      cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
    Val = N.getOperand(0);
  auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
    if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
        N.getConstantOperandVal(1) == ShiftAmt &&
  MVT VT = N.getSimpleValueType();
  auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
    Val = N.getOperand(0);
  MVT VT = N.getSimpleValueType();
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    uint64_t Mask = N.getConstantOperandVal(1);
    unsigned XLen = Subtarget->getXLen();
      Mask &= maskTrailingZeros<uint64_t>(C2);
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
    if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
      EVT VT = N.getValueType();
    if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
      EVT VT = N.getValueType();
  bool LeftShift = N.getOpcode() == ISD::SHL;
  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N.getOperand(1))) {
    unsigned C1 = N.getConstantOperandVal(1);
    unsigned XLen = Subtarget->getXLen();
    if (LeftShift && Leading == 32 && Trailing > 0 &&
        (Trailing + C1) == ShAmt) {
      EVT VT = N.getValueType();
    if (!LeftShift && Leading == 32 && Trailing > C1 &&
        (Trailing - C1) == ShAmt) {
      EVT VT = N.getValueType();
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
    uint64_t Mask = N.getConstantOperandVal(1);
      Mask &= maskTrailingZeros<uint64_t>(C2);
    if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
      EVT VT = N.getValueType();
  bool HasGlueOp = User->getGluedNode() != nullptr;
  bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
  const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
  if (UserOpNo == VLIdx)
  auto NumDemandedBits =
  return NumDemandedBits && Bits >= *NumDemandedBits;
                                        const unsigned Depth) const {
          isa<ConstantSDNode>(Node) || Depth != 0) &&
         "Unexpected opcode");
  if (Depth == 0 && !Node->getValueType(0).isScalarInteger())
  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
    if (!User->isMachineOpcode())
    switch (User->getMachineOpcode()) {
    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
    case RISCV::TH_REVW:
    case RISCV::TH_SRRIW:
      if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
      unsigned ShAmt = User->getConstantOperandVal(1);
    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      if (Bits < (Subtarget->getXLen() / 2))
    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:
      if (UI.getOperandNo() != 0 || Bits < 32)
      if (UI.getOperandNo() != 0 || Bits < 8)
      if (UI.getOperandNo() != 0 || Bits < 16)
      if (UI.getOperandNo() != 0 || Bits < 32)
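// Each case answers the same question: does this user read more than the low
// Bits bits of the operand? E.g. FMV_W_X and the FCVT_*_W* conversions only
// consume 32 bits, and the shNadd.uw forms zero-extend operand 0 from bit 32,
// so 32 known bits suffice there.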
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    int64_t Offset = C->getSExtValue();
    for (Shift = 0; Shift < 4; Shift++)
      if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
  EVT Ty = N->getValueType(0);
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue())) {
                                   N->getValueType(0));
  } else if (C && C->isAllOnes()) {
                                   N->getValueType(0));
  } else if (isa<RegisterSDNode>(N) &&
             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
                                   N->getValueType(0));
  if (!N.getOperand(0).isUndef())
  N = N.getOperand(1);
      !Splat.getOperand(0).isUndef())
  assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
  SplatVal = Splat.getOperand(1);
    std::function<bool(int64_t)> ValidateImm) {
  if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
  const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
         "Unexpected splat operand type");
  APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
  if (!ValidateImm(SplatImm))
      [](int64_t Imm) { return isInt<5>(Imm); });
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      N, SplatVal, *CurDAG, *Subtarget,
      [Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
  auto IsExtOrTrunc = [](SDValue N) {
    switch (N->getOpcode()) {
  while (IsExtOrTrunc(N)) {
    if (!N.hasOneUse() || N.getScalarValueSizeInBits() < 8)
    N = N->getOperand(0);
                 ->getLegalZfaFPImm(APF, VT)
  if (VT == MVT::f64 && !Subtarget->is64Bit()) {
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    if (!isInt<5>(ImmVal))
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  if (N->getMachineOpcode() != RISCV::ADDIW ||
  case RISCV::ADD:  Opc = RISCV::ADDW;  break;
  case RISCV::ADDI: Opc = RISCV::ADDIW; break;
  case RISCV::SUB:  Opc = RISCV::SUBW;  break;
  case RISCV::MUL:  Opc = RISCV::MULW;  break;
  case RISCV::SLLI: Opc = RISCV::SLLIW; break;
      !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
  if (!isa<RegisterSDNode>(MaskOp) ||
      cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
  const auto *Glued = GlueOp.getNode();
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
      N->getOperand(N->getNumOperands() - 1));
  if (!V.isMachineOpcode())
  if (V.getMachineOpcode() == TargetOpcode::REG_SEQUENCE) {
    for (unsigned I = 1; I < V.getNumOperands(); I += 2)
  return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  unsigned MaskOpIdx = I->MaskOpIdx;
  const unsigned Opc = I->UnmaskedPseudo;
         "Masked and unmasked pseudos are inconsistent");
  assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");
  for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
    if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
  const auto *Glued = N->getGluedNode();
  if (auto *TGlued = Glued->getGluedNode())
  if (!N->memoperands_empty())
  Result->setFlags(N->getFlags());
    return RISCV::PseudoVMSET_M_B1;
    return RISCV::PseudoVMSET_M_B2;
    return RISCV::PseudoVMSET_M_B4;
    return RISCV::PseudoVMSET_M_B8;
    return RISCV::PseudoVMSET_M_B16;
    return RISCV::PseudoVMSET_M_B32;
    return RISCV::PseudoVMSET_M_B64;
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
    Merge = N->getOperand(0);
    False = N->getOperand(0);
    True = N->getOperand(1);
    VL = N->getOperand(2);
    Merge = N->getOperand(0);
    False = N->getOperand(1);
    True = N->getOperand(2);
    Mask = N->getOperand(3);
    VL = N->getOperand(4);
    Glue = N->getOperand(N->getNumOperands() - 1);
  assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
         "Expect True is the first output of an instruction.");
  bool IsMasked = false;
      RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
  if (!Info && HasTiedDest) {
    Info = RISCV::getMaskedPseudoInfo(TrueOpc);
    if (False != MergeOpTrue)
    assert(HasTiedDest && "Expected tied dest");
  unsigned TrueVLIndex =
      True.getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
    auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
    auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
    return CLHS->getZExtValue() <= CRHS->getZExtValue() ? LHS : RHS;
  VL = GetMinVL(TrueVL, VL);
  if (TrueVL != VL || !IsMasked)
                                 RISCV::V0, AllOnesMask, SDValue());
  unsigned MaskedOpc = Info->MaskedPseudo;
         "Expected instructions with mask have policy operand.");
         "Expected instructions with mask have a tied dest.");
  bool MergeVLShrunk = VL != OrigVL;
  const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
  assert(!IsMasked || NormalOpsEnd == Info->MaskOpIdx);
  if (HasRoundingMode)
  Ops.append({VL, SEW, PolicyOp});
  if (!cast<MachineSDNode>(True)->memoperands_empty())
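// Net effect of the combine: (vmerge mask, false, (vop ...)) where "false"
// matches vop's tied passthru is folded into the masked form of vop itself,
// running under the smaller of the two VLs, e.g. vadd.vv followed by vmerge
// becomes a single masked vadd.vv.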
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
  bool MadeChange = false;
    if (N->use_empty() || !N->isMachineOpcode())
      MadeChange |= performCombineVMergeAndVOps(N);
bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
  bool MadeChange = false;
    if (N->use_empty() || !N->isMachineOpcode())
    const unsigned Opc = N->getMachineOpcode();
    if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
    for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
    Result->setFlags(N->getFlags());
static Register createTuple(ArrayRef< Register > Regs, const unsigned RegClassIDs[], const unsigned SubRegs[], MachineIRBuilder &MIB)
Create a REG_SEQUENCE instruction using the registers in Regs.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Analysis containing CSE Info
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
mir Rename Register Operands
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
const char LLVMTargetMachineRef TM
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, int64_t Imm, const RISCVSubtarget &Subtarget)
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)
static bool isWorthFoldingAdd(SDValue Add)
static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, RISCVMatInt::InstSeq &Seq)
static bool isImplicitDef(SDValue V)
static unsigned GetVMSetForLMul(RISCVII::VLMUL LMUL)
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)
static bool usesAllOnesMask(SDValue MaskOp, SDValue GlueOp)
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo, unsigned Bits, const TargetInstrInfo *TII)
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT, const RISCVSubtarget *Subtarget, SDValue Addr, SDValue &Base, SDValue &Offset, bool IsPrefetch=false)
static bool IsVMv(SDNode *N)
static cl::opt< bool > UsePseudoMovImm("riscv-use-rematerializable-movimm", cl::Hidden, cl::desc("Use a rematerializable pseudoinstruction for 2 instruction " "constant materialization"), cl::init(false))
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)
static SDValue findVSplat(SDValue N)
static bool selectVSplatImmHelper(SDValue N, SDValue &SplatVal, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, std::function< bool(int64_t)> ValidateImm)
static bool IsVMerge(SDNode *N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
int64_t getSExtValue() const
Get sign extended value.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
int64_t getSExtValue() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
FunctionPass class - This class is used to implement most global optimizations.
This class is used to form a handle around another node that is persistent and is updated across invo...
static StringRef getMemConstraintName(ConstraintCode C)
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Describe properties that are true of each instruction in the target description file.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by other flags.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
uint64_t getScalarSizeInBits() const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
void setFlags(Flags f)
Bitwise OR the current flags with the given flags.
An SDNode that represents everything that will be needed to construct a MachineInstr.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
bool selectSETCC(SDValue N, ISD::CondCode ExpectedCCVal, SDValue &Val)
RISC-V doesn't have general instructions for integer setne/seteq, but we can check for equality with ...
bool selectSExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectZExtBits(SDValue N, unsigned Bits, SDValue &Val)
bool selectSHXADD_UWOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD_UW.
bool hasAllNBitUsers(SDNode *Node, unsigned Bits, const unsigned Depth=0) const
void selectVSSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool SelectAddrRegImmLsb00000(SDValue Addr, SDValue &Base, SDValue &Offset)
Similar to SelectAddrRegImm, except that the least significant 5 bits of Offset shoule be all zeros.
bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset)
void selectVLSEGFF(SDNode *Node, bool IsMasked)
bool selectFPImm(SDValue N, SDValue &Imm)
bool selectSimm5Shl2(SDValue N, SDValue &Simm5, SDValue &Shl2)
void selectSF_VC_X_SE(SDNode *Node)
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal)
bool hasAllHUsers(SDNode *Node) const
bool SelectInlineAsmMemoryOperand(const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, std::vector< SDValue > &OutOps) override
SelectInlineAsmMemoryOperand - Select the specified address as a target addressing mode,...
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal)
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm)
bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset)
bool hasAllWUsers(SDNode *Node) const
void PreprocessISelDAG() override
PreprocessISelDAG - This hook allows targets to hack on the graph before instruction selection starts...
void Select(SDNode *Node) override
Main hook for targets to transform nodes into machine nodes.
bool selectVSplat(SDValue N, SDValue &SplatVal)
void addVectorLoadStoreOperands(SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp, bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl< SDValue > &Operands, bool IsLoad=false, MVT *IndexVT=nullptr)
void PostprocessISelDAG() override
PostprocessISelDAG() - This hook allows the target to hack on the graph right after selection.
bool hasAllBUsers(SDNode *Node) const
void selectVLXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool tryShrinkShlLogicImm(SDNode *Node)
void selectVSETVLI(SDNode *Node)
bool selectVLOp(SDValue N, SDValue &VL)
bool trySignedBitfieldExtract(SDNode *Node)
void selectVSXSEG(SDNode *Node, bool IsMasked, bool IsOrdered)
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal)
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal)
bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset, bool IsINX=false)
void selectVLSEG(SDNode *Node, bool IsMasked, bool IsStrided)
bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt)
bool selectSHXADDOp(SDValue N, unsigned ShAmt, SDValue &Val)
Look for various patterns that can be done with a SHL that can be folded into a SHXADD.
bool tryIndexedLoad(SDNode *Node)
bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale)
bool selectVSplatUimm(SDValue N, unsigned Bits, SDValue &SplatVal)
Quantity expandVScale(Quantity X) const
If the ElementCount or TypeSize X is scalable and VScale (VLEN) is exactly known, returns X converted...
bool hasVInstructions() const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVTargetLowering * getTargetLowering() const override
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getSubregIndexByMVT(MVT VT, unsigned Index)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVII::VLMUL getLMUL(MVT VT)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDNodeFlags getFlags() const
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
SDNode * getGluedNode() const
If this node has a glue operand, return the node to which the glue operand points.
op_iterator op_begin() const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
unsigned getNumOperands() const
const TargetLowering * TLI
const TargetInstrInfo * TII
void ReplaceUses(SDValue F, SDValue T)
ReplaceUses - replace all uses of the old node F with the use of the new node T.
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const
IsProfitableToFold - Returns true if it's profitable to fold the specific operand node N of U during ...
static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, CodeGenOptLevel OptLevel, bool IgnoreChains=false)
IsLegalToFold - Returns true if the specific operand node N of U can be folded during instruction sel...
bool mayRaiseFPException(SDNode *Node) const
Return whether the node may raise an FP exception.
void ReplaceNode(SDNode *F, SDNode *T)
Replace all uses of F with T, then remove F from the DAG.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
static constexpr unsigned MaxRecursionDepth
allnodes_const_iterator allnodes_begin() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
allnodes_const_iterator allnodes_end() const
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getRegister(unsigned Reg, EVT VT)
void RemoveDeadNodes()
This method deletes all unreachable nodes in the SelectionDAG.
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
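A sketch of building such a node for a chained target intrinsic; the intrinsic ID, pointer, and value type here are placeholders, but the call shape matches the signature above:

  // VTs: one result value plus the output chain.
  SDVTList VTs = CurDAG->getVTList(MVT::nxv2i32, MVT::Other);
  SDValue Ops[] = {Chain, IntID, BasePtr}; // IntID: a target-constant intrinsic ID
  SDValue Load = CurDAG->getMemIntrinsicNode(
      ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, /*MemVT=*/MVT::nxv2i32,
      MachinePointerInfo(), Align(4), MachineMemOperand::MOLoad);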
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
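Together with getStore, getMemBasePlusOffset, and MachinePointerInfo::getFixedStack in this list, this supports the classic spill-through-memory idiom. A sketch, assuming Val is an i64 SDValue and DL an SDLoc:

  // Reserve an 8-byte, 8-aligned slot and record its frame index.
  SDValue Slot = CurDAG->CreateStackTemporary(TypeSize::getFixed(8), Align(8));
  int FI = cast<FrameIndexSDNode>(Slot.getNode())->getIndex();
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), FI);

  // Store the value, then form an address/pointer-info pair for byte 4.
  SDValue Chain =
      CurDAG->getStore(CurDAG->getEntryNode(), DL, Val, Slot, MPI, Align(8));
  SDValue HiAddr =
      CurDAG->getMemBasePlusOffset(Slot, TypeSize::getFixed(4), DL);
  MachinePointerInfo HiMPI = MPI.getWithOffset(4);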
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
ilist< SDNode >::iterator allnodes_iterator
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
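Instruction selection code builds operand lists this way constantly; for example:

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(Base);             // one element at a time
  Operands.append(Node->op_begin() + 2, // or a whole operand range
                  Node->op_end());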
TargetInstrInfo - Interface to description of machine instruction set.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
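The distinction matters for RVV: a scalable TypeSize is a multiple of the runtime vscale. For example:

  TypeSize FixedSz = TypeSize::getFixed(16);       // exactly 16 bytes
  TypeSize ScalableSz = TypeSize::getScalable(16); // 16 x vscale bytes
  assert(!FixedSz.isScalable() && ScalableSz.isScalable());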
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added / subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a value.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ SIGN_EXTEND
Conversion operators.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register).
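The narrow type rides along as a VTSDNode operand; e.g., sign-extending the low 32 bits of an i64 in place:

  SDValue SExt = CurDAG->getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, X,
                                 CurDAG->getValueType(MVT::i32));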
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
bool isIntEqualitySetCC(CondCode Code)
Return true if this is a setcc instruction that performs an equality comparison when used with integer operands.
static bool hasRoundModeOp(uint64_t TSFlags)
static VLMUL getLMul(uint64_t TSFlags)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
@ SPLAT_VECTOR_SPLIT_I64_VL
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, unsigned &ShiftAmt, unsigned &AddOpc)
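A sketch of the usual cost query: materialize a 64-bit constant, and if the one-register sequence is long, see whether a two-register ADD/SHxADD split does better. Subtarget is assumed to be the selector's RISCVSubtarget pointer; the constant is arbitrary:

  int64_t Imm = 0x1234567800000000;
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  if (Seq.size() > 3) {
    unsigned ShiftAmt, AddOpc;
    RISCVMatInt::InstSeq SeqTwo =
        RISCVMatInt::generateTwoRegInstSeq(Imm, *Subtarget, ShiftAmt, AddOpc);
    // Prefer SeqTwo when it is shorter, at the cost of a scratch register.
  }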
static unsigned decodeVSEW(unsigned VSEW)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic)
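For example, the VTYPE immediate a VSETVLI would write for SEW=32, LMUL=1, tail/mask agnostic, plus the SEW/LMUL ratio that vtype-compatibility checks compare:

  unsigned VType = RISCVVType::encodeVTYPE(RISCVII::VLMUL::LMUL_1, /*SEW=*/32,
                                           /*TailAgnostic=*/true,
                                           /*MaskAgnostic=*/true);
  unsigned Ratio =
      RISCVVType::getSEWLMULRatio(/*SEW=*/32, RISCVII::VLMUL::LMUL_1);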
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
static constexpr unsigned RVVBitsPerBlock
static constexpr int64_t VLMaxSentinel
initializer< Ty > init(const Ty &Val)
static const MachineMemOperand::Flags MONontemporalBit1
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
static const MachineMemOperand::Flags MONontemporalBit0
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit version).
unsigned M1(unsigned Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
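These MathExtras helpers (together with SignExtend64, listed below) are the workhorses of immediate pattern matching. A small worked example on a shifted-mask constant:

  uint64_t Imm = 0x00000FF000000000ull; // bits 36..43 set
  bool Pow2 = isPowerOf2_64(Imm);       // false: eight bits set
  unsigned TZ = countr_zero(Imm);       // 36: index of the lowest set bit
  bool SM = isShiftedMask_64(Imm);      // true: set bits are contiguous
  bool LM = isMask_64(Imm >> TZ);       // true: 0xFF is a low mask
  int64_t S = SignExtend64<12>(0xFFF);  // -1: bit 11 is the sign bit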
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit with the remainder zero (64 bit version).
CodeGenOptLevel
Code generation optimization level.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
FunctionPass * createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOptLevel OptLevel)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
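E.g., the alignment that can still be guaranteed after stepping 4 bytes past an 8-aligned base (Align itself is described below):

  Align BaseAlign(8);
  Align OffsetAlign = commonAlignment(BaseAlign, 4); // Align(4)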
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
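These three predicates make identity folds on a node's right-hand operand read cleanly; an illustrative helper (not from this file):

  static bool hasIdentityRHS(SDNode *N) {
    SDValue RHS = N->getOperand(1);
    switch (N->getOpcode()) {
    case ISD::AND: return isAllOnesConstant(RHS); // X & -1 == X
    case ISD::OR:
    case ISD::ADD: return isNullConstant(RHS);    // X | 0 == X, X + 0 == X
    case ISD::MUL: return isOneConstant(RHS);     // X * 1 == X
    default:       return false;
    }
  }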
This struct is a compact representation of a valid (non-zero power of two) alignment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exclusive to a particular function.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
bool hasNoFPExcept() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.