#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"
#define PASS_NAME "RISC-V DAG->DAG Pattern Instruction Selection"

#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
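// The GET_*_IMPL macros above instantiate the TableGen-generated searchable
// tables (getVLSEGPseudo, getVLEPseudo, getMaskedPseudoInfo, and friends)
// that the selection code below uses to map (NF, masked, strided/ordered,
// SEW, LMUL) keys onto concrete RVV pseudo opcodes.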
bool MadeChange = false;
switch (N->getOpcode()) {
MVT VT = N->getSimpleValueType(0);
N->getOperand(0), VL);
assert(N->getNumOperands() == 4 && "Unexpected number of operands");
MVT VT = N->getSimpleValueType(0);
Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
MVT::i64, MPI, Align(8),
LLVM_DEBUG(dbgs() << "RISC-V DAG preprocessing replacing:\nOld: ");
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
MadeChange |= doPeepholeSExtW(N);
MadeChange |= doPeepholeMaskedRVV(cast<MachineSDNode>(N));
MadeChange |= doPeepholeMergeVVMFold();
MadeChange |= doPeepholeNoRegPassThru();
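// PostprocessISelDAG applies four peepholes to the selected DAG: folding a
// sign-extend into a W-form instruction (doPeepholeSExtW), rewriting
// all-ones-masked pseudos as their unmasked forms (doPeepholeMaskedRVV),
// folding vmerge into the instruction that defines its true operand
// (doPeepholeMergeVVMFold), and dropping undef passthru operands
// (doPeepholeNoRegPassThru).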
switch (Inst.getOpndKind()) {
if (Seq.size() > 3) {
int64_t LoVal = SignExtend64<32>(Imm);
if (LoVal == HiVal ||
    (Subtarget.hasStdExtZba() && Lo_32(Imm) == Hi_32(Imm))) {
if ((SeqLo.size() + 2) < Seq.size()) {
unsigned AddOpc = (LoVal == HiVal) ? RISCV::ADD : RISCV::ADD_UW;
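// selectImm: when a 64-bit constant's halves match, materialize only the
// low half, shift a copy left by 32, and recombine with ADD (sign-matched
// halves) or Zba's ADD.UW (bitwise-equal 32-bit halves). The
// SeqLo.size() + 2 check ensures the half-sequence plus the SLLI/ADD pair
// actually beats materializing the constant directly.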
static const unsigned M1TupleRegClassIDs[] = {
    RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
    RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
    RISCV::VRN8M1RegClassID};
static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                              RISCV::VRN3M2RegClassID,
                                              RISCV::VRN4M2RegClassID};
static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm1_0;
RegClassID = M1TupleRegClassIDs[NF - 2];
static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm2_0;
RegClassID = M2TupleRegClassIDs[NF - 2];
static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
              "Unexpected subreg numbering");
SubReg0 = RISCV::sub_vrm4_0;
RegClassID = RISCV::VRN2M4RegClassID;
for (unsigned I = 0; I < Regs.size(); ++I) {
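// createTuple packs the NF input registers into a single VRN<NF>M<LMUL>
// tuple register with a REG_SEQUENCE, using consecutive subregister
// indices starting at SubReg0; the static_asserts above guarantee the
// sub_vrm*_N indices really are consecutive.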
bool IsLoad, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
Operands.push_back(Node->getOperand(CurOp++));
if (IsStridedOrIndexed) {
  Operands.push_back(Node->getOperand(CurOp++));
  *IndexVT = Operands.back()->getSimpleValueType(0);
SDValue Mask = Node->getOperand(CurOp++);
Policy = Node->getConstantOperandVal(CurOp++);
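// addVectorLoadStoreOperands collects the operand list shared by all RVV
// memory pseudos, in order: base pointer, optional stride or index
// operand, optional mask, VL, SEW, an optional policy immediate, and the
// chain.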
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
Node->op_begin() + CurOp + NF);
RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                      static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 2;
MVT VT = Node->getSimpleValueType(0);
Node->op_begin() + CurOp + NF);
RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
                      static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumValues() - 1;
MVT VT = Node->getSimpleValueType(0);
Node->op_begin() + CurOp + NF);
"Element count mismatch");
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  report_fatal_error("The V extension does not support EEW=64 for index "
                     "values when XLEN=32");
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
for (unsigned I = 0; I < NF; ++I) {
unsigned NF = Node->getNumOperands() - 4;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned CurOp = 2 + NF;
NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned NF = Node->getNumOperands() - 5;
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned CurOp = 2 + NF;
"Element count mismatch");
if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
  report_fatal_error("The V extension does not support EEW=64 for index "
                     "values when XLEN=32");
NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = Node->getConstantOperandVal(0);
assert((IntNo == Intrinsic::riscv_vsetvli ||
        IntNo == Intrinsic::riscv_vsetvlimax) &&
       "Unexpected vsetvli intrinsic");
bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
unsigned Offset = (VLMax ? 1 : 2);
"Unexpected number of operands");
Node->getConstantOperandVal(Offset + 1) & 0x7);
unsigned Opcode = RISCV::PseudoVSETVLI;
if (auto *C = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
Opcode = RISCV::PseudoVSETVLIX0;
VLOperand = Node->getOperand(1);
if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
if (isUInt<5>(AVL)) {
XLenVT, VLImm, VTypeIOp));
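// selectVSETVLI picks among three pseudos: PseudoVSETVLIX0 when the AVL is
// VLMAX (the vsetvlimax intrinsic), PseudoVSETIVLI when the AVL is a uimm5
// constant that fits the immediate form, and plain PseudoVSETVLI otherwise.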
MVT VT = Node->getSimpleValueType(0);
unsigned Opcode = Node->getOpcode();
"Unexpected opcode");
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
bool SignExt = false;
uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
int64_t ShiftedVal = Val >> ShAmt;
if (!isInt<12>(ShiftedVal))
if (SignExt && ShAmt >= 32)
case ISD::AND: BinOpc = RISCV::ANDI; break;
case ISD::OR:  BinOpc = RISCV::ORI;  break;
case ISD::XOR: BinOpc = RISCV::XORI; break;
unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
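// tryShrinkShlLogicImm: for (and/or/xor (shl X, ShAmt), Val), shifting Val
// right by ShAmt may shrink it into simm12 range, so the node can be
// re-selected as (slli (andi/ori/xori X, Val >> ShAmt), ShAmt) with no
// constant materialization; for OR/XOR the bits shifted out must already
// be zero, as the RemovedBitsMask test above enforces.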
if (!Subtarget->hasVendorXTHeadBb())
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
auto BitfieldExtract = [&](SDValue N0, unsigned Msb, unsigned Lsb, SDLoc DL,
MVT VT = Node->getSimpleValueType(0);
const unsigned RightShAmt = N1C->getZExtValue();
auto *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
const unsigned LeftShAmt = N01C->getZExtValue();
if (LeftShAmt > RightShAmt)
const unsigned Msb = MsbPlusOne - 1;
const unsigned Lsb = RightShAmt - LeftShAmt;
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
const unsigned Msb = ExtSize - 1;
const unsigned Lsb = RightShAmt;
SDNode *TH_EXT = BitfieldExtract(N0, Msb, Lsb, DL, VT);
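// trySignedBitfieldExtract matches (sra (shl X, C1), C2) and
// (sra (sext_inreg X, VT), C) and selects XTHeadBb's th.ext, a single
// sign-extending extract of the bitfield [Msb:Lsb].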
if (!Subtarget->hasVendorXTHeadMemIdx())
int64_t Offset = C->getSExtValue();
for (Shift = 0; Shift < 4; Shift++)
  if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
if (LoadVT == MVT::i8 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LBUIB : RISCV::TH_LBIB;
else if (LoadVT == MVT::i8 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LBUIA : RISCV::TH_LBIA;
else if (LoadVT == MVT::i16 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LHUIB : RISCV::TH_LHIB;
else if (LoadVT == MVT::i16 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LHUIA : RISCV::TH_LHIA;
else if (LoadVT == MVT::i32 && IsPre)
  Opcode = IsZExt ? RISCV::TH_LWUIB : RISCV::TH_LWIB;
else if (LoadVT == MVT::i32 && IsPost)
  Opcode = IsZExt ? RISCV::TH_LWUIA : RISCV::TH_LWIA;
else if (LoadVT == MVT::i64 && IsPre)
  Opcode = RISCV::TH_LDIB;
else if (LoadVT == MVT::i64 && IsPost)
  Opcode = RISCV::TH_LDIA;
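// XTHeadMemIdx encodes the offset as a simm5 scaled by 1, 2, 4, or 8, so
// the loop above searches for the smallest shift in [0,3] that represents
// the byte offset exactly. The IB/IA suffixes select pre- vs
// post-increment addressing; i64 loads need no zero-extending variant.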
if (Node->isMachineOpcode()) {
unsigned Opcode = Node->getOpcode();
MVT VT = Node->getSimpleValueType(0);
bool HasBitTest = Subtarget->hasStdExtZbs() || Subtarget->hasVendorXTHeadBs();
auto *ConstNode = cast<ConstantSDNode>(Node);
if (ConstNode->isZero()) {
int64_t Imm = ConstNode->getSExtValue();
if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
    hasAllHUsers(Node))
  Imm = SignExtend64<16>(Imm);
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
  Imm = SignExtend64<32>(Imm);
const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
bool NegZeroF64 = APF.isNegZero() && VT == MVT::f64;
assert(Subtarget->hasStdExtZfbfmin());
Opc = RISCV::FMV_H_X;
Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
bool HasZdinx = Subtarget->hasStdExtZdinx();
Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W;
if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
if (!Subtarget->hasStdExtZfa())
"Unexpected subtarget");
if (!SDValue(Node, 0).use_empty()) {
Node->getOperand(0));
if (!SDValue(Node, 1).use_empty()) {
Node->getOperand(0));
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
if (TrailingZeros > 0 && LeadingZeros == 32) {
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
if (ShAmt >= TrailingOnes)
if (TrailingOnes == 32) {
if (HasBitTest && ShAmt + 1 == TrailingOnes) {
Subtarget->hasStdExtZbs() ? RISCV::BEXTI : RISCV::TH_TST, DL, VT,
unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
SDValue N0 = Node->getOperand(0);
unsigned ShAmt = N1C->getZExtValue();
cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
if (ExtSize >= 32 || ShAmt >= ExtSize)
unsigned LShAmt = Subtarget->getXLen() - ExtSize;
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
const bool isC1ANDI = isInt<12>(C1);
SDValue N0 = Node->getOperand(0);
if (!Subtarget->hasVendorXTHeadBb())
auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
unsigned C2 = C->getZExtValue();
unsigned XLen = Subtarget->getXLen();
assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
bool IsCANDI = isInt<6>(N1C->getSExtValue());
C1 &= maskTrailingZeros<uint64_t>(C2);
C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
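// The ISD::AND cases below all deal with (and (shift X, C2), C1): based on
// where the contiguous run of ones in C1 sits relative to the shift, the
// pair is rewritten as an SLLI/SRLI(W) sequence or a zero-extending
// bitfield extract, so the mask constant never has to be materialized in a
// register.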
if (!LeftShift && isC1Mask) {
if (C2 + 32 == Leading) {
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
const unsigned Lsb = C2;
if (tryUnsignedBitfieldExtract(Node, DL, VT, X, Msb, Lsb))
bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
Skip |= HasBitTest && Leading == XLen - 1;
if (OneUseOrZExtW && !Skip) {
RISCV::SLLI, DL, VT, X,
if (C2 + Leading < XLen &&
    C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
if (OneUseOrZExtW && !IsCANDI) {
RISCV::SLLI, DL, VT, X,
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
unsigned SrliOpc = RISCV::SRLI;
isa<ConstantSDNode>(X.getOperand(1)) &&
X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
SrliOpc = RISCV::SRLIW;
X = X.getOperand(0);
if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
    OneUseOrZExtW && !IsCANDI) {
RISCV::SRLIW, DL, VT, X,
if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
RISCV::SRLI, DL, VT, X,
if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
RISCV::SRLIW, DL, VT, X,
if (isC1Mask && !isC1ANDI) {
if (tryUnsignedBitfieldExtract(Node, DL, VT, N0, Msb, 0))
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C || !N1C->hasOneUse())
SDValue N0 = Node->getOperand(0);
(C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb());
IsANDIOrZExt |= C2 == UINT64_C(0xFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
bool IsZExtW = C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba();
IsZExtW |= C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasVendorXTHeadBb();
if (IsZExtW && (isInt<32>(N1C->getSExtValue()) || !N0.hasOneUse()))
unsigned XLen = Subtarget->getXLen();
unsigned ConstantShift = XLen - LeadingZeros;
uint64_t ShiftedC1 = C1 << ConstantShift;
ShiftedC1 = SignExtend64<32>(ShiftedC1);
unsigned IntNo = Node->getConstantOperandVal(0);
case Intrinsic::riscv_vmsgeu:
case Intrinsic::riscv_vmsge: {
  SDValue Src1 = Node->getOperand(1);
  SDValue Src2 = Node->getOperand(2);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
  bool IsCmpUnsignedZero = false;
  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
        break;
      IsCmpUnsignedZero = true;
  unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
  if (IsCmpUnsignedZero) {
  {Cmp, Cmp, VL, SEW}));
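// vmsge(u).vx has no hardware encoding; following the expansion suggested
// by the V spec it is selected as "vmslt{u}.vx vd, va, x" followed by
// "vmnand.mm vd, vd, vd" to complement the result. vmsgeu against zero is
// trivially all-true and becomes a single vmset.m.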
case Intrinsic::riscv_vmsgeu_mask:
case Intrinsic::riscv_vmsge_mask: {
  SDValue Src1 = Node->getOperand(2);
  SDValue Src2 = Node->getOperand(3);
  bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
  bool IsCmpUnsignedZero = false;
  if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
    int64_t CVal = C->getSExtValue();
    if (CVal >= -15 && CVal <= 16) {
      if (!IsUnsigned || CVal != 0)
        break;
      IsCmpUnsignedZero = true;
  unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
#undef CASE_VMSLT_OPCODES
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
  SDValue MaskedOff = Node->getOperand(1);
  SDValue Mask = Node->getOperand(4);
  if (IsCmpUnsignedZero) {
    if (Mask == MaskedOff) {
    {Mask, MaskedOff, VL, MaskSEW}));
  if (Mask == MaskedOff) {
    {Mask, Cmp, VL, MaskSEW}));
  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
  {Cmp, Mask, VL, MaskSEW}));
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
case Intrinsic::riscv_vlseg2:
case Intrinsic::riscv_vlseg3:
case Intrinsic::riscv_vlseg4:
case Intrinsic::riscv_vlseg5:
case Intrinsic::riscv_vlseg6:
case Intrinsic::riscv_vlseg7:
case Intrinsic::riscv_vlseg8: {
case Intrinsic::riscv_vlseg2_mask:
case Intrinsic::riscv_vlseg3_mask:
case Intrinsic::riscv_vlseg4_mask:
case Intrinsic::riscv_vlseg5_mask:
case Intrinsic::riscv_vlseg6_mask:
case Intrinsic::riscv_vlseg7_mask:
case Intrinsic::riscv_vlseg8_mask: {
case Intrinsic::riscv_vlsseg2:
case Intrinsic::riscv_vlsseg3:
case Intrinsic::riscv_vlsseg4:
case Intrinsic::riscv_vlsseg5:
case Intrinsic::riscv_vlsseg6:
case Intrinsic::riscv_vlsseg7:
case Intrinsic::riscv_vlsseg8: {
case Intrinsic::riscv_vlsseg2_mask:
case Intrinsic::riscv_vlsseg3_mask:
case Intrinsic::riscv_vlsseg4_mask:
case Intrinsic::riscv_vlsseg5_mask:
case Intrinsic::riscv_vlsseg6_mask:
case Intrinsic::riscv_vlsseg7_mask:
case Intrinsic::riscv_vlsseg8_mask: {
case Intrinsic::riscv_vloxseg2:
case Intrinsic::riscv_vloxseg3:
case Intrinsic::riscv_vloxseg4:
case Intrinsic::riscv_vloxseg5:
case Intrinsic::riscv_vloxseg6:
case Intrinsic::riscv_vloxseg7:
case Intrinsic::riscv_vloxseg8:
case Intrinsic::riscv_vluxseg2:
case Intrinsic::riscv_vluxseg3:
case Intrinsic::riscv_vluxseg4:
case Intrinsic::riscv_vluxseg5:
case Intrinsic::riscv_vluxseg6:
case Intrinsic::riscv_vluxseg7:
case Intrinsic::riscv_vluxseg8:
case Intrinsic::riscv_vloxseg2_mask:
case Intrinsic::riscv_vloxseg3_mask:
case Intrinsic::riscv_vloxseg4_mask:
case Intrinsic::riscv_vloxseg5_mask:
case Intrinsic::riscv_vloxseg6_mask:
case Intrinsic::riscv_vloxseg7_mask:
case Intrinsic::riscv_vloxseg8_mask:
case Intrinsic::riscv_vluxseg2_mask:
case Intrinsic::riscv_vluxseg3_mask:
case Intrinsic::riscv_vluxseg4_mask:
case Intrinsic::riscv_vluxseg5_mask:
case Intrinsic::riscv_vluxseg6_mask:
case Intrinsic::riscv_vluxseg7_mask:
case Intrinsic::riscv_vluxseg8_mask:
case Intrinsic::riscv_vlseg8ff:
case Intrinsic::riscv_vlseg7ff:
case Intrinsic::riscv_vlseg6ff:
case Intrinsic::riscv_vlseg5ff:
case Intrinsic::riscv_vlseg4ff:
case Intrinsic::riscv_vlseg3ff:
case Intrinsic::riscv_vlseg2ff: {
case Intrinsic::riscv_vlseg8ff_mask:
case Intrinsic::riscv_vlseg7ff_mask:
case Intrinsic::riscv_vlseg6ff_mask:
case Intrinsic::riscv_vlseg5ff_mask:
case Intrinsic::riscv_vlseg4ff_mask:
case Intrinsic::riscv_vlseg3ff_mask:
case Intrinsic::riscv_vlseg2ff_mask: {
case Intrinsic::riscv_vloxei:
case Intrinsic::riscv_vloxei_mask:
case Intrinsic::riscv_vluxei:
case Intrinsic::riscv_vluxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                  IntNo == Intrinsic::riscv_vluxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                   IntNo == Intrinsic::riscv_vloxei_mask;
  MVT VT = Node->getSimpleValueType(0);
  Operands.push_back(Node->getOperand(CurOp++));
  "Element count mismatch");
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
  static_cast<unsigned>(IndexLMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vlm:
case Intrinsic::riscv_vle:
case Intrinsic::riscv_vle_mask:
case Intrinsic::riscv_vlse:
case Intrinsic::riscv_vlse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                  IntNo == Intrinsic::riscv_vlse_mask;
  IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
  MVT VT = Node->getSimpleValueType(0);
  bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
  if (HasPassthruOperand)
    Operands.push_back(Node->getOperand(CurOp++));
  RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                      static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vleff:
case Intrinsic::riscv_vleff_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
  MVT VT = Node->getSimpleValueType(0);
  Operands.push_back(Node->getOperand(CurOp++));
  RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
                      static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
case Intrinsic::riscv_vsseg2:
case Intrinsic::riscv_vsseg3:
case Intrinsic::riscv_vsseg4:
case Intrinsic::riscv_vsseg5:
case Intrinsic::riscv_vsseg6:
case Intrinsic::riscv_vsseg7:
case Intrinsic::riscv_vsseg8: {
case Intrinsic::riscv_vsseg2_mask:
case Intrinsic::riscv_vsseg3_mask:
case Intrinsic::riscv_vsseg4_mask:
case Intrinsic::riscv_vsseg5_mask:
case Intrinsic::riscv_vsseg6_mask:
case Intrinsic::riscv_vsseg7_mask:
case Intrinsic::riscv_vsseg8_mask: {
case Intrinsic::riscv_vssseg2:
case Intrinsic::riscv_vssseg3:
case Intrinsic::riscv_vssseg4:
case Intrinsic::riscv_vssseg5:
case Intrinsic::riscv_vssseg6:
case Intrinsic::riscv_vssseg7:
case Intrinsic::riscv_vssseg8: {
case Intrinsic::riscv_vssseg2_mask:
case Intrinsic::riscv_vssseg3_mask:
case Intrinsic::riscv_vssseg4_mask:
case Intrinsic::riscv_vssseg5_mask:
case Intrinsic::riscv_vssseg6_mask:
case Intrinsic::riscv_vssseg7_mask:
case Intrinsic::riscv_vssseg8_mask: {
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8:
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask:
case Intrinsic::riscv_vsoxei:
case Intrinsic::riscv_vsoxei_mask:
case Intrinsic::riscv_vsuxei:
case Intrinsic::riscv_vsuxei_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                  IntNo == Intrinsic::riscv_vsuxei_mask;
  bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                   IntNo == Intrinsic::riscv_vsoxei_mask;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  Operands.push_back(Node->getOperand(CurOp++));
  "Element count mismatch");
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  IsMasked, IsOrdered, IndexLog2EEW,
  static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
case Intrinsic::riscv_vsm:
case Intrinsic::riscv_vse:
case Intrinsic::riscv_vse_mask:
case Intrinsic::riscv_vsse:
case Intrinsic::riscv_vsse_mask: {
  bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                  IntNo == Intrinsic::riscv_vsse_mask;
  IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  Operands.push_back(Node->getOperand(CurOp++));
  IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
SDValue V = Node->getOperand(0);
SDValue SubV = Node->getOperand(1);
auto Idx = Node->getConstantOperandVal(2);
MVT SubVecContainerVT = SubVecVT;
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
VT = TLI.getContainerForFixedLengthVector(VT);
std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        VT, SubVecContainerVT, Idx, TRI);
(void)IsSubVecPartReg;
assert((!IsSubVecPartReg || V.isUndef()) &&
       "Expecting lowering to have created legal INSERT_SUBVECTORs when "
       "the subvector is smaller than a full-sized register");
if (SubRegIdx == RISCV::NoSubRegister) {
"Unexpected subvector extraction");
SDValue V = Node->getOperand(0);
auto Idx = Node->getConstantOperandVal(1);
MVT InVT = V.getSimpleValueType();
MVT SubVecContainerVT = VT;
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
InVT = TLI.getContainerForFixedLengthVector(InVT);
std::tie(SubRegIdx, Idx) =
    RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
        InVT, SubVecContainerVT, Idx, TRI);
if (SubRegIdx == RISCV::NoSubRegister) {
"Unexpected subvector extraction");
if (!Node->getOperand(0).isUndef())
SDValue Src = Node->getOperand(1);
auto *Ld = dyn_cast<LoadSDNode>(Src);
if (!Ld || Ld->isIndexed())
EVT MemVT = Ld->getMemoryVT();
if (IsStrided && !Subtarget->hasOptimizedZeroStrideLoad())
Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
/*IsMasked*/ false, IsStrided, /*FF*/ false, Log2SEW,
static_cast<unsigned>(LMUL));
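// Splat-of-load: a VMV_V_X_VL/VFMV_V_F_VL whose scalar source is a
// non-indexed load is re-selected as one vector load. With VL == 1 a
// regular element load suffices; otherwise a stride-zero load (stride x0)
// is used, and only on subtargets reporting hasOptimizedZeroStrideLoad().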
unsigned Locality = Node->getConstantOperandVal(3);
if (auto *LoadStoreMem = dyn_cast<MemSDNode>(Node)) {
int NontemporalLevel = 0;
NontemporalLevel = 3;
NontemporalLevel = 1;
NontemporalLevel = 0;
if (NontemporalLevel & 0b1)
if (NontemporalLevel & 0b10)
std::vector<SDValue> &OutOps) {
switch (ConstraintID) {
assert(Found && "SelectAddrRegImm should always succeed");
OutOps.push_back(Op0);
OutOps.push_back(Op1);
OutOps.push_back(Op);
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  if (isInt<12>(CVal)) {
if (!isa<ConstantSDNode>(Addr))
int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
int64_t Lo12 = SignExtend64<12>(CVal);
if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
  int64_t Hi20 = (Hi >> 12) & 0xfffff;
if (Seq.back().getOpcode() != RISCV::ADDI)
Lo12 = Seq.back().getImm();
assert(!Seq.empty() && "Expected more instructions in sequence");
for (auto *Use : Add->uses()) {
EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
cast<StoreSDNode>(Use)->getValue() == Add)
cast<AtomicSDNode>(Use)->getVal() == Add)
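// isWorthFoldingAdd: an ADD is only worth folding into its users'
// addressing if every user is a memory access that consumes the ADD as an
// address; a store or atomic that uses the ADD as the stored value would
// still need the ADD materialized, defeating the fold.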
unsigned MaxShiftAmount,
EVT VT = Addr.getSimpleValueType();
if (N.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N.getOperand(1))) {
  if (N.getConstantOperandVal(1) <= MaxShiftAmount) {
    ShiftAmt = N.getConstantOperandVal(1);
return ShiftAmt != 0;
if (auto *C1 = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
  isInt<12>(C1->getSExtValue())) {
} else if (UnwrapShl(Addr.getOperand(0), Index, Scale)) {
  UnwrapShl(Addr.getOperand(1), Index, Scale);
} else if (UnwrapShl(Addr, Index, Scale)) {
MVT VT = Addr.getSimpleValueType();
int64_t RV32ZdinxRange = IsINX ? 4 : 0;
int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) {
if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
if (CVal == 0 || Alignment > CVal) {
  int64_t CombinedOffset = CVal + GA->getOffset();
  CombinedOffset, GA->getTargetFlags());
if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
  int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
  assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) &&
         "simm12 not already handled?");
  if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
    int64_t Adj = CVal < 0 ? -2048 : 2047;
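// Offsets just beyond the simm12 range are split as Adj + (CVal - Adj)
// with Adj = 2047 or -2048, so the adjustment folds into one ADDI on the
// base and the remainder still fits the memory instruction's immediate.
// The RV32ZdinxRange slack exists because Zdinx register pairs on RV32
// also need offset + 4 to be addressable.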
if (Imm != 0 && Imm % ShiftWidth == 0) {
if (Imm != 0 && Imm % ShiftWidth == 0) {
unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
if (Imm % ShiftWidth == ShiftWidth - 1) {
"Unexpected condition code!");
ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
if (CCVal != ExpectedCCVal)
if (!LHS.getValueType().isScalarInteger())
if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
  int64_t CVal = C->getSExtValue();
  if (CVal == -2048) {
    RISCV::XORI, DL, N->getValueType(0), LHS,
  if (isInt<12>(CVal) || CVal == 2048) {
    RISCV::ADDI, DL, N->getValueType(0), LHS,
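// selectSETCC: RISC-V has no integer set-equal instruction, so an equality
// test against constant C becomes ADDI LHS, -C (valid for simm12 C and for
// C == 2048, whose negation fits) or XORI for C == -2048 (whose negation
// does not), followed by a test against zero.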
cast<VTSDNode>(N.getOperand(1))->getVT().getSizeInBits() == Bits) {
Val = N.getOperand(0);
auto UnwrapShlSra = [](SDValue N, unsigned ShiftAmt) {
  if (N.getOpcode() != ISD::SRA || !isa<ConstantSDNode>(N.getOperand(1)))
  N.getConstantOperandVal(1) == ShiftAmt &&
MVT VT = N.getSimpleValueType();
auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
  Val = N.getOperand(0);
MVT VT = N.getSimpleValueType();
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
uint64_t Mask = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
Mask &= maskTrailingZeros<uint64_t>(C2);
Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
  EVT VT = N.getValueType();
if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
  EVT VT = N.getValueType();
bool LeftShift = N.getOpcode() == ISD::SHL;
if ((LeftShift || N.getOpcode() == ISD::SRL) &&
    isa<ConstantSDNode>(N.getOperand(1))) {
unsigned C1 = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
if (LeftShift && Leading == 32 && Trailing > 0 &&
    (Trailing + C1) == ShAmt) {
  EVT VT = N.getValueType();
if (!LeftShift && Leading == 32 && Trailing > C1 &&
    (Trailing - C1) == ShAmt) {
  EVT VT = N.getValueType();
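// selectSHXADDOp looks through shift-and-mask combinations whose net
// effect is "X shifted left by ShAmt" so that the shift can be absorbed
// into a Zba sh1add/sh2add/sh3add instead of being emitted separately.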
if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
uint64_t Mask = N.getConstantOperandVal(1);
Mask &= maskTrailingZeros<uint64_t>(C2);
if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
  EVT VT = N.getValueType();
RISCVVPseudosTable::getPseudoInfo(User->getMachineOpcode());
bool HasGlueOp = User->getGluedNode() != nullptr;
bool HasChainOp = User->getOperand(ChainOpIdx).getValueType() == MVT::Other;
const unsigned Log2SEW = User->getConstantOperandVal(VLIdx + 1);
if (UserOpNo == VLIdx)
case RISCV::VSLL_VX:
case RISCV::VSRL_VX:
case RISCV::VSRA_VX:
case RISCV::VSSRL_VX:
case RISCV::VSSRA_VX:
case RISCV::VNSRL_WX:
case RISCV::VNSRA_WX:
case RISCV::VNCLIPU_WX:
case RISCV::VNCLIP_WX:
case RISCV::VADD_VX:
case RISCV::VSUB_VX:
case RISCV::VRSUB_VX:
case RISCV::VWADDU_VX:
case RISCV::VWSUBU_VX:
case RISCV::VWADD_VX:
case RISCV::VWSUB_VX:
case RISCV::VWADDU_WX:
case RISCV::VWSUBU_WX:
case RISCV::VWADD_WX:
case RISCV::VWSUB_WX:
case RISCV::VADC_VXM:
case RISCV::VADC_VIM:
case RISCV::VMADC_VXM:
case RISCV::VMADC_VIM:
case RISCV::VMADC_VX:
case RISCV::VSBC_VXM:
case RISCV::VMSBC_VXM:
case RISCV::VMSBC_VX:
case RISCV::VAND_VX:
case RISCV::VXOR_VX:
case RISCV::VMSEQ_VX:
case RISCV::VMSNE_VX:
case RISCV::VMSLTU_VX:
case RISCV::VMSLT_VX:
case RISCV::VMSLEU_VX:
case RISCV::VMSLE_VX:
case RISCV::VMSGTU_VX:
case RISCV::VMSGT_VX:
case RISCV::VMINU_VX:
case RISCV::VMIN_VX:
case RISCV::VMAXU_VX:
case RISCV::VMAX_VX:
case RISCV::VMUL_VX:
case RISCV::VMULH_VX:
case RISCV::VMULHU_VX:
case RISCV::VMULHSU_VX:
case RISCV::VDIVU_VX:
case RISCV::VDIV_VX:
case RISCV::VREMU_VX:
case RISCV::VREM_VX:
case RISCV::VWMUL_VX:
case RISCV::VWMULU_VX:
case RISCV::VWMULSU_VX:
case RISCV::VMACC_VX:
case RISCV::VNMSAC_VX:
case RISCV::VMADD_VX:
case RISCV::VNMSUB_VX:
case RISCV::VWMACCU_VX:
case RISCV::VWMACC_VX:
case RISCV::VWMACCSU_VX:
case RISCV::VWMACCUS_VX:
case RISCV::VMERGE_VXM:
case RISCV::VMV_V_X:
case RISCV::VSADDU_VX:
case RISCV::VSADD_VX:
case RISCV::VSSUBU_VX:
case RISCV::VSSUB_VX:
case RISCV::VAADDU_VX:
case RISCV::VAADD_VX:
case RISCV::VASUBU_VX:
case RISCV::VASUB_VX:
case RISCV::VSMUL_VX:
case RISCV::VMV_S_X:
const unsigned Depth) const {
isa<ConstantSDNode>(Node) || Depth != 0) &&
"Unexpected opcode");
for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
if (!User->isMachineOpcode())
switch (User->getMachineOpcode()) {
case RISCV::SLLI_UW:
case RISCV::FMV_W_X:
case RISCV::FCVT_H_W:
case RISCV::FCVT_H_WU:
case RISCV::FCVT_S_W:
case RISCV::FCVT_S_WU:
case RISCV::FCVT_D_W:
case RISCV::FCVT_D_WU:
case RISCV::TH_REVW:
case RISCV::TH_SRRIW:
if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
unsigned ShAmt = User->getConstantOperandVal(1);
case RISCV::FMV_H_X:
case RISCV::ZEXT_H_RV32:
case RISCV::ZEXT_H_RV64:
if (Bits < (Subtarget->getXLen() / 2))
case RISCV::SH1ADD_UW:
case RISCV::SH2ADD_UW:
case RISCV::SH3ADD_UW:
if (UI.getOperandNo() != 0 || Bits < 32)
if (UI.getOperandNo() != 0 || Bits < 8)
if (UI.getOperandNo() != 0 || Bits < 16)
if (UI.getOperandNo() != 0 || Bits < 32)
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
  int64_t Offset = C->getSExtValue();
  for (Shift = 0; Shift < 4; Shift++)
    if (isInt<5>(Offset >> Shift) && ((Offset % (1LL << Shift)) == 0))
EVT Ty = N->getValueType(0);
auto *C = dyn_cast<ConstantSDNode>(N);
if (C && isUInt<5>(C->getZExtValue())) {
  N->getValueType(0));
} else if (C && C->isAllOnes()) {
  N->getValueType(0));
} else if (isa<RegisterSDNode>(N) &&
           cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
  N->getValueType(0));
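// selectVLOp canonicalizes an AVL operand: a uimm5 constant stays an
// immediate, while the all-ones VLMaxSentinel and an explicit X0 register
// both select to X0, meaning "set VL to VLMAX".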
if (!N.getOperand(0).isUndef())
N = N.getOperand(1);
!Splat.getOperand(0).isUndef())
assert(Splat.getNumOperands() == 3 && "Unexpected number of operands");
SplatVal = Splat.getOperand(1);
std::function<bool(int64_t)> ValidateImm) {
if (!Splat || !isa<ConstantSDNode>(Splat.getOperand(1)))
const unsigned SplatEltSize = Splat.getScalarValueSizeInBits();
"Unexpected splat operand type");
APInt SplatConst = Splat.getConstantOperandAPInt(1).sextOrTrunc(SplatEltSize);
if (!ValidateImm(SplatImm))
[](int64_t Imm) { return isInt<5>(Imm); });
N, SplatVal, *CurDAG, *Subtarget,
[](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
  return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
N, SplatVal, *CurDAG, *Subtarget,
[Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
isa<ConstantSDNode>(VL) &&
if (!N.hasOneUse() ||
    N.getValueType().getSizeInBits().getKnownMinValue() < 8)
N = N->getOperand(0);
if (VT == MVT::f64 && !Subtarget->is64Bit()) {
if (auto *C = dyn_cast<ConstantSDNode>(N)) {
if (!isInt<5>(ImmVal))
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
if (N->getMachineOpcode() != RISCV::ADDIW ||
case RISCV::ADD:  Opc = RISCV::ADDW;  break;
case RISCV::ADDI: Opc = RISCV::ADDIW; break;
case RISCV::SUB:  Opc = RISCV::SUBW;  break;
case RISCV::MUL:  Opc = RISCV::MULW;  break;
case RISCV::SLLI: Opc = RISCV::SLLIW; break;
!isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
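// doPeepholeSExtW erases a sign-extension spelled "ADDIW rd, rs, 0" by
// rewriting the instruction defining rs to its W form, which already
// sign-extends from bit 31; the SLLI case additionally requires a shift
// amount below 32 so it remains encodable as SLLIW.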
case RISCV::TH_MULAW:
case RISCV::TH_MULAH:
case RISCV::TH_MULSW:
case RISCV::TH_MULSH:
if (!isa<RegisterSDNode>(MaskOp) ||
    cast<RegisterSDNode>(MaskOp)->getReg() != RISCV::V0)
const auto *Glued = GlueOp.getNode();
if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
    cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
const auto IsVMSet = [](unsigned Opc) {
  return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
         Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
         Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
         Opc == RISCV::PseudoVMSET_M_B8;
N->getOperand(N->getNumOperands() - 1));
return V.isMachineOpcode() &&
       V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
unsigned MaskOpIdx = I->MaskOpIdx;
const unsigned Opc = I->UnmaskedPseudo;
"Masked and unmasked pseudos are inconsistent");
assert(UseTUPseudo == HasTiedDest && "Unexpected pseudo structure");
for (unsigned I = !UseTUPseudo, E = N->getNumOperands(); I != E; I++) {
  if (I == MaskOpIdx || Op.getValueType() == MVT::Glue)
const auto *Glued = N->getGluedNode();
if (auto *TGlued = Glued->getGluedNode())
if (!N->memoperands_empty())
Result->setFlags(N->getFlags());
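// doPeepholeMaskedRVV: when usesAllOnesMask proves the V0 operand is a
// vmset.m, the masked pseudo is rebuilt as the UnmaskedPseudo from the
// MaskedPseudos table, skipping the mask operand and its glue while
// preserving memory operands and node flags.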
unsigned Opc = N->getMachineOpcode();
return Opc == RISCV::PseudoVMERGE_VVM_MF8 ||
       Opc == RISCV::PseudoVMERGE_VVM_MF4 ||
       Opc == RISCV::PseudoVMERGE_VVM_MF2 ||
       Opc == RISCV::PseudoVMERGE_VVM_M1 ||
       Opc == RISCV::PseudoVMERGE_VVM_M2 ||
       Opc == RISCV::PseudoVMERGE_VVM_M4 || Opc == RISCV::PseudoVMERGE_VVM_M8;
unsigned Opc = N->getMachineOpcode();
return Opc == RISCV::PseudoVMV_V_V_MF8 || Opc == RISCV::PseudoVMV_V_V_MF4 ||
       Opc == RISCV::PseudoVMV_V_V_MF2 || Opc == RISCV::PseudoVMV_V_V_M1 ||
       Opc == RISCV::PseudoVMV_V_V_M2 || Opc == RISCV::PseudoVMV_V_V_M4 ||
       Opc == RISCV::PseudoVMV_V_V_M8;
return RISCV::PseudoVMSET_M_B1;
return RISCV::PseudoVMSET_M_B2;
return RISCV::PseudoVMSET_M_B4;
return RISCV::PseudoVMSET_M_B8;
return RISCV::PseudoVMSET_M_B16;
return RISCV::PseudoVMSET_M_B32;
return RISCV::PseudoVMSET_M_B64;
bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
Merge = N->getOperand(0);
False = N->getOperand(0);
True = N->getOperand(1);
VL = N->getOperand(2);
Merge = N->getOperand(0);
False = N->getOperand(1);
True = N->getOperand(2);
Mask = N->getOperand(3);
VL = N->getOperand(4);
Glue = N->getOperand(N->getNumOperands() - 1);
assert(!Mask || cast<RegisterSDNode>(Mask)->getReg() == RISCV::V0);
3515 "Expect True is the first output of an instruction.");
3529 bool IsMasked =
false;
3531 RISCV::lookupMaskedIntrinsicByUnmasked(TrueOpc);
3532 if (!Info && HasTiedDest) {
3533 Info = RISCV::getMaskedPseudoInfo(TrueOpc);
3549 if (False != MergeOpTrue)
3554 assert(HasTiedDest &&
"Expected tied dest");
3597 unsigned TrueVLIndex =
3598 True.
getNumOperands() - HasVecPolicyOp - HasChainOp - HasGlueOp - 2;
3609 auto *CLHS = dyn_cast<ConstantSDNode>(LHS);
3610 auto *CRHS = dyn_cast<ConstantSDNode>(RHS);
3613 return CLHS->getZExtValue() <= CRHS->getZExtValue() ?
LHS :
RHS;
3619 VL = GetMinVL(TrueVL, VL);
3626 if (TrueVL != VL || !IsMasked)
3651 RISCV::V0, AllOnesMask,
SDValue());
3656 unsigned MaskedOpc =
Info->MaskedPseudo;
3660 "Expected instructions with mask have policy operand.");
3663 "Expected instructions with mask have a tied dest.");
3673 bool MergeVLShrunk = VL != OrigVL;
3685 const unsigned NormalOpsEnd = TrueVLIndex - IsMasked - HasRoundingMode;
3686 assert(!IsMasked || NormalOpsEnd ==
Info->MaskOpIdx);
3695 if (HasRoundingMode)
3711 if (!cast<MachineSDNode>(True)->memoperands_empty())
3722 doPeepholeMaskedRVV(Result);
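// performCombineVMergeAndVOps folds "vmerge vd, false, true, mask, vl"
// into the pseudo computing True: that pseudo is re-selected in its masked
// form with False as the merge operand and the smaller of the two VLs,
// eliminating the separate vmerge.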
bool RISCVDAGToDAGISel::performVMergeToVMv(SDNode *N) {
#define CASE_VMERGE_TO_VMV(lmul)                                               \
  case RISCV::PseudoVMERGE_VVM_##lmul:                                         \
    NewOpc = RISCV::PseudoVMV_V_V_##lmul;                                      \
switch (N->getMachineOpcode()) {
NewOpc, DL, N->getValueType(0),
{N->getOperand(1), N->getOperand(2), N->getOperand(4), N->getOperand(5),
bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
MadeChange |= performCombineVMergeAndVOps(N);
if (IsVMerge(N) && N->getOperand(0) == N->getOperand(1))
  MadeChange |= performVMergeToVMv(N);
bool RISCVDAGToDAGISel::doPeepholeNoRegPassThru() {
bool MadeChange = false;
if (N->use_empty() || !N->isMachineOpcode())
const unsigned Opc = N->getMachineOpcode();
if (!RISCVVPseudosTable::getPseudoInfo(Opc) ||
for (unsigned I = 1, E = N->getNumOperands(); I != E; I++) {
Result->setFlags(N->getFlags());
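// doPeepholeNoRegPassThru appears to rebuild RVV pseudos whose passthru is
// an IMPLICIT_DEF without that operand, signalling "no passthru" to later
// passes; it runs post-isel because earlier stages expect the tied-operand
// form.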