#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"

          "Number of registers within vector register groups spilled");
          "Number of registers within vector register groups reloaded");

    cl::desc("Prefer whole register move for vector registers."));

    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
                          "MinInstrCount strategy.")));

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

                      RISCV::ADJCALLSTACKUP),

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
  if (STI.hasStdExtZca())

                                             int &FrameIndex) const {
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:

  switch (MI.getOpcode()) {
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
    if (!MI.getOperand(1).isFI())
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case RISCV::VFMV_V_F:
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();

  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;

    if (MBBI->isMetaInstruction())
    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
      unsigned FirstVType = MBBI->getOperand(2).getImm();
        if (FirstLMul != LMul)
      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
      unsigned VType = MBBI->getOperand(2).getImm();
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
    } else if (MBBI->getNumDefs()) {
      if (MBBI->modifiesRegister(RISCV::VL, nullptr))
        if (!MO.isReg() || !MO.isDef())
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
        if (MO.getReg() != SrcReg)
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  assert(!Fractional && "It is impossible to be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;

    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;

                         unsigned, unsigned> {
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

  while (I != NumRegs) {
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    if (LMul == LMulCopied &&
      if (DefMBBI->getOpcode() == VIOpc)
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
      MIB = MIB.add(DefMBBI->getOperand(2));
      MIB.addImm(Log2SEW ? Log2SEW : 3);
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
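
  // Rough sketch of GetCopyInfo above (my reading; not normative): copies
  // between register groups are emitted greedily, largest chunk first. An
  // 8/4/2-register step is usable only while enough registers remain
  // (I + k <= NumRegs) and both encodings are aligned for the M<k>
  // grouping; otherwise the walk falls back to single-register moves. For
  // overlapping (reversed) copies the walk starts from the high end of the
  // group, which is why those checks test encoding % k == k - 1 instead of
  // encoding % k == 0. When a defining vsetvli is visible (DefMBBI), the
  // vmv.v.v/vmv.v.i pseudos are used instead of whole-register moves.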
                                 bool RenamableDest, bool RenamableSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    if (STI.isRV32() && STI.hasStdExtZdinx()) {

    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");

            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);

      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
                                           Register SrcReg, bool IsKill, int FI,

  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
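
  // Note (my summary, hedged): the VRN<N>M<K> classes are Zvlsseg tuples of
  // N register groups, each K registers wide, so PseudoVSPILL<N>_M<K> and
  // PseudoVRELOAD<N>_M<K> expand to N group-sized stores/loads. Plain vector
  // classes use the whole-register VS<K>R_V / VL<K>RE8_V forms; the EEW=8
  // encoding is fine for reloads because whole-register loads are
  // layout-agnostic for spill/reload purposes.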
  if (Ops.size() != 1 || Ops[0] != 1)

  switch (MI.getOpcode()) {
    if (RISCVInstrInfo::isSEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_B(MI))
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  case RISCV::VMV_X_S: {
    if (ST.getXLen() < (1U << Log2SEW))
  case RISCV::VFMV_F_S: {

  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
    return RISCV::PseudoCCLB;
    return RISCV::PseudoCCLBU;
    return RISCV::PseudoCCLH;
    return RISCV::PseudoCCLHU;
    return RISCV::PseudoCCLW;
    return RISCV::PseudoCCLWU;
    return RISCV::PseudoCCLD;
    return RISCV::PseudoCCQC_E_LB;
  case RISCV::QC_E_LBU:
    return RISCV::PseudoCCQC_E_LBU;
    return RISCV::PseudoCCQC_E_LH;
  case RISCV::QC_E_LHU:
    return RISCV::PseudoCCQC_E_LHU;
    return RISCV::PseudoCCQC_E_LW;

  if (MI.getOpcode() != RISCV::PseudoCCMOVGPR)
  if (!STI.hasShortForwardBranchILoad() || !PredOpc)
  if (Ops.size() != 1 || (Ops[0] != 4 && Ops[0] != 5))
  bool Invert = Ops[0] == 5;
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
                MI.getDebugLoc(), get(PredOpc), DestReg)
          .add({MI.getOperand(1), MI.getOperand(2)});
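
  // Sketch of the intent (hedged): with short-forward-branch load support,
  // a PseudoCCMOVGPR whose true or false input (operand 4 or 5) comes from
  // a spill-slot reload can be folded into a predicated load pseudo
  // (PseudoCCLW etc.); Invert flips the condition when the reload feeds the
  // false operand instead of the true one.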
                               bool DstIsDead) const {
  bool SrcRenamable = false;

    bool LastItem = ++Num == Seq.size();
    switch (Inst.getOpndKind()) {
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
    SrcRenamable = DstRenamable;
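
    // Context (hedged): this loop materializes a constant via the
    // instruction sequence computed by RISCVMatInt, chaining LUI/ADDI(W)/
    // SLLI steps. For example, 0x12345678 becomes
    //   lui  rd, 0x12345
    //   addi rd, rd, 0x678    (addiw on RV64)
    // with each intermediate feeding the next through SrcReg/SrcRegState.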
  case RISCV::CV_BEQIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BBC:
  case RISCV::NDS_BEQC:
  case RISCV::QC_BNEI:
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BNEC:
  case RISCV::QC_BLTI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_BGEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
                     "Unknown conditional branch");

  case RISCV::QC_MVEQ:
    return RISCV::QC_MVNE;
  case RISCV::QC_MVNE:
    return RISCV::QC_MVEQ;
  case RISCV::QC_MVLT:
    return RISCV::QC_MVGE;
  case RISCV::QC_MVGE:
    return RISCV::QC_MVLT;
  case RISCV::QC_MVLTU:
    return RISCV::QC_MVGEU;
  case RISCV::QC_MVGEU:
    return RISCV::QC_MVLTU;
  case RISCV::QC_MVEQI:
    return RISCV::QC_MVNEI;
  case RISCV::QC_MVNEI:
    return RISCV::QC_MVEQI;
  case RISCV::QC_MVLTI:
    return RISCV::QC_MVGEI;
  case RISCV::QC_MVGEI:
    return RISCV::QC_MVLTI;
  case RISCV::QC_MVLTUI:
    return RISCV::QC_MVGEUI;
  case RISCV::QC_MVGEUI:
    return RISCV::QC_MVLTUI;

  switch (SelectOpc) {
  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
      return RISCV::CV_BEQIMM;
      return RISCV::CV_BNEIMM;
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
      return RISCV::QC_BEQI;
      return RISCV::QC_BNEI;
      return RISCV::QC_BLTI;
      return RISCV::QC_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
      return RISCV::QC_BLTUI;
      return RISCV::QC_BGEUI;
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
      return RISCV::QC_E_BEQI;
      return RISCV::QC_E_BNEI;
      return RISCV::QC_E_BLTI;
      return RISCV::QC_E_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
      return RISCV::QC_E_BLTUI;
      return RISCV::QC_E_BGEUI;
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
      return RISCV::NDS_BBC;
      return RISCV::NDS_BBS;
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
      return RISCV::NDS_BEQC;
      return RISCV::NDS_BNEC;
                                   bool AllowModify) const {
  TBB = FBB = nullptr;

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {

  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
    I = FirstUncondOrIndirectBr;

  if (I->getDesc().isIndirectBranch())

  if (I->isPreISelOpcode())

  if (NumTerminators > 2)

  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
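
  // The shapes analyzeBranch reports (standard TargetInstrInfo contract):
  //   1 terminator, unconditional  -> TBB = target, no condition
  //   1 terminator, conditional    -> TBB = taken target plus Cond
  //   conditional + unconditional  -> TBB = taken, FBB = other target
  // Anything else (indirect branches, more than two terminators, pre-ISel
  // opcodes) is reported as unanalyzable.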
                                      int *BytesRemoved) const {
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  if (I == MBB.begin())
  if (!I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "RISC-V branch conditions have two components!");

  assert(RS && "RegScavenger required for long branching");
         "new block should be inserted for expanding unconditional branch");
         "restore block should be inserted for restoring clobbered registers");
         "Branch offsets outside of the signed 32-bit range not supported");

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  auto II = MBB.end();

  RS->enterBasicBlockEnd(MBB);
  if (STI.hasStdExtZicfilp())
    RC = &RISCV::GPRX7RegClass;
  RS->scavengeRegisterBackwards(*RC, MI.getIterator(),
    RS->setRegUsed(TmpGPR);

    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
    if (STI.hasStdExtZicfilp())
    if (FrameIndex == -1)

    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
    MI.getOperand(1).setMBB(&RestoreBB);
    TRI->eliminateFrameIndex(RestoreBB.back(),

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
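
  // Long-branch expansion strategy (hedged summary): an out-of-range branch
  // is rewritten as an inverted short branch around an unconditional jump
  // through a scratch GPR. The scratch is scavenged when possible
  // (restricted to x7 under Zicfilp so the indirect jump remains a valid
  // landing-pad target); if nothing is free, a callee-saved register (x9
  // with RVE, else x27) is spilled to an emergency stack slot and restored
  // in RestoreBB.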
  assert((Cond.size() == 3) && "Invalid branch condition!");
    Cond[0].setImm(RISCV::BNE);
    Cond[0].setImm(RISCV::BNEI);
    Cond[0].setImm(RISCV::BEQ);
    Cond[0].setImm(RISCV::BEQI);
    Cond[0].setImm(RISCV::BGE);
    Cond[0].setImm(RISCV::BLT);
    Cond[0].setImm(RISCV::BGEU);
    Cond[0].setImm(RISCV::BLTU);
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();

  if (Reg == RISCV::X0) {
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);

  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {

    MI.eraseFromParent();

  auto searchConst = [&](int64_t C1) -> Register {
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
             I.getOperand(0).getReg().isVirtual();
      return DefC1->getOperand(0).getReg();

      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    if (Register RegZ = searchConst(C0 + 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

      MRI.hasOneUse(RHS.getReg())) {
    if (Register RegZ = searchConst(C0 - 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
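
  // The arithmetic behind the rewrite (hedged): for integers,
  //   x <  c  <=>  x <= c - 1      and      x >= c  <=>  x > c - 1,
  // so a branch comparing against a materialized constant C0 can be
  // re-expressed against C0 plus or minus 1. If another in-flight "li"
  // already defines C0+1 (or C0-1), searchConst finds it and the branch is
  // rewritten to reuse that register, letting the original constant die.
  // The (IsSigned || C0 != -1) guard avoids the unsigned wrap at C0 == -1
  // (i.e. UINT_MAX), where C0 + 1 would wrap to 0.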
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();

                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
  case RISCV::PseudoBR:
  case RISCV::PseudoJump:
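
  // Reach of the base encodings (certain for standard RISC-V; the vendor
  // branches follow their own immediate formats): B-type conditional
  // branches carry a 13-bit signed offset (+/-4 KiB), J-type jumps a 21-bit
  // signed offset (+/-1 MiB), and PseudoJump (auipc+jalr) covers the signed
  // 32-bit range.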
  case RISCV::ADD:
    return RISCV::PseudoCCADD;
  case RISCV::SUB:
    return RISCV::PseudoCCSUB;
  case RISCV::SLL:
    return RISCV::PseudoCCSLL;
  case RISCV::SRL:
    return RISCV::PseudoCCSRL;
  case RISCV::SRA:
    return RISCV::PseudoCCSRA;
  case RISCV::AND:
    return RISCV::PseudoCCAND;
  case RISCV::OR:
    return RISCV::PseudoCCOR;
  case RISCV::XOR:
    return RISCV::PseudoCCXOR;
  case RISCV::MAX:
    return RISCV::PseudoCCMAX;
  case RISCV::MAXU:
    return RISCV::PseudoCCMAXU;
  case RISCV::MIN:
    return RISCV::PseudoCCMIN;
  case RISCV::MINU:
    return RISCV::PseudoCCMINU;
  case RISCV::MUL:
    return RISCV::PseudoCCMUL;
  case RISCV::LUI:
    return RISCV::PseudoCCLUI;
  case RISCV::QC_LI:
    return RISCV::PseudoCCQC_LI;
  case RISCV::QC_E_LI:
    return RISCV::PseudoCCQC_E_LI;
  case RISCV::ADDI:
    return RISCV::PseudoCCADDI;
  case RISCV::SLLI:
    return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:
    return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:
    return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:
    return RISCV::PseudoCCANDI;
  case RISCV::ORI:
    return RISCV::PseudoCCORI;
  case RISCV::XORI:
    return RISCV::PseudoCCXORI;
  case RISCV::ADDW:
    return RISCV::PseudoCCADDW;
  case RISCV::SUBW:
    return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:
    return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:
    return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:
    return RISCV::PseudoCCSRAW;
  case RISCV::ADDIW:
    return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW:
    return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW:
    return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW:
    return RISCV::PseudoCCSRAIW;
  case RISCV::ANDN:
    return RISCV::PseudoCCANDN;
  case RISCV::ORN:
    return RISCV::PseudoCCORN;
  case RISCV::XNOR:
    return RISCV::PseudoCCXNOR;
  case RISCV::NDS_BFOS:
    return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ:
    return RISCV::PseudoCCNDS_BFOZ;

  return RISCV::INSTRUCTION_LIST_END;
  if (!Reg.isVirtual())
  if (!MRI.hasOneNonDBGUse(Reg))

  if (!STI.hasShortForwardBranchIMinMax() &&
      (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||
       MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))

  if (!STI.hasShortForwardBranchIMul() && MI->getOpcode() == RISCV::MUL)

  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)

    if (MO.isFI() || MO.isCPI() || MO.isJTI())
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))

                                       unsigned &TrueOp, unsigned &FalseOp,
                                       bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");

  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  Optimizable = STI.hasShortForwardBranchIALU();

                                           bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchIALU())

  bool Invert = !DefMI;

  Register DestReg = MI.getOperand(0).getReg();
  if (!MRI.constrainRegClass(DestReg, PreviousClass))

  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  NewMI.add(FalseReg);

  if (DefMI->getParent() != MI.getParent())
  DefMI->eraseFromParent();
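
  // optimizeSelect, in outline (my hedged reading): a PseudoCCMOVGPR whose
  // true (or false) input is defined by a simple, safely movable ALU op
  // with a single use is replaced by the predicated form of that op
  // (PseudoCCADD, PseudoCCADDI, ...), taking the other select input as the
  // fallthrough value. Hardware with short-forward-branch fusion then
  // executes this as a branch-over-one-instruction idiom.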
  if (MI.isMetaInstruction())

  unsigned Opcode = MI.getOpcode();
  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),

  if (!MI.memoperands_empty()) {
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))

  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STATEPOINT: {
    return std::max(NumBytes, 8U);
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      if (F.getFnAttribute("patchable-function-entry")
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      return (STI.hasStdExtZca() ? 2 : 4) * Num;
    return STI.is64Bit() ? 68 : 44;
  return get(Opcode).getSize();
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");

  const unsigned Opcode = MI.getOpcode();
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  return MI.isAsCheapAsAMove();

std::optional<DestSourcePair>
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
  return std::nullopt;
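
// Why these patterns count as copies (certain ISA facts): "add rd, x0, rs",
// "or/xor rd, rs, x0", and "addi rd, rs, 0" all compute rd = rs, and
// "fsgnj.{h,s,d} rd, rs, rs" is exactly the fmv.* register-move idiom.
// Recognizing them lets generic copy propagation treat them like COPY.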
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())

      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                     RISCV::OpName::frm) < 0;
         "New instructions require FRM whereas the old one does not have it");

  for (auto *NewMI : InsInstrs) {
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())

bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC) \
  case RISCV::OPC##_M1: \
  case RISCV::OPC##_M2: \
  case RISCV::OPC##_M4: \
  case RISCV::OPC##_M8: \
  case RISCV::OPC##_MF2: \
  case RISCV::OPC##_MF4: \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC) \
  case RISCV::OPC##_M1_MASK: \
  case RISCV::OPC##_M2_MASK: \
  case RISCV::OPC##_M4_MASK: \
  case RISCV::OPC##_M8_MASK: \
  case RISCV::OPC##_MF2_MASK: \
  case RISCV::OPC##_MF4_MASK: \
  case RISCV::OPC##_MF8_MASK

    Opcode = *InvOpcode;

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE

bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
  const uint64_t TSFlags = Desc.TSFlags;

  auto checkImmOperand = [&](unsigned OpIdx) {
  auto checkRegOperand = [&](unsigned OpIdx) {
  if (!checkRegOperand(1))

  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {
    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();
        if (MI1VReg != SrcReg)

  assert(SeenMI2 && "Prev is expected to appear before Root");

bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
         "Expect the presence of passthrough operand.");
  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);
  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&

  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))

  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  for (unsigned I = 0; I < 5; ++I)

                                             bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);

  unsigned OperandIdx = Commuted ? 2 : 1;

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||

                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    Opc = *InverseOpcode;
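
  // Machine-combiner note (hedged): RVV reassociation is only attempted
  // when Root and the candidate sibling agree on everything that changes
  // the pseudo's semantics: same opcode and LMUL, matching immediate and
  // register operands where checked, the same V0 mask value (that is what
  // the backwards walk over V0 definitions verifies), and compatible
  // rounding modes (either neither instruction takes an frm operand or
  // both use a static one).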
std::optional<unsigned>
#define RVV_OPC_LMUL_CASE(OPC, INV) \
  case RISCV::OPC##_M1: \
    return RISCV::INV##_M1; \
  case RISCV::OPC##_M2: \
    return RISCV::INV##_M2; \
  case RISCV::OPC##_M4: \
    return RISCV::INV##_M4; \
  case RISCV::OPC##_M8: \
    return RISCV::INV##_M8; \
  case RISCV::OPC##_MF2: \
    return RISCV::INV##_MF2; \
  case RISCV::OPC##_MF4: \
    return RISCV::INV##_MF4; \
  case RISCV::OPC##_MF8: \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
  case RISCV::OPC##_M1_MASK: \
    return RISCV::INV##_M1_MASK; \
  case RISCV::OPC##_M2_MASK: \
    return RISCV::INV##_M2_MASK; \
  case RISCV::OPC##_M4_MASK: \
    return RISCV::INV##_M4_MASK; \
  case RISCV::OPC##_M8_MASK: \
    return RISCV::INV##_M8_MASK; \
  case RISCV::OPC##_MF2_MASK: \
    return RISCV::INV##_MF2_MASK; \
  case RISCV::OPC##_MF4_MASK: \
    return RISCV::INV##_MF4_MASK; \
  case RISCV::OPC##_MF8_MASK: \
    return RISCV::INV##_MF8_MASK

    return std::nullopt;
    return RISCV::FSUB_H;
    return RISCV::FSUB_S;
    return RISCV::FSUB_D;
    return RISCV::FADD_H;
    return RISCV::FADD_S;
    return RISCV::FADD_D;

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
                           bool DoRegPressureReduce) {
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                                    bool DoRegPressureReduce) {
                          DoRegPressureReduce)) {
                          DoRegPressureReduce)) {

                                  bool DoRegPressureReduce) {
                           unsigned CombineOpc) {
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                                unsigned OuterShiftAmt) {
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)

  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:

                                               bool DoRegPressureReduce) const {
                                     DoRegPressureReduce);

    return RISCV::FMADD_H;
    return RISCV::FMADD_S;
    return RISCV::FMADD_D;

  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();

  BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
  assert(OuterShiftAmt != 0 && "Unexpected opcode");
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  switch (InnerShiftAmt - OuterShiftAmt) {
    InnerOpc = RISCV::ADD;
    InnerOpc = RISCV::SH1ADD;
    InnerOpc = RISCV::SH2ADD;
    InnerOpc = RISCV::SH3ADD;

  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

                           DelInstrs, InstrIdxForVirtReg);
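
  // Worked example (hedged reconstruction): a pattern like
  //   sh3add Z, (add X, (slli Y, 5))        // (Z<<3) + X + (Y<<5)
  // can be rebuilt as
  //   sh3add (sh2add Y, Z), X               // (((Y<<2) + Z) << 3) + X
  // because (Y<<5) == ((Y<<2))<<3. The difference InnerShiftAmt(5) minus
  // OuterShiftAmt(3) is 2, which selects SH2ADD for the new inner
  // instruction defining NewVR.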
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;
        ErrInfo = "Expected an immediate operand.";
      int64_t Imm = MO.getImm();

#define CASE_OPERAND_UIMM(NUM) \
  case RISCVOp::OPERAND_UIMM##NUM: \
    Ok = isUInt<NUM>(Imm); \

#define CASE_OPERAND_SIMM(NUM) \
  case RISCVOp::OPERAND_SIMM##NUM: \
    Ok = isInt<NUM>(Imm); \

        Ok = Imm >= 1 && Imm <= 32;
        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;
        Ok = Imm >= -15 && Imm <= 16;
        Ok = Ok && Imm != 0;
        Ok = (isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
        Ok = Imm >= 0 && Imm <= 10;
        Ok = Imm >= 0 && Imm <= 7;
        Ok = Imm >= 1 && Imm <= 10;
        Ok = Imm >= 2 && Imm <= 14;
        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
        Ok = Imm == 1 || Imm == 2 || Imm == 4;
        ErrInfo = "Invalid immediate";
        ErrInfo = "Expected a non-register operand.";
        ErrInfo = "Invalid immediate";
        ErrInfo = "Expected a non-register operand.";
        ErrInfo = "Invalid immediate";
        ErrInfo = "Expected a non-register operand.";
        ErrInfo = "Invalid immediate";
        int64_t Imm = MO.getImm();
          ErrInfo = "Invalid immediate";
      } else if (!MO.isReg()) {
        ErrInfo = "Expected a register or immediate operand.";

    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
    if (Op.isReg() && Op.getReg().isValid()) {
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
      ErrInfo = "VL operand w/o SEW operand?";
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      ErrInfo = "Unexpected SEW value";
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
      ErrInfo = "Unexpected SEW value";
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      ErrInfo = "Invalid Policy Value";
      ErrInfo = "policy operand w/o VL operand?";
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
        !MI.readsRegister(RISCV::FRM, nullptr)) {
      ErrInfo = "dynamic rounding mode should read FRM";
  case RISCV::LD_RV32:
  case RISCV::SD_RV32:

  int64_t NewOffset = OldOffset + Disp;
         "Addressing mode not supported for folding");

  case RISCV::LD_RV32:
  case RISCV::SD_RV32:

  OffsetIsScalable = false;

  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

  if (MO1->getAddrSpace() != MO2->getAddrSpace())
  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
  return Base1 == Base2;

                                           int64_t Offset2,
                                           bool OffsetIsScalable2,
                                           unsigned ClusterSize,
                                           unsigned NumBytes) const {
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
  int64_t OffsetA = 0, OffsetB = 0;
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      LowOffset + (int)LowWidth.getValue() <= HighOffset)

std::pair<unsigned, unsigned>
  return std::make_pair(TF & Mask, TF & ~Mask);

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};

  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())

                                        unsigned &Flags) const {

  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");

  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);

  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);

  if (!MBB.back().isReturn())

  if (C.back().isReturn()) {
           "The candidate who uses return instruction must be outlined "

  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);

std::optional<std::unique_ptr<outliner::OutlinedFunction>>
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;

  unsigned InstrSizeCExt =
  unsigned CallOverhead = 0, FrameOverhead = 0;

  unsigned CFICount = 0;
  for (auto &I : Candidate) {
    if (I.isCFIInstruction())

    std::vector<MCCFIInstruction> CFIInstructions =
        C.getMF()->getFrameInstructions();
    if (CFICount > 0 && CFICount != CFIInstructions.size())
      return std::nullopt;

    CallOverhead = 4 + InstrSizeCExt;
    FrameOverhead = InstrSizeCExt;
      return std::nullopt;

  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
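
// Outliner cost model (hedged summary of the code above): candidates ending
// in a return are outlined as tail calls; otherwise the call is "jal x5,
// fn" with the return via x5 (t0), so x5 must be dead across the sequence.
// CallOverhead and FrameOverhead are measured in bytes, with InstrSizeCExt
// accounting for 2-byte instructions when the compressed extension is on,
// and CFI instructions are only accepted when the candidate covers all of
// the function's CFI (otherwise unwind info would be split).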
                                                      unsigned Flags) const {
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  if (MI.isCFIInstruction())

  for (const auto &MO : MI.operands()) {
      (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
       F.hasSection() || F.getSectionPrefix()))

  MBB.addLiveIn(RISCV::X5);

      .addGlobalAddress(M.getNamedValue(MF.getName()),
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,

  return std::nullopt;
  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;

  std::string GenericComment =
  if (!GenericComment.empty())
    return GenericComment;

    return std::string();

    return std::string();

  std::string Comment;

  switch (OpInfo.OperandType) {
    unsigned Imm = Op.getImm();
    unsigned Imm = Op.getImm();
    unsigned Imm = Op.getImm();
    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    unsigned Policy = Op.getImm();
           "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP) \
  CASE_RVV_OPCODE_MASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP) \
  CASE_RVV_OPCODE_UNMASK(OP): \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  if (!Desc.isCommutable())

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    if (MI.getOperand(2).getReg() == RISCV::X0)
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);

    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,

    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)

    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)

    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        CommutableOpIdx1 = SrcOpIdx2;

      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        CommutableOpIdx2 = 1;
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
          CommutableOpIdx2 = 3;
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
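
    // Hedged reading of the FMA commutation rules above: operand 0 is the
    // tied accumulator, so only source operands 1..3 may swap, and when the
    // caller lets us pick (CommuteAnyOperandIndex) the code prefers operand
    // 1, falling back from operand 2 to operand 3 when operand 2 holds the
    // same register as the first commutable operand.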
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
                                                     unsigned OpIdx2) const {
    return *MI.getParent()->getParent()->CloneMachineInstr(&MI);

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI: {
    auto &WorkingMI = cloneIfNew(MI);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    switch (MI.getOpcode()) {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      switch (MI.getOpcode()) {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));

#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {
        MI.setDesc(get(RISCV::SLLI_UW));
      MI.setDesc(get(RISCV::SLLI));

    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::SLLI_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
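
  // Canonicalization idea (certain ISA identities): x0 always reads zero,
  // so for example
  //   add    rd, rs, x0  ->  addi rd, rs, 0   (plain move)
  //   xor    rd, rs, rs  ->  addi rd, x0, 0   (always zero)
  //   sh1add rd, rs, x0  ->  slli rd, rs, 1
  // Folding these into ADDI/SLLI exposes them as copies or constants to
  // later passes without changing the computed value.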
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP) \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL; \

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
  switch (MI.getOpcode()) {
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    switch (MI.getOpcode()) {
              .add(MI.getOperand(0))
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));

           MI.getNumExplicitOperands() == 6);
    switch (MI.getOpcode()) {
              .add(MI.getOperand(0))
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));

  unsigned NumOps = MI.getNumOperands();
    if (Op.isReg() && Op.isKill())

  if (MI.getOperand(0).isEarlyClobber()) {
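
  // What this rewriting does (hedged): the _TIED widening pseudos (e.g. a
  // vwadd.wv whose destination is tied to the wide source) are re-emitted
  // as their untied form, copying all explicit operands across. Untying
  // gives the register allocator freedom to pick a fresh destination
  // group, and the early-clobber handling below preserves the constraint
  // that the wide destination must not overlap the narrow source.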
#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator II, const DebugLoc &DL,
                            Register DestReg, uint32_t Amount,
                            MachineInstr::MIFlag Flag) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    // Power of two: a single left shift (or nothing at all for *1).
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(ShiftAmount)
        .setMIFlag(Flag);
  } else if (int ShXAmount, ShiftAmount;
             STI.hasStdExtZba() &&
             (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
    // Amount is (3, 5 or 9) << ShiftAmount: use Zba SHXADD, preceded by an
    // SLLI when the shift amount is nonzero.
    unsigned Opc;
    switch (ShXAmount) {
    case 1:
      Opc = RISCV::SH1ADD;
      break;
    case 2:
      Opc = RISCV::SH2ADD;
      break;
    case 3:
      Opc = RISCV::SH3ADD;
      break;
    default:
      llvm_unreachable("unexpected result of isShifted359");
    }
    if (ShiftAmount)
      BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
          .addReg(DestReg, RegState::Kill)
          .addImm(ShiftAmount)
          .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(Opc), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(DestReg)
        .setMIFlag(Flag);
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    // Amount is 2^N + 1: shift into a scratch register, then add.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg)
        .addImm(Log2_32(Amount - 1))
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill)
        .setMIFlag(Flag);
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    // Amount is 2^N - 1: shift into a scratch register, then subtract.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg)
        .addImm(Log2_32(Amount + 1))
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill)
        .setMIFlag(Flag);
  } else if (STI.hasStdExtZmmul()) {
    // Materialize the constant and use a real multiply.
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    movImm(MBB, II, DL, N, Amount, Flag);
    BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(N, RegState::Kill)
        .setMIFlag(Flag);
  } else {
    // No multiplier available: decompose Amount bit by bit into shifts,
    // accumulating the partial products in Acc.
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
              .addReg(DestReg, RegState::Kill)
              .addImm(ShiftAmount - PrevShiftAmount)
              .setMIFlag(Flag);
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            BuildMI(MBB, II, DL, get(TargetOpcode::COPY), Acc)
                .addReg(DestReg)
                .setMIFlag(Flag);
          } else {
            BuildMI(MBB, II, DL, get(RISCV::ADD), Acc)
                .addReg(Acc, RegState::Kill)
                .addReg(DestReg)
                .setMIFlag(Flag);
          }
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(Acc, RegState::Kill)
        .setMIFlag(Flag);
  }
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}

unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
  return OptLevel >= CodeGenOptLevel::Aggressive
             ? STI.getTailDupAggressiveThreshold()
             : 2;
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !RISCV::isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}

std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
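// Illustrative note (not part of the source): the returned pair is
// (NF, LMUL). For example PseudoVSPILL3_M2 yields (3, 2): a three-field
// segment whose fields each occupy an LMUL=2 register group, so the spill
// slot must cover 3 * 2 vector registers and the expansion emits one
// whole-register-group store per field at successive offsets.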
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  const MachineOperand &FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  const MachineOperand &FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
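// Usage sketch (hypothetical call site): hasEqualFRM is the kind of gate
// checked before reassociating or combining FP instructions, since mixing
// static rounding modes would change results:
//
//   if (!RISCV::hasEqualFRM(Root, Prev))
//     return false; // different frm immediates: not safe to combine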
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // Single-width shifts, scaling shifts and rotates: only the low
  // log2(SEW) bits of the scalar operand are demanded.
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    return Log2SEW;

  // Narrowing shifts, narrowing clips and the widening shift-left operate
  // on a 2*SEW-wide value, so log2(2*SEW) bits are demanded.
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  case RISCV::VWSLL_VX:
    return Log2SEW + 1;

  // All remaining .vx/.vxm forms demand the full SEW bits of the scalar.
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  case RISCV::VMERGE_VXM:
  case RISCV::VMV_V_X:
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  case RISCV::VSMUL_VX:
  case RISCV::VMV_S_X:
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
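// Worked example (illustrative): for VSLL_VX with SEW = 32 (Log2SEW = 5),
// only the low 5 bits of the scalar shift amount are demanded, so a caller
// may treat "vsll.vx v8, v8, t0" with t0 = 37 the same as t0 = 5. For
// VNSRL_WX at the same SEW the answer is Log2SEW + 1 = 6 bits, because the
// shift applies to the 2*SEW-wide source; for VADD_VX it is the full
// 1 << Log2SEW = 32 bits.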
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}

unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // DestEEW == 0 encodes EEW = 1.
  if (DestEEW == 0)
    return 0;
  // Otherwise EEW = SEW * 2^(DestEEW - 1).
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(RISCVVType::isValidSEW(1 << Scaled) && "Unexpected EEW");
  return Scaled;
}
/// If MO is an immediate, or a virtual register that can be traced back to a
/// load-immediate, return the constant it holds (reconstructed helper; the
/// exact look-through logic was elided in the extracted listing).
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (MO.isImm())
    return MO.getImm();
  int64_t Imm;
  if (MO.isReg() && RISCVInstrInfo::isFromLoadImm(
                        MO.getParent()->getMF()->getRegInfo(), MO, Imm))
    return Imm;
  return std::nullopt;
}

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
    return false;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
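// Usage sketch (constructed operands, for illustration only):
//
//   MachineOperand Zero = MachineOperand::CreateImm(0);
//   MachineOperand VLMax = MachineOperand::CreateImm(RISCV::VLMaxSentinel);
//   bool Known = RISCV::isVLKnownLE(Zero, VLMax); // true: 0 <= every VL
//
// Register VL operands require SSA form, where two uses of the same virtual
// register trivially compare as equal VLs.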
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *LHS;
  MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(MachineInstr *LHS, MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Only the loop-control instructions themselves are ignored.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is normalized for such use, so just hand it back.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // Must be a conditional branch.
  if (FBB == nullptr)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Resolve each comparison operand to its defining instruction; operands
  // defined by in-loop PHIs cannot be pipelined.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };

  MachineInstr *LHS = FindRegDef(Cond[1]);
  MachineInstr *RHS = FindRegDef(Cond[2]);
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
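// Illustrative note: analyzeLoopForPipelining feeds the MachinePipeliner.
// A latch such as
//
//   loop:
//     ...body...
//     bne a0, a1, loop
//
// is accepted when analyzeBranch can decompose it and neither compare
// operand is defined by an in-loop PHI; the returned RISCVPipelinerLoopInfo
// then tells the pipeliner which instructions form the loop control.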
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Integer division/remainder.
  case RISCV::DIV:
  case RISCV::DIVU:
  case RISCV::DIVW:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMU:
  case RISCV::REMW:
  case RISCV::REMUW:
  // Floating-point division/square root.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer division/remainder.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector floating-point division/square root.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
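// Illustrative note: mapping vector pseudos back through getRVVMCOpcode
// means e.g. PseudoVDIV_VV_M2 is classified exactly like VDIV_VV. The
// scheduler and machine combiner use this hook to keep long-latency
// divide/sqrt results from serializing dependent code.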