41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
71#define GET_RISCVVPseudosTable_IMPL
72#include "RISCVGenSearchableTables.inc"
78#define GET_RISCVMaskedPseudosTable_IMPL
79#include "RISCVGenSearchableTables.inc"
85 RISCV::ADJCALLSTACKUP),
88#define GET_INSTRINFO_HELPERS
89#include "RISCVGenInstrInfo.inc"
92 if (
STI.hasStdExtZca())
101 int &FrameIndex)
const {
111 case RISCV::VL1RE8_V:
112 case RISCV::VL1RE16_V:
113 case RISCV::VL1RE32_V:
114 case RISCV::VL1RE64_V:
117 case RISCV::VL2RE8_V:
118 case RISCV::VL2RE16_V:
119 case RISCV::VL2RE32_V:
120 case RISCV::VL2RE64_V:
123 case RISCV::VL4RE8_V:
124 case RISCV::VL4RE16_V:
125 case RISCV::VL4RE32_V:
126 case RISCV::VL4RE64_V:
129 case RISCV::VL8RE8_V:
130 case RISCV::VL8RE16_V:
131 case RISCV::VL8RE32_V:
132 case RISCV::VL8RE64_V:
140 switch (
MI.getOpcode()) {
164 case RISCV::VL1RE8_V:
165 case RISCV::VL2RE8_V:
166 case RISCV::VL4RE8_V:
167 case RISCV::VL8RE8_V:
168 if (!
MI.getOperand(1).isFI())
170 FrameIndex =
MI.getOperand(1).getIndex();
173 return MI.getOperand(0).getReg();
176 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
177 MI.getOperand(2).getImm() == 0) {
178 FrameIndex =
MI.getOperand(1).getIndex();
179 return MI.getOperand(0).getReg();
186 int &FrameIndex)
const {
194 switch (
MI.getOpcode()) {
219 if (!
MI.getOperand(1).isFI())
221 FrameIndex =
MI.getOperand(1).getIndex();
224 return MI.getOperand(0).getReg();
227 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
228 MI.getOperand(2).getImm() == 0) {
229 FrameIndex =
MI.getOperand(1).getIndex();
230 return MI.getOperand(0).getReg();
240 case RISCV::VFMV_V_F:
243 case RISCV::VFMV_S_F:
245 return MI.getOperand(1).isUndef();
253 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
264 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
265 "Unexpected COPY instruction.");
269 bool FoundDef =
false;
270 bool FirstVSetVLI =
false;
271 unsigned FirstSEW = 0;
274 if (
MBBI->isMetaInstruction())
277 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
287 unsigned FirstVType =
MBBI->getOperand(2).getImm();
292 if (FirstLMul != LMul)
297 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
303 unsigned VType =
MBBI->getOperand(2).getImm();
321 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
323 }
else if (
MBBI->getNumDefs()) {
326 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
332 if (!MO.isReg() || !MO.isDef())
334 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
349 if (MO.getReg() != SrcReg)
390 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
391 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
393 assert(!Fractional &&
"It is impossible be fractional lmul here.");
394 unsigned NumRegs = NF * LMulVal;
400 SrcEncoding += NumRegs - 1;
401 DstEncoding += NumRegs - 1;
407 unsigned,
unsigned> {
415 uint16_t Diff = DstEncoding - SrcEncoding;
416 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
417 DstEncoding % 8 == 7)
419 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
420 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
421 DstEncoding % 4 == 3)
423 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
424 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
425 DstEncoding % 2 == 1)
427 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
430 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
435 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
437 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
438 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
440 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
441 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
443 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
446 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
449 while (
I != NumRegs) {
454 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
455 GetCopyInfo(SrcEncoding, DstEncoding);
459 if (LMul == LMulCopied &&
462 if (DefMBBI->getOpcode() == VIOpc)
469 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
471 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
479 MIB = MIB.add(DefMBBI->getOperand(2));
487 MIB.addImm(Log2SEW ? Log2SEW : 3);
499 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
500 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
509 bool RenamableDest,
bool RenamableSrc)
const {
513 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
520 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
526 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
532 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
534 if (
STI.hasStdExtZdinx()) {
543 if (
STI.hasStdExtP()) {
552 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
553 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
555 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
557 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
561 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
562 .
addReg(EvenReg, KillFlag)
565 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
572 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
573 RISCV::GPRRegClass.
contains(DstReg)) {
575 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
580 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
582 if (
STI.hasStdExtZfh()) {
583 Opc = RISCV::FSGNJ_H;
586 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
587 "Unexpected extensions");
589 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
590 &RISCV::FPR32RegClass);
591 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
592 &RISCV::FPR32RegClass);
593 Opc = RISCV::FSGNJ_S;
597 .
addReg(SrcReg, KillFlag);
601 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
604 .
addReg(SrcReg, KillFlag);
608 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
611 .
addReg(SrcReg, KillFlag);
615 if (RISCV::FPR32RegClass.
contains(DstReg) &&
616 RISCV::GPRRegClass.
contains(SrcReg)) {
618 .
addReg(SrcReg, KillFlag);
622 if (RISCV::GPRRegClass.
contains(DstReg) &&
623 RISCV::FPR32RegClass.
contains(SrcReg)) {
625 .
addReg(SrcReg, KillFlag);
629 if (RISCV::FPR64RegClass.
contains(DstReg) &&
630 RISCV::GPRRegClass.
contains(SrcReg)) {
631 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
633 .
addReg(SrcReg, KillFlag);
637 if (RISCV::GPRRegClass.
contains(DstReg) &&
638 RISCV::FPR64RegClass.
contains(SrcReg)) {
639 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
641 .
addReg(SrcReg, KillFlag);
647 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
658 Register SrcReg,
bool IsKill,
int FI,
667 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
668 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
670 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
671 Opcode = RISCV::SH_INX;
672 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
673 Opcode = RISCV::SW_INX;
674 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
675 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
676 Alignment >=
STI.getZilsdAlign()) {
677 Opcode = RISCV::SD_RV32;
679 Opcode = RISCV::PseudoRV32ZdinxSD;
681 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
683 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
685 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
687 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
688 Opcode = RISCV::VS1R_V;
689 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
690 Opcode = RISCV::VS2R_V;
691 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
692 Opcode = RISCV::VS4R_V;
693 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
694 Opcode = RISCV::VS8R_V;
695 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
696 Opcode = RISCV::PseudoVSPILL2_M1;
697 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
698 Opcode = RISCV::PseudoVSPILL2_M2;
699 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
700 Opcode = RISCV::PseudoVSPILL2_M4;
701 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
702 Opcode = RISCV::PseudoVSPILL3_M1;
703 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
704 Opcode = RISCV::PseudoVSPILL3_M2;
705 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
706 Opcode = RISCV::PseudoVSPILL4_M1;
707 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
708 Opcode = RISCV::PseudoVSPILL4_M2;
709 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
710 Opcode = RISCV::PseudoVSPILL5_M1;
711 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
712 Opcode = RISCV::PseudoVSPILL6_M1;
713 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
714 Opcode = RISCV::PseudoVSPILL7_M1;
715 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
716 Opcode = RISCV::PseudoVSPILL8_M1;
759 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
760 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
762 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
763 Opcode = RISCV::LH_INX;
764 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
765 Opcode = RISCV::LW_INX;
766 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
767 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
768 Alignment >=
STI.getZilsdAlign()) {
769 Opcode = RISCV::LD_RV32;
771 Opcode = RISCV::PseudoRV32ZdinxLD;
773 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
775 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
777 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
779 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
780 Opcode = RISCV::VL1RE8_V;
781 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
782 Opcode = RISCV::VL2RE8_V;
783 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
784 Opcode = RISCV::VL4RE8_V;
785 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
786 Opcode = RISCV::VL8RE8_V;
787 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
788 Opcode = RISCV::PseudoVRELOAD2_M1;
789 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
790 Opcode = RISCV::PseudoVRELOAD2_M2;
791 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
792 Opcode = RISCV::PseudoVRELOAD2_M4;
793 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
794 Opcode = RISCV::PseudoVRELOAD3_M1;
795 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
796 Opcode = RISCV::PseudoVRELOAD3_M2;
797 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
798 Opcode = RISCV::PseudoVRELOAD4_M1;
799 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
800 Opcode = RISCV::PseudoVRELOAD4_M2;
801 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
802 Opcode = RISCV::PseudoVRELOAD5_M1;
803 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
804 Opcode = RISCV::PseudoVRELOAD6_M1;
805 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
806 Opcode = RISCV::PseudoVRELOAD7_M1;
807 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
808 Opcode = RISCV::PseudoVRELOAD8_M1;
846 if (
Ops.size() != 1 ||
Ops[0] != 1)
849 switch (
MI.getOpcode()) {
851 if (RISCVInstrInfo::isSEXT_W(
MI))
853 if (RISCVInstrInfo::isZEXT_W(
MI))
855 if (RISCVInstrInfo::isZEXT_B(
MI))
862 case RISCV::ZEXT_H_RV32:
863 case RISCV::ZEXT_H_RV64:
870 case RISCV::VMV_X_S: {
873 if (ST.getXLen() < (1U << Log2SEW))
888 case RISCV::VFMV_F_S: {
915 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
924 return RISCV::PseudoCCLB;
926 return RISCV::PseudoCCLBU;
928 return RISCV::PseudoCCLH;
930 return RISCV::PseudoCCLHU;
932 return RISCV::PseudoCCLW;
934 return RISCV::PseudoCCLWU;
936 return RISCV::PseudoCCLD;
938 return RISCV::PseudoCCQC_E_LB;
939 case RISCV::QC_E_LBU:
940 return RISCV::PseudoCCQC_E_LBU;
942 return RISCV::PseudoCCQC_E_LH;
943 case RISCV::QC_E_LHU:
944 return RISCV::PseudoCCQC_E_LHU;
946 return RISCV::PseudoCCQC_E_LW;
957 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
962 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
966 if (
Ops.size() != 1 || (
Ops[0] != 1 &&
Ops[0] != 2))
969 bool Invert =
Ops[0] == 2;
978 MI.getDebugLoc(),
get(PredOpc), DestReg);
989 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
995 NewMI.
add({
MI.getOperand(
MI.getNumExplicitOperands() - 2),
996 MI.getOperand(
MI.getNumExplicitOperands() - 1)});
1005 bool DstIsDead)
const {
1021 bool SrcRenamable =
false;
1025 bool LastItem = ++Num == Seq.
size();
1030 switch (Inst.getOpndKind()) {
1040 .
addReg(SrcReg, SrcRegState)
1047 .
addReg(SrcReg, SrcRegState)
1048 .
addReg(SrcReg, SrcRegState)
1054 .
addReg(SrcReg, SrcRegState)
1062 SrcRenamable = DstRenamable;
1072 case RISCV::CV_BEQIMM:
1073 case RISCV::QC_BEQI:
1074 case RISCV::QC_E_BEQI:
1075 case RISCV::NDS_BBC:
1076 case RISCV::NDS_BEQC:
1080 case RISCV::QC_BNEI:
1081 case RISCV::QC_E_BNEI:
1082 case RISCV::CV_BNEIMM:
1083 case RISCV::NDS_BBS:
1084 case RISCV::NDS_BNEC:
1087 case RISCV::QC_BLTI:
1088 case RISCV::QC_E_BLTI:
1091 case RISCV::QC_BGEI:
1092 case RISCV::QC_E_BGEI:
1095 case RISCV::QC_BLTUI:
1096 case RISCV::QC_E_BLTUI:
1099 case RISCV::QC_BGEUI:
1100 case RISCV::QC_E_BGEUI:
1132 "Unknown conditional branch");
1143 case RISCV::QC_MVEQ:
1144 return RISCV::QC_MVNE;
1145 case RISCV::QC_MVNE:
1146 return RISCV::QC_MVEQ;
1147 case RISCV::QC_MVLT:
1148 return RISCV::QC_MVGE;
1149 case RISCV::QC_MVGE:
1150 return RISCV::QC_MVLT;
1151 case RISCV::QC_MVLTU:
1152 return RISCV::QC_MVGEU;
1153 case RISCV::QC_MVGEU:
1154 return RISCV::QC_MVLTU;
1155 case RISCV::QC_MVEQI:
1156 return RISCV::QC_MVNEI;
1157 case RISCV::QC_MVNEI:
1158 return RISCV::QC_MVEQI;
1159 case RISCV::QC_MVLTI:
1160 return RISCV::QC_MVGEI;
1161 case RISCV::QC_MVGEI:
1162 return RISCV::QC_MVLTI;
1163 case RISCV::QC_MVLTUI:
1164 return RISCV::QC_MVGEUI;
1165 case RISCV::QC_MVGEUI:
1166 return RISCV::QC_MVLTUI;
1171 switch (SelectOpc) {
1190 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1200 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1205 return RISCV::CV_BEQIMM;
1207 return RISCV::CV_BNEIMM;
1210 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1215 return RISCV::QC_BEQI;
1217 return RISCV::QC_BNEI;
1219 return RISCV::QC_BLTI;
1221 return RISCV::QC_BGEI;
1224 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1229 return RISCV::QC_BLTUI;
1231 return RISCV::QC_BGEUI;
1234 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1239 return RISCV::QC_E_BEQI;
1241 return RISCV::QC_E_BNEI;
1243 return RISCV::QC_E_BLTI;
1245 return RISCV::QC_E_BGEI;
1248 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1253 return RISCV::QC_E_BLTUI;
1255 return RISCV::QC_E_BGEUI;
1258 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1263 return RISCV::NDS_BBC;
1265 return RISCV::NDS_BBS;
1268 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1273 return RISCV::NDS_BEQC;
1275 return RISCV::NDS_BNEC;
1321 case RISCV::CV_BEQIMM:
1322 return RISCV::CV_BNEIMM;
1323 case RISCV::CV_BNEIMM:
1324 return RISCV::CV_BEQIMM;
1325 case RISCV::QC_BEQI:
1326 return RISCV::QC_BNEI;
1327 case RISCV::QC_BNEI:
1328 return RISCV::QC_BEQI;
1329 case RISCV::QC_BLTI:
1330 return RISCV::QC_BGEI;
1331 case RISCV::QC_BGEI:
1332 return RISCV::QC_BLTI;
1333 case RISCV::QC_BLTUI:
1334 return RISCV::QC_BGEUI;
1335 case RISCV::QC_BGEUI:
1336 return RISCV::QC_BLTUI;
1337 case RISCV::QC_E_BEQI:
1338 return RISCV::QC_E_BNEI;
1339 case RISCV::QC_E_BNEI:
1340 return RISCV::QC_E_BEQI;
1341 case RISCV::QC_E_BLTI:
1342 return RISCV::QC_E_BGEI;
1343 case RISCV::QC_E_BGEI:
1344 return RISCV::QC_E_BLTI;
1345 case RISCV::QC_E_BLTUI:
1346 return RISCV::QC_E_BGEUI;
1347 case RISCV::QC_E_BGEUI:
1348 return RISCV::QC_E_BLTUI;
1349 case RISCV::NDS_BBC:
1350 return RISCV::NDS_BBS;
1351 case RISCV::NDS_BBS:
1352 return RISCV::NDS_BBC;
1353 case RISCV::NDS_BEQC:
1354 return RISCV::NDS_BNEC;
1355 case RISCV::NDS_BNEC:
1356 return RISCV::NDS_BEQC;
1364 bool AllowModify)
const {
1365 TBB = FBB =
nullptr;
1370 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1376 int NumTerminators = 0;
1377 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1380 if (J->getDesc().isUnconditionalBranch() ||
1381 J->getDesc().isIndirectBranch()) {
1388 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1389 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1390 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1393 I = FirstUncondOrIndirectBr;
1397 if (
I->getDesc().isIndirectBranch())
1401 if (
I->isPreISelOpcode())
1405 if (NumTerminators > 2)
1409 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1415 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1421 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1422 I->getDesc().isUnconditionalBranch()) {
1433 int *BytesRemoved)
const {
1440 if (!
I->getDesc().isUnconditionalBranch() &&
1441 !
I->getDesc().isConditionalBranch())
1447 I->eraseFromParent();
1451 if (
I ==
MBB.begin())
1454 if (!
I->getDesc().isConditionalBranch())
1460 I->eraseFromParent();
1473 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1475 "RISC-V branch conditions have two components!");
1509 assert(RS &&
"RegScavenger required for long branching");
1511 "new block should be inserted for expanding unconditional branch");
1514 "restore block should be inserted for restoring clobbered registers");
1523 "Branch offsets outside of the signed 32-bit range not supported");
1529 auto II =
MBB.end();
1535 RS->enterBasicBlockEnd(
MBB);
1537 if (
STI.hasStdExtZicfilp())
1538 RC = &RISCV::GPRX7RegClass;
1540 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1544 RS->setRegUsed(TmpGPR);
1549 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1551 if (
STI.hasStdExtZicfilp())
1555 if (FrameIndex == -1)
1560 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1563 MI.getOperand(1).setMBB(&RestoreBB);
1567 TRI->eliminateFrameIndex(RestoreBB.
back(),
1577 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1587 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1588 MI->getOperand(1).getReg() == RISCV::X0) {
1589 Imm =
MI->getOperand(2).getImm();
1594 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1595 MI->getOperand(1).getReg() == RISCV::X0 &&
1596 MI->getOperand(2).getImm() == 11) {
1610 if (Reg == RISCV::X0) {
1618 bool IsSigned =
false;
1619 bool IsEquality =
false;
1620 switch (
MI.getOpcode()) {
1656 MI.eraseFromParent();
1682 auto searchConst = [&](int64_t C1) ->
Register {
1684 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1687 I.getOperand(0).getReg().isVirtual();
1690 return DefC1->getOperand(0).getReg();
1702 if (
isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1703 MRI.
hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1705 if (
Register RegZ = searchConst(C0 + 1)) {
1713 MI.eraseFromParent();
1723 if (
isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1726 if (
Register RegZ = searchConst(C0 - 1)) {
1734 MI.eraseFromParent();
1744 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1746 int NumOp =
MI.getNumExplicitOperands();
1747 return MI.getOperand(NumOp - 1).getMBB();
1751 int64_t BrOffset)
const {
1752 unsigned XLen =
STI.getXLen();
1759 case RISCV::NDS_BBC:
1760 case RISCV::NDS_BBS:
1761 case RISCV::NDS_BEQC:
1762 case RISCV::NDS_BNEC:
1772 case RISCV::CV_BEQIMM:
1773 case RISCV::CV_BNEIMM:
1774 case RISCV::QC_BEQI:
1775 case RISCV::QC_BNEI:
1776 case RISCV::QC_BGEI:
1777 case RISCV::QC_BLTI:
1778 case RISCV::QC_BLTUI:
1779 case RISCV::QC_BGEUI:
1780 case RISCV::QC_E_BEQI:
1781 case RISCV::QC_E_BNEI:
1782 case RISCV::QC_E_BGEI:
1783 case RISCV::QC_E_BLTI:
1784 case RISCV::QC_E_BLTUI:
1785 case RISCV::QC_E_BGEUI:
1788 case RISCV::PseudoBR:
1790 case RISCV::PseudoJump:
1801 case RISCV::ADD:
return RISCV::PseudoCCADD;
1802 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1803 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1804 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1805 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1806 case RISCV::AND:
return RISCV::PseudoCCAND;
1807 case RISCV::OR:
return RISCV::PseudoCCOR;
1808 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1809 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1810 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1811 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1812 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1813 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1814 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1815 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1816 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1818 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1819 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1820 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1821 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1822 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1823 case RISCV::ORI:
return RISCV::PseudoCCORI;
1824 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1826 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1827 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1828 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1829 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1830 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1832 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1833 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1834 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1835 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1837 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1838 case RISCV::ORN:
return RISCV::PseudoCCORN;
1839 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1841 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1842 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1846 return RISCV::INSTRUCTION_LIST_END;
1855 if (!
Reg.isVirtual())
1863 if (!STI.hasShortForwardBranchIMinMax() &&
1864 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1865 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1868 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1875 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1876 MI->getOperand(1).getReg() == RISCV::X0)
1881 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1894 bool DontMoveAcrossStores =
true;
1895 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1903 bool PreferFalse)
const {
1904 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1905 "Unknown select instruction");
1906 if (!
STI.hasShortForwardBranchIALU())
1912 bool Invert = !
DefMI;
1920 Register DestReg =
MI.getOperand(0).getReg();
1926 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1933 NewMI.
add(FalseReg);
1941 unsigned BCCOpcode =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1947 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 2));
1948 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 1));
1958 if (
DefMI->getParent() !=
MI.getParent())
1962 DefMI->eraseFromParent();
1967 if (
MI.isMetaInstruction())
1970 unsigned Opcode =
MI.getOpcode();
1972 if (Opcode == TargetOpcode::INLINEASM ||
1973 Opcode == TargetOpcode::INLINEASM_BR) {
1975 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1980 if (
STI.hasStdExtZca()) {
1981 if (isCompressibleInst(
MI,
STI))
1988 if (Opcode == TargetOpcode::BUNDLE)
1989 return getInstBundleLength(
MI);
1991 if (
MI.getParent() &&
MI.getParent()->getParent()) {
1992 if (isCompressibleInst(
MI,
STI))
1997 case RISCV::PseudoMV_FPR16INX:
1998 case RISCV::PseudoMV_FPR32INX:
2000 return STI.hasStdExtZca() ? 2 : 4;
2002 case RISCV::PseudoCCMOVGPRNoX0:
2003 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2006 case RISCV::PseudoCCMOVGPR:
2007 case RISCV::PseudoCCADD:
2008 case RISCV::PseudoCCSUB:
2009 case RISCV::PseudoCCSLL:
2010 case RISCV::PseudoCCSRL:
2011 case RISCV::PseudoCCSRA:
2012 case RISCV::PseudoCCAND:
2013 case RISCV::PseudoCCOR:
2014 case RISCV::PseudoCCXOR:
2015 case RISCV::PseudoCCADDI:
2016 case RISCV::PseudoCCANDI:
2017 case RISCV::PseudoCCORI:
2018 case RISCV::PseudoCCXORI:
2019 case RISCV::PseudoCCLUI:
2020 case RISCV::PseudoCCSLLI:
2021 case RISCV::PseudoCCSRLI:
2022 case RISCV::PseudoCCSRAI:
2023 case RISCV::PseudoCCADDW:
2024 case RISCV::PseudoCCSUBW:
2025 case RISCV::PseudoCCSLLW:
2026 case RISCV::PseudoCCSRLW:
2027 case RISCV::PseudoCCSRAW:
2028 case RISCV::PseudoCCADDIW:
2029 case RISCV::PseudoCCSLLIW:
2030 case RISCV::PseudoCCSRLIW:
2031 case RISCV::PseudoCCSRAIW:
2032 case RISCV::PseudoCCANDN:
2033 case RISCV::PseudoCCORN:
2034 case RISCV::PseudoCCXNOR:
2035 case RISCV::PseudoCCMAX:
2036 case RISCV::PseudoCCMIN:
2037 case RISCV::PseudoCCMAXU:
2038 case RISCV::PseudoCCMINU:
2039 case RISCV::PseudoCCMUL:
2040 case RISCV::PseudoCCLB:
2041 case RISCV::PseudoCCLH:
2042 case RISCV::PseudoCCLW:
2043 case RISCV::PseudoCCLHU:
2044 case RISCV::PseudoCCLBU:
2045 case RISCV::PseudoCCLWU:
2046 case RISCV::PseudoCCLD:
2047 case RISCV::PseudoCCQC_LI:
2048 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2051 case RISCV::PseudoCCQC_E_LI:
2052 case RISCV::PseudoCCQC_E_LB:
2053 case RISCV::PseudoCCQC_E_LH:
2054 case RISCV::PseudoCCQC_E_LW:
2055 case RISCV::PseudoCCQC_E_LHU:
2056 case RISCV::PseudoCCQC_E_LBU:
2057 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2060 case TargetOpcode::STACKMAP:
2063 case TargetOpcode::PATCHPOINT:
2066 case TargetOpcode::STATEPOINT: {
2070 return std::max(NumBytes, 8U);
2072 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2073 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2074 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2077 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2078 F.hasFnAttribute(
"patchable-function-entry")) {
2080 if (
F.getFnAttribute(
"patchable-function-entry")
2082 .getAsInteger(10, Num))
2083 return get(Opcode).getSize();
2086 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2090 return STI.is64Bit() ? 68 : 44;
2093 return get(Opcode).getSize();
2097unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
2101 while (++
I != E &&
I->isInsideBundle()) {
2102 assert(!
I->isBundle() &&
"No nested bundle!");
2109 const unsigned Opcode =
MI.getOpcode();
2113 case RISCV::FSGNJ_D:
2114 case RISCV::FSGNJ_S:
2115 case RISCV::FSGNJ_H:
2116 case RISCV::FSGNJ_D_INX:
2117 case RISCV::FSGNJ_D_IN32X:
2118 case RISCV::FSGNJ_S_INX:
2119 case RISCV::FSGNJ_H_INX:
2121 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2122 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2126 return (
MI.getOperand(1).isReg() &&
2127 MI.getOperand(1).getReg() == RISCV::X0) ||
2128 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2130 return MI.isAsCheapAsAMove();
2133std::optional<DestSourcePair>
2137 switch (
MI.getOpcode()) {
2143 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2144 MI.getOperand(2).isReg())
2146 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2147 MI.getOperand(1).isReg())
2152 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2153 MI.getOperand(2).getImm() == 0)
2157 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2158 MI.getOperand(1).isReg())
2162 case RISCV::SH1ADD_UW:
2164 case RISCV::SH2ADD_UW:
2166 case RISCV::SH3ADD_UW:
2167 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2168 MI.getOperand(2).isReg())
2171 case RISCV::FSGNJ_D:
2172 case RISCV::FSGNJ_S:
2173 case RISCV::FSGNJ_H:
2174 case RISCV::FSGNJ_D_INX:
2175 case RISCV::FSGNJ_D_IN32X:
2176 case RISCV::FSGNJ_S_INX:
2177 case RISCV::FSGNJ_H_INX:
2179 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2180 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2184 return std::nullopt;
2192 const auto &SchedModel =
STI.getSchedModel();
2193 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2205 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2209 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2210 RISCV::OpName::frm) < 0;
2212 "New instructions require FRM whereas the old one does not have it");
2219 for (
auto *NewMI : InsInstrs) {
2221 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2222 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2264bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2265 bool Invert)
const {
2266#define OPCODE_LMUL_CASE(OPC) \
2267 case RISCV::OPC##_M1: \
2268 case RISCV::OPC##_M2: \
2269 case RISCV::OPC##_M4: \
2270 case RISCV::OPC##_M8: \
2271 case RISCV::OPC##_MF2: \
2272 case RISCV::OPC##_MF4: \
2273 case RISCV::OPC##_MF8
2275#define OPCODE_LMUL_MASK_CASE(OPC) \
2276 case RISCV::OPC##_M1_MASK: \
2277 case RISCV::OPC##_M2_MASK: \
2278 case RISCV::OPC##_M4_MASK: \
2279 case RISCV::OPC##_M8_MASK: \
2280 case RISCV::OPC##_MF2_MASK: \
2281 case RISCV::OPC##_MF4_MASK: \
2282 case RISCV::OPC##_MF8_MASK
2287 Opcode = *InvOpcode;
2304#undef OPCODE_LMUL_MASK_CASE
2305#undef OPCODE_LMUL_CASE
2308bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2319 const uint64_t TSFlags =
Desc.TSFlags;
2321 auto checkImmOperand = [&](
unsigned OpIdx) {
2325 auto checkRegOperand = [&](
unsigned OpIdx) {
2333 if (!checkRegOperand(1))
2348 bool SeenMI2 =
false;
2349 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2358 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2359 Register SrcReg = It->getOperand(1).getReg();
2377 if (MI1VReg != SrcReg)
2386 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2426bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2427 bool &Commuted)
const {
2431 "Expect the present of passthrough operand.");
2437 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2438 areRVVInstsReassociable(Inst, *MI2);
2442 return areRVVInstsReassociable(Inst, *MI1) &&
2443 (isVectorAssociativeAndCommutative(*MI1) ||
2444 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2451 if (!isVectorAssociativeAndCommutative(Inst) &&
2452 !isVectorAssociativeAndCommutative(Inst,
true))
2478 for (
unsigned I = 0;
I < 5; ++
I)
2484 bool &Commuted)
const {
2485 if (isVectorAssociativeAndCommutative(Inst) ||
2486 isVectorAssociativeAndCommutative(Inst,
true))
2487 return hasReassociableVectorSibling(Inst, Commuted);
2493 unsigned OperandIdx = Commuted ? 2 : 1;
2497 int16_t InstFrmOpIdx =
2498 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2499 int16_t SiblingFrmOpIdx =
2500 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2502 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2507 bool Invert)
const {
2508 if (isVectorAssociativeAndCommutative(Inst, Invert))
2516 Opc = *InverseOpcode;
2561std::optional<unsigned>
2563#define RVV_OPC_LMUL_CASE(OPC, INV) \
2564 case RISCV::OPC##_M1: \
2565 return RISCV::INV##_M1; \
2566 case RISCV::OPC##_M2: \
2567 return RISCV::INV##_M2; \
2568 case RISCV::OPC##_M4: \
2569 return RISCV::INV##_M4; \
2570 case RISCV::OPC##_M8: \
2571 return RISCV::INV##_M8; \
2572 case RISCV::OPC##_MF2: \
2573 return RISCV::INV##_MF2; \
2574 case RISCV::OPC##_MF4: \
2575 return RISCV::INV##_MF4; \
2576 case RISCV::OPC##_MF8: \
2577 return RISCV::INV##_MF8
2579#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2580 case RISCV::OPC##_M1_MASK: \
2581 return RISCV::INV##_M1_MASK; \
2582 case RISCV::OPC##_M2_MASK: \
2583 return RISCV::INV##_M2_MASK; \
2584 case RISCV::OPC##_M4_MASK: \
2585 return RISCV::INV##_M4_MASK; \
2586 case RISCV::OPC##_M8_MASK: \
2587 return RISCV::INV##_M8_MASK; \
2588 case RISCV::OPC##_MF2_MASK: \
2589 return RISCV::INV##_MF2_MASK; \
2590 case RISCV::OPC##_MF4_MASK: \
2591 return RISCV::INV##_MF4_MASK; \
2592 case RISCV::OPC##_MF8_MASK: \
2593 return RISCV::INV##_MF8_MASK
2597 return std::nullopt;
2599 return RISCV::FSUB_H;
2601 return RISCV::FSUB_S;
2603 return RISCV::FSUB_D;
2605 return RISCV::FADD_H;
2607 return RISCV::FADD_S;
2609 return RISCV::FADD_D;
2626#undef RVV_OPC_LMUL_MASK_CASE
2627#undef RVV_OPC_LMUL_CASE
2632 bool DoRegPressureReduce) {
2659 bool DoRegPressureReduce) {
2666 DoRegPressureReduce)) {
2672 DoRegPressureReduce)) {
2682 bool DoRegPressureReduce) {
2690 unsigned CombineOpc) {
2697 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2711 unsigned OuterShiftAmt) {
2717 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2744 case RISCV::SH1ADD_UW:
2746 case RISCV::SH2ADD_UW:
2748 case RISCV::SH3ADD_UW:
2794 bool DoRegPressureReduce)
const {
2803 DoRegPressureReduce);
2811 return RISCV::FMADD_H;
2813 return RISCV::FMADD_S;
2815 return RISCV::FMADD_D;
2860 bool Mul1IsKill = Mul1.
isKill();
2861 bool Mul2IsKill = Mul2.
isKill();
2862 bool AddendIsKill = Addend.
isKill();
2871 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2896 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2903 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2906 switch (InnerShiftAmt - OuterShiftAmt) {
2910 InnerOpc = RISCV::ADD;
2913 InnerOpc = RISCV::SH1ADD;
2916 InnerOpc = RISCV::SH2ADD;
2919 InnerOpc = RISCV::SH3ADD;
2937 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2954 DelInstrs, InstrIdxForVirtReg);
2981 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2983 unsigned OpType = Operand.OperandType;
2989 ErrInfo =
"Expected an immediate operand.";
2992 int64_t Imm = MO.
getImm();
2998#define CASE_OPERAND_UIMM(NUM) \
2999 case RISCVOp::OPERAND_UIMM##NUM: \
3000 Ok = isUInt<NUM>(Imm); \
3002#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
3003 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
3004 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
3005 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
3008#define CASE_OPERAND_SIMM(NUM) \
3009 case RISCVOp::OPERAND_SIMM##NUM: \
3010 Ok = isInt<NUM>(Imm); \
3044 Ok = Imm >= 1 && Imm <= 32;
3047 Ok = Imm >= 1 && Imm <= 64;
3068 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3079 Ok = Imm >= -15 && Imm <= 16;
3107 Ok = Ok && Imm != 0;
3110 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3113 Ok = Imm >= 0 && Imm <= 10;
3116 Ok = Imm >= 0 && Imm <= 7;
3119 Ok = Imm >= 1 && Imm <= 10;
3122 Ok = Imm >= 2 && Imm <= 14;
3131 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3166 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3170 ErrInfo =
"Invalid immediate";
3179 ErrInfo =
"Expected a non-register operand.";
3183 ErrInfo =
"Invalid immediate";
3192 ErrInfo =
"Expected a non-register operand.";
3196 ErrInfo =
"Invalid immediate";
3204 ErrInfo =
"Expected a non-register operand.";
3208 ErrInfo =
"Invalid immediate";
3214 int64_t Imm = MO.
getImm();
3217 ErrInfo =
"Invalid immediate";
3220 }
else if (!MO.
isReg()) {
3221 ErrInfo =
"Expected a register or immediate operand.";
3227 ErrInfo =
"Expected a register or immediate operand.";
3237 if (!
Op.isImm() && !
Op.isReg()) {
3238 ErrInfo =
"Invalid operand type for VL operand";
3241 if (
Op.isReg() &&
Op.getReg().isValid()) {
3244 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3245 ErrInfo =
"Invalid register class for VL operand";
3250 ErrInfo =
"VL operand w/o SEW operand?";
3256 if (!
MI.getOperand(
OpIdx).isImm()) {
3257 ErrInfo =
"SEW value expected to be an immediate";
3262 ErrInfo =
"Unexpected SEW value";
3265 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3267 ErrInfo =
"Unexpected SEW value";
3273 if (!
MI.getOperand(
OpIdx).isImm()) {
3274 ErrInfo =
"Policy operand expected to be an immediate";
3279 ErrInfo =
"Invalid Policy Value";
3283 ErrInfo =
"policy operand w/o VL operand?";
3291 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3292 ErrInfo =
"policy operand w/o tied operand?";
3299 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3300 ErrInfo =
"dynamic rounding mode should read FRM";
3322 case RISCV::LD_RV32:
3332 case RISCV::SD_RV32:
3348 int64_t NewOffset = OldOffset + Disp;
3370 "Addressing mode not supported for folding");
3443 case RISCV::LD_RV32:
3446 case RISCV::SD_RV32:
3453 OffsetIsScalable =
false;
3469 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3477 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3480 auto Base1 = MO1->getValue();
3481 auto Base2 = MO2->getValue();
3482 if (!Base1 || !Base2)
3490 return Base1 == Base2;
3496 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3497 unsigned NumBytes)
const {
3500 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3505 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3511 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3517 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3567 int64_t OffsetA = 0, OffsetB = 0;
3573 int LowOffset = std::min(OffsetA, OffsetB);
3574 int HighOffset = std::max(OffsetA, OffsetB);
3575 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3577 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3584std::pair<unsigned, unsigned>
3587 return std::make_pair(TF & Mask, TF & ~Mask);
3593 static const std::pair<unsigned, const char *> TargetFlags[] = {
3594 {MO_CALL,
"riscv-call"},
3595 {MO_LO,
"riscv-lo"},
3596 {MO_HI,
"riscv-hi"},
3597 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3598 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3599 {MO_GOT_HI,
"riscv-got-hi"},
3600 {MO_TPREL_LO,
"riscv-tprel-lo"},
3601 {MO_TPREL_HI,
"riscv-tprel-hi"},
3602 {MO_TPREL_ADD,
"riscv-tprel-add"},
3603 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3604 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3605 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3606 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3607 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3608 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3616 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3629 unsigned &Flags)
const {
3648 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3649 F.hasFnAttribute(
"patchable-function-entry");
3654 return MI.readsRegister(RegNo,
TRI) ||
3655 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3660 return MI.modifiesRegister(RegNo,
TRI) ||
3661 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3665 if (!
MBB.back().isReturn())
3691 if (
C.back().isReturn() &&
3692 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3694 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3696 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3697 "the proposed outlined function call\n");
3703 if (
C.back().isReturn()) {
3705 "The candidate who uses return instruction must be outlined "
3717 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo);
3720std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3723 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3724 unsigned MinRepeats)
const {
3732 if (RepeatedSequenceLocs.size() < MinRepeats)
3733 return std::nullopt;
3737 unsigned InstrSizeCExt =
3739 unsigned CallOverhead = 0, FrameOverhead = 0;
3742 unsigned CFICount = 0;
3743 for (
auto &
I : Candidate) {
3744 if (
I.isCFIInstruction())
3755 std::vector<MCCFIInstruction> CFIInstructions =
3756 C.getMF()->getFrameInstructions();
3758 if (CFICount > 0 && CFICount != CFIInstructions.size())
3759 return std::nullopt;
3767 CallOverhead = 4 + InstrSizeCExt;
3774 FrameOverhead = InstrSizeCExt;
3780 return std::nullopt;
3782 for (
auto &
C : RepeatedSequenceLocs)
3783 C.setCallInfo(MOCI, CallOverhead);
3785 unsigned SequenceSize = 0;
3786 for (
auto &
MI : Candidate)
3789 return std::make_unique<outliner::OutlinedFunction>(
3790 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3796 unsigned Flags)
const {
3800 MBB->getParent()->getSubtarget().getRegisterInfo();
3801 const auto &
F =
MI.getMF()->getFunction();
3806 if (
MI.isCFIInstruction())
3814 for (
const auto &MO :
MI.operands()) {
3819 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3820 F.hasSection() ||
F.getSectionPrefix()))
3837 MBB.addLiveIn(RISCV::X5);
3852 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3860 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3871 return std::nullopt;
3875 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3876 MI.getOperand(2).isImm())
3877 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3879 return std::nullopt;
3887 std::string GenericComment =
3889 if (!GenericComment.empty())
3890 return GenericComment;
3894 return std::string();
3896 std::string Comment;
3903 switch (OpInfo.OperandType) {
3906 unsigned Imm =
Op.getImm();
3911 unsigned Imm =
Op.getImm();
3916 unsigned Imm =
Op.getImm();
3922 unsigned Log2SEW =
Op.getImm();
3923 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3929 unsigned Policy =
Op.getImm();
3931 "Invalid Policy Value");
3937 if (
Op.isImm() &&
Op.getImm() == -1)
3959#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3960 RISCV::Pseudo##OP##_##LMUL
3962#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3963 RISCV::Pseudo##OP##_##LMUL##_MASK
3965#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3966 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3967 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3969#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3970 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3971 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3972 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3973 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3974 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3975 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3977#define CASE_RVV_OPCODE_UNMASK(OP) \
3978 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3979 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3981#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3982 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3983 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3984 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3985 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3986 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3987 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3989#define CASE_RVV_OPCODE_MASK(OP) \
3990 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3991 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3993#define CASE_RVV_OPCODE_WIDEN(OP) \
3994 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3995 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3997#define CASE_RVV_OPCODE(OP) \
3998 CASE_RVV_OPCODE_UNMASK(OP): \
3999 case CASE_RVV_OPCODE_MASK(OP)
4003#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
4004 RISCV::PseudoV##OP##_##TYPE##_##LMUL
4006#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
4007 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
4008 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
4009 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
4010 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
4011 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
4012 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
4013 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
4016#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
4017 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
4019#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
4020 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
4021 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
4022 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
4023 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
4025#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
4026 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
4027 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
4029#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
4030 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
4031 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
4033#define CASE_VFMA_OPCODE_VV(OP) \
4034 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
4035 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
4036 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
4037 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
4039#define CASE_VFMA_SPLATS(OP) \
4040 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
4041 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
4042 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
4043 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
4047 unsigned &SrcOpIdx1,
4048 unsigned &SrcOpIdx2)
const {
4050 if (!
Desc.isCommutable())
4053 switch (
MI.getOpcode()) {
4054 case RISCV::TH_MVEQZ:
4055 case RISCV::TH_MVNEZ:
4059 if (
MI.getOperand(2).getReg() == RISCV::X0)
4062 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4063 case RISCV::QC_SELECTIEQ:
4064 case RISCV::QC_SELECTINE:
4065 case RISCV::QC_SELECTIIEQ:
4066 case RISCV::QC_SELECTIINE:
4067 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4068 case RISCV::QC_MVEQ:
4069 case RISCV::QC_MVNE:
4070 case RISCV::QC_MVLT:
4071 case RISCV::QC_MVGE:
4072 case RISCV::QC_MVLTU:
4073 case RISCV::QC_MVGEU:
4074 case RISCV::QC_MVEQI:
4075 case RISCV::QC_MVNEI:
4076 case RISCV::QC_MVLTI:
4077 case RISCV::QC_MVGEI:
4078 case RISCV::QC_MVLTUI:
4079 case RISCV::QC_MVGEUI:
4080 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4081 case RISCV::TH_MULA:
4082 case RISCV::TH_MULAW:
4083 case RISCV::TH_MULAH:
4084 case RISCV::TH_MULS:
4085 case RISCV::TH_MULSW:
4086 case RISCV::TH_MULSH:
4088 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4089 case RISCV::PseudoCCMOVGPRNoX0:
4090 case RISCV::PseudoCCMOVGPR:
4092 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4123 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4150 unsigned CommutableOpIdx1 = 1;
4151 unsigned CommutableOpIdx2 = 3;
4152 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4173 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4175 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4179 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4180 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4186 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4187 SrcOpIdx2 == CommuteAnyOperandIndex) {
4190 unsigned CommutableOpIdx1 = SrcOpIdx1;
4191 if (SrcOpIdx1 == SrcOpIdx2) {
4194 CommutableOpIdx1 = 1;
4195 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4197 CommutableOpIdx1 = SrcOpIdx2;
4202 unsigned CommutableOpIdx2;
4203 if (CommutableOpIdx1 != 1) {
4205 CommutableOpIdx2 = 1;
4207 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4212 if (Op1Reg !=
MI.getOperand(2).getReg())
4213 CommutableOpIdx2 = 2;
4215 CommutableOpIdx2 = 3;
4220 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4233#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4234 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4235 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4238#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4239 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4240 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4241 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4242 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4243 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4244 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4245 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4248#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4249 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4250 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4253#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4254 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4255 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4256 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4257 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4259#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4260 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4261 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4263#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4264 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4265 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4267#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4268 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4269 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4270 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4271 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4273#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4274 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4275 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4276 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4277 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4283 unsigned OpIdx2)
const {
4286 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4290 switch (
MI.getOpcode()) {
4291 case RISCV::TH_MVEQZ:
4292 case RISCV::TH_MVNEZ: {
4293 auto &WorkingMI = cloneIfNew(
MI);
4294 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4295 : RISCV::TH_MVEQZ));
4299 case RISCV::QC_SELECTIEQ:
4300 case RISCV::QC_SELECTINE:
4301 case RISCV::QC_SELECTIIEQ:
4302 case RISCV::QC_SELECTIINE:
4304 case RISCV::QC_MVEQ:
4305 case RISCV::QC_MVNE:
4306 case RISCV::QC_MVLT:
4307 case RISCV::QC_MVGE:
4308 case RISCV::QC_MVLTU:
4309 case RISCV::QC_MVGEU:
4310 case RISCV::QC_MVEQI:
4311 case RISCV::QC_MVNEI:
4312 case RISCV::QC_MVLTI:
4313 case RISCV::QC_MVGEI:
4314 case RISCV::QC_MVLTUI:
4315 case RISCV::QC_MVGEUI: {
4316 auto &WorkingMI = cloneIfNew(
MI);
4321 case RISCV::PseudoCCMOVGPRNoX0:
4322 case RISCV::PseudoCCMOVGPR: {
4324 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
4326 auto &WorkingMI = cloneIfNew(
MI);
4327 WorkingMI.getOperand(
MI.getNumExplicitOperands() - 3).setImm(BCC);
4351 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4352 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4354 switch (
MI.getOpcode()) {
4377 auto &WorkingMI = cloneIfNew(
MI);
4378 WorkingMI.setDesc(
get(
Opc));
4388 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4391 if (OpIdx1 == 3 || OpIdx2 == 3) {
4393 switch (
MI.getOpcode()) {
4404 auto &WorkingMI = cloneIfNew(
MI);
4405 WorkingMI.setDesc(
get(
Opc));
4417#undef CASE_VMA_CHANGE_OPCODE_COMMON
4418#undef CASE_VMA_CHANGE_OPCODE_LMULS
4419#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4420#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4421#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4422#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4423#undef CASE_VFMA_CHANGE_OPCODE_VV
4424#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4426#undef CASE_RVV_OPCODE_UNMASK_LMUL
4427#undef CASE_RVV_OPCODE_MASK_LMUL
4428#undef CASE_RVV_OPCODE_LMUL
4429#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4430#undef CASE_RVV_OPCODE_UNMASK
4431#undef CASE_RVV_OPCODE_MASK_WIDEN
4432#undef CASE_RVV_OPCODE_MASK
4433#undef CASE_RVV_OPCODE_WIDEN
4434#undef CASE_RVV_OPCODE
4436#undef CASE_VMA_OPCODE_COMMON
4437#undef CASE_VMA_OPCODE_LMULS
4438#undef CASE_VFMA_OPCODE_COMMON
4439#undef CASE_VFMA_OPCODE_LMULS_M1
4440#undef CASE_VFMA_OPCODE_LMULS_MF2
4441#undef CASE_VFMA_OPCODE_LMULS_MF4
4442#undef CASE_VFMA_OPCODE_VV
4443#undef CASE_VFMA_SPLATS
4446 switch (
MI.getOpcode()) {
4454 if (
MI.getOperand(1).getReg() == RISCV::X0)
4455 commuteInstruction(
MI);
4457 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4458 MI.getOperand(2).ChangeToImmediate(0);
4459 MI.setDesc(
get(RISCV::ADDI));
4463 if (
MI.getOpcode() == RISCV::XOR &&
4464 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4465 MI.getOperand(1).setReg(RISCV::X0);
4466 MI.getOperand(2).ChangeToImmediate(0);
4467 MI.setDesc(
get(RISCV::ADDI));
4474 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4475 MI.setDesc(
get(RISCV::ADDI));
4481 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4482 MI.getOperand(2).ChangeToImmediate(0);
4483 MI.setDesc(
get(RISCV::ADDI));
4489 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4490 MI.getOperand(2).ChangeToImmediate(0);
4491 MI.setDesc(
get(RISCV::ADDIW));
4498 if (
MI.getOperand(1).getReg() == RISCV::X0)
4499 commuteInstruction(
MI);
4501 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4502 MI.getOperand(2).ChangeToImmediate(0);
4503 MI.setDesc(
get(RISCV::ADDIW));
4508 case RISCV::SH1ADD_UW:
4510 case RISCV::SH2ADD_UW:
4512 case RISCV::SH3ADD_UW:
4514 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4515 MI.removeOperand(1);
4517 MI.setDesc(
get(RISCV::ADDI));
4521 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4522 MI.removeOperand(2);
4523 unsigned Opc =
MI.getOpcode();
4524 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4525 Opc == RISCV::SH3ADD_UW) {
4527 MI.setDesc(
get(RISCV::SLLI_UW));
4531 MI.setDesc(
get(RISCV::SLLI));
4545 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4546 MI.getOperand(2).getReg() == RISCV::X0) {
4547 MI.getOperand(1).setReg(RISCV::X0);
4548 MI.getOperand(2).ChangeToImmediate(0);
4549 MI.setDesc(
get(RISCV::ADDI));
4555 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4556 MI.getOperand(2).setImm(0);
4557 MI.setDesc(
get(RISCV::ADDI));
4565 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4566 MI.getOperand(2).ChangeToImmediate(0);
4567 MI.setDesc(
get(RISCV::ADDI));
4571 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4572 MI.getOperand(2).ChangeToImmediate(0);
4573 MI.setDesc(
get(RISCV::ADDI));
4581 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4582 MI.getOperand(2).ChangeToImmediate(0);
4583 MI.setDesc(
get(RISCV::ADDI));
4593 case RISCV::SLLI_UW:
4595 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4596 MI.getOperand(2).setImm(0);
4597 MI.setDesc(
get(RISCV::ADDI));
4605 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4606 MI.getOperand(2).getReg() == RISCV::X0) {
4607 MI.getOperand(2).ChangeToImmediate(0);
4608 MI.setDesc(
get(RISCV::ADDI));
4612 if (
MI.getOpcode() == RISCV::ADD_UW &&
4613 MI.getOperand(1).getReg() == RISCV::X0) {
4614 MI.removeOperand(1);
4616 MI.setDesc(
get(RISCV::ADDI));
4622 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4623 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4624 MI.setDesc(
get(RISCV::ADDI));
4630 case RISCV::ZEXT_H_RV32:
4631 case RISCV::ZEXT_H_RV64:
4634 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4636 MI.setDesc(
get(RISCV::ADDI));
4645 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4646 MI.getOperand(2).ChangeToImmediate(0);
4647 MI.setDesc(
get(RISCV::ADDI));
4654 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4656 MI.removeOperand(0);
4657 MI.insert(
MI.operands_begin() + 1, {MO0});
4662 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4664 MI.removeOperand(0);
4665 MI.insert(
MI.operands_begin() + 1, {MO0});
4666 MI.setDesc(
get(RISCV::BNE));
4671 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4673 MI.removeOperand(0);
4674 MI.insert(
MI.operands_begin() + 1, {MO0});
4675 MI.setDesc(
get(RISCV::BEQ));
4683#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4684 RISCV::PseudoV##OP##_##LMUL##_TIED
4686#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4687 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4688 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4689 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4690 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4691 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4692 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4694#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4695 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4696 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4699#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4700 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4701 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4702 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4703 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4704 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4705 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4708#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4709 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4711#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4712 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4713 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4714 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4715 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4716 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4717 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4718 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4719 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4720 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4722#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4723 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4724 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4727#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4728 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4729 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4730 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4731 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4732 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4733 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4734 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4735 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4736 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4738#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4739 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4740 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4741 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4742 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4743 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4745#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4746 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4747 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4748 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4749 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4750 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4757 switch (
MI.getOpcode()) {
4765 MI.getNumExplicitOperands() == 7 &&
4766 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4773 switch (
MI.getOpcode()) {
4785 .
add(
MI.getOperand(0))
4787 .
add(
MI.getOperand(1))
4788 .
add(
MI.getOperand(2))
4789 .
add(
MI.getOperand(3))
4790 .
add(
MI.getOperand(4))
4791 .
add(
MI.getOperand(5))
4792 .
add(
MI.getOperand(6));
4801 MI.getNumExplicitOperands() == 6);
4808 switch (
MI.getOpcode()) {
4820 .
add(
MI.getOperand(0))
4822 .
add(
MI.getOperand(1))
4823 .
add(
MI.getOperand(2))
4824 .
add(
MI.getOperand(3))
4825 .
add(
MI.getOperand(4))
4826 .
add(
MI.getOperand(5));
4833 unsigned NumOps =
MI.getNumOperands();
4836 if (
Op.isReg() &&
Op.isKill())
4844 if (
MI.getOperand(0).isEarlyClobber()) {
4858#undef CASE_WIDEOP_OPCODE_COMMON
4859#undef CASE_WIDEOP_OPCODE_LMULS
4860#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4861#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4862#undef CASE_FP_WIDEOP_OPCODE_COMMON
4863#undef CASE_FP_WIDEOP_OPCODE_LMULS
4864#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4865#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4874 if (ShiftAmount == 0)
4880 }
else if (
int ShXAmount, ShiftAmount;
4882 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4885 switch (ShXAmount) {
4887 Opc = RISCV::SH1ADD;
4890 Opc = RISCV::SH2ADD;
4893 Opc = RISCV::SH3ADD;
4929 }
else if (
STI.hasStdExtZmmul()) {
4939 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
4940 if (Amount & (1U << ShiftAmount)) {
4944 .
addImm(ShiftAmount - PrevShiftAmount)
4946 if (Amount >> (ShiftAmount + 1)) {
4960 PrevShiftAmount = ShiftAmount;
4963 assert(Acc &&
"Expected valid accumulator");
4973 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4981 ?
STI.getTailDupAggressiveThreshold()
4988 unsigned Opcode =
MI.getOpcode();
4989 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
4998 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
5000 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
5003std::optional<std::pair<unsigned, unsigned>>
5007 return std::nullopt;
5008 case RISCV::PseudoVSPILL2_M1:
5009 case RISCV::PseudoVRELOAD2_M1:
5010 return std::make_pair(2u, 1u);
5011 case RISCV::PseudoVSPILL2_M2:
5012 case RISCV::PseudoVRELOAD2_M2:
5013 return std::make_pair(2u, 2u);
5014 case RISCV::PseudoVSPILL2_M4:
5015 case RISCV::PseudoVRELOAD2_M4:
5016 return std::make_pair(2u, 4u);
5017 case RISCV::PseudoVSPILL3_M1:
5018 case RISCV::PseudoVRELOAD3_M1:
5019 return std::make_pair(3u, 1u);
5020 case RISCV::PseudoVSPILL3_M2:
5021 case RISCV::PseudoVRELOAD3_M2:
5022 return std::make_pair(3u, 2u);
5023 case RISCV::PseudoVSPILL4_M1:
5024 case RISCV::PseudoVRELOAD4_M1:
5025 return std::make_pair(4u, 1u);
5026 case RISCV::PseudoVSPILL4_M2:
5027 case RISCV::PseudoVRELOAD4_M2:
5028 return std::make_pair(4u, 2u);
5029 case RISCV::PseudoVSPILL5_M1:
5030 case RISCV::PseudoVRELOAD5_M1:
5031 return std::make_pair(5u, 1u);
5032 case RISCV::PseudoVSPILL6_M1:
5033 case RISCV::PseudoVRELOAD6_M1:
5034 return std::make_pair(6u, 1u);
5035 case RISCV::PseudoVSPILL7_M1:
5036 case RISCV::PseudoVRELOAD7_M1:
5037 return std::make_pair(7u, 1u);
5038 case RISCV::PseudoVSPILL8_M1:
5039 case RISCV::PseudoVRELOAD8_M1:
5040 return std::make_pair(8u, 1u);
5045 int16_t MI1FrmOpIdx =
5046 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
5047 int16_t MI2FrmOpIdx =
5048 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
5049 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5056std::optional<unsigned>
5060 return std::nullopt;
5063 case RISCV::VSLL_VX:
5064 case RISCV::VSRL_VX:
5065 case RISCV::VSRA_VX:
5067 case RISCV::VSSRL_VX:
5068 case RISCV::VSSRA_VX:
5070 case RISCV::VROL_VX:
5071 case RISCV::VROR_VX:
5076 case RISCV::VNSRL_WX:
5077 case RISCV::VNSRA_WX:
5079 case RISCV::VNCLIPU_WX:
5080 case RISCV::VNCLIP_WX:
5082 case RISCV::VWSLL_VX:
5087 case RISCV::VADD_VX:
5088 case RISCV::VSUB_VX:
5089 case RISCV::VRSUB_VX:
5091 case RISCV::VWADDU_VX:
5092 case RISCV::VWSUBU_VX:
5093 case RISCV::VWADD_VX:
5094 case RISCV::VWSUB_VX:
5095 case RISCV::VWADDU_WX:
5096 case RISCV::VWSUBU_WX:
5097 case RISCV::VWADD_WX:
5098 case RISCV::VWSUB_WX:
5100 case RISCV::VADC_VXM:
5101 case RISCV::VADC_VIM:
5102 case RISCV::VMADC_VXM:
5103 case RISCV::VMADC_VIM:
5104 case RISCV::VMADC_VX:
5105 case RISCV::VSBC_VXM:
5106 case RISCV::VMSBC_VXM:
5107 case RISCV::VMSBC_VX:
5109 case RISCV::VAND_VX:
5111 case RISCV::VXOR_VX:
5113 case RISCV::VMSEQ_VX:
5114 case RISCV::VMSNE_VX:
5115 case RISCV::VMSLTU_VX:
5116 case RISCV::VMSLT_VX:
5117 case RISCV::VMSLEU_VX:
5118 case RISCV::VMSLE_VX:
5119 case RISCV::VMSGTU_VX:
5120 case RISCV::VMSGT_VX:
5122 case RISCV::VMINU_VX:
5123 case RISCV::VMIN_VX:
5124 case RISCV::VMAXU_VX:
5125 case RISCV::VMAX_VX:
5127 case RISCV::VMUL_VX:
5128 case RISCV::VMULH_VX:
5129 case RISCV::VMULHU_VX:
5130 case RISCV::VMULHSU_VX:
5132 case RISCV::VDIVU_VX:
5133 case RISCV::VDIV_VX:
5134 case RISCV::VREMU_VX:
5135 case RISCV::VREM_VX:
5137 case RISCV::VWMUL_VX:
5138 case RISCV::VWMULU_VX:
5139 case RISCV::VWMULSU_VX:
5141 case RISCV::VMACC_VX:
5142 case RISCV::VNMSAC_VX:
5143 case RISCV::VMADD_VX:
5144 case RISCV::VNMSUB_VX:
5146 case RISCV::VWMACCU_VX:
5147 case RISCV::VWMACC_VX:
5148 case RISCV::VWMACCSU_VX:
5149 case RISCV::VWMACCUS_VX:
5151 case RISCV::VMERGE_VXM:
5153 case RISCV::VMV_V_X:
5155 case RISCV::VSADDU_VX:
5156 case RISCV::VSADD_VX:
5157 case RISCV::VSSUBU_VX:
5158 case RISCV::VSSUB_VX:
5160 case RISCV::VAADDU_VX:
5161 case RISCV::VAADD_VX:
5162 case RISCV::VASUBU_VX:
5163 case RISCV::VASUB_VX:
5165 case RISCV::VSMUL_VX:
5167 case RISCV::VMV_S_X:
5169 case RISCV::VANDN_VX:
5170 return 1U << Log2SEW;
5176 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5179 return RVV->BaseInstr;
5189 unsigned Scaled = Log2SEW + (DestEEW - 1);
5203 return std::nullopt;
5208 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5209 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5210 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5211 LHS.getReg() == RHS.getReg())
5215 if (LHS.isImm() && LHS.getImm() == 0)
5221 if (!LHSImm || !RHSImm)
5223 return LHSImm <= RHSImm;
5235 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5237 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5247 std::optional<bool> createTripCountGreaterCondition(
5248 int TC, MachineBasicBlock &
MBB,
5249 SmallVectorImpl<MachineOperand> &CondParam)
override {
5257 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5259 void adjustTripCount(
int TripCountAdjust)
override {}
5263std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5271 if (
TBB == LoopBB && FBB == LoopBB)
5278 assert((
TBB == LoopBB || FBB == LoopBB) &&
5279 "The Loop must be a single-basic-block loop");
5290 if (!Reg.isVirtual())
5297 if (LHS && LHS->isPHI())
5299 if (RHS && RHS->isPHI())
5302 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5308 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5325 case RISCV::FDIV_H_INX:
5326 case RISCV::FDIV_S_INX:
5327 case RISCV::FDIV_D_INX:
5328 case RISCV::FDIV_D_IN32X:
5329 case RISCV::FSQRT_H:
5330 case RISCV::FSQRT_S:
5331 case RISCV::FSQRT_D:
5332 case RISCV::FSQRT_H_INX:
5333 case RISCV::FSQRT_S_INX:
5334 case RISCV::FSQRT_D_INX:
5335 case RISCV::FSQRT_D_IN32X:
5337 case RISCV::VDIV_VV:
5338 case RISCV::VDIV_VX:
5339 case RISCV::VDIVU_VV:
5340 case RISCV::VDIVU_VX:
5341 case RISCV::VREM_VV:
5342 case RISCV::VREM_VX:
5343 case RISCV::VREMU_VV:
5344 case RISCV::VREMU_VX:
5346 case RISCV::VFDIV_VV:
5347 case RISCV::VFDIV_VF:
5348 case RISCV::VFRDIV_VF:
5349 case RISCV::VFSQRT_V:
5350 case RISCV::VFRSQRT7_V:
5356 if (
MI->getOpcode() != TargetOpcode::COPY)
5361 Register DstReg =
MI->getOperand(0).getReg();
5364 :
TRI->getMinimalPhysRegClass(DstReg);
5374 auto [RCLMul, RCFractional] =
5376 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5380 if (
MI.memoperands_empty())
5395 if (MO.getReg().isPhysical())
5398 if (MO.getReg().isPhysical())
5400 bool SawStore =
false;
5403 if (
II->definesRegister(PhysReg,
nullptr))
5406 if (
II->definesRegister(PhysReg,
nullptr) ||
5407 II->readsRegister(PhysReg,
nullptr))
5409 if (
II->mayStore()) {
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To)
Return true if moving From down to To won't cause any physical register reads or writes to be clobber...
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
self_iterator getIterator()
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getInverseBranchOpcode(unsigned BCC)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
constexpr bool isShiftedUInt(uint64_t x)
Checks if an unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.