41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
71#define GET_RISCVVPseudosTable_IMPL
72#include "RISCVGenSearchableTables.inc"
78#define GET_RISCVMaskedPseudosTable_IMPL
79#include "RISCVGenSearchableTables.inc"
85 RISCV::ADJCALLSTACKUP),
88#define GET_INSTRINFO_HELPERS
89#include "RISCVGenInstrInfo.inc"
92 if (
STI.hasStdExtZca())
101 int &FrameIndex)
const {
111 case RISCV::VL1RE8_V:
112 case RISCV::VL1RE16_V:
113 case RISCV::VL1RE32_V:
114 case RISCV::VL1RE64_V:
117 case RISCV::VL2RE8_V:
118 case RISCV::VL2RE16_V:
119 case RISCV::VL2RE32_V:
120 case RISCV::VL2RE64_V:
123 case RISCV::VL4RE8_V:
124 case RISCV::VL4RE16_V:
125 case RISCV::VL4RE32_V:
126 case RISCV::VL4RE64_V:
129 case RISCV::VL8RE8_V:
130 case RISCV::VL8RE16_V:
131 case RISCV::VL8RE32_V:
132 case RISCV::VL8RE64_V:
140 switch (
MI.getOpcode()) {
164 case RISCV::VL1RE8_V:
165 case RISCV::VL2RE8_V:
166 case RISCV::VL4RE8_V:
167 case RISCV::VL8RE8_V:
168 if (!
MI.getOperand(1).isFI())
170 FrameIndex =
MI.getOperand(1).getIndex();
173 return MI.getOperand(0).getReg();
176 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
177 MI.getOperand(2).getImm() == 0) {
178 FrameIndex =
MI.getOperand(1).getIndex();
179 return MI.getOperand(0).getReg();
186 int &FrameIndex)
const {
194 switch (
MI.getOpcode()) {
219 if (!
MI.getOperand(1).isFI())
221 FrameIndex =
MI.getOperand(1).getIndex();
224 return MI.getOperand(0).getReg();
227 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
228 MI.getOperand(2).getImm() == 0) {
229 FrameIndex =
MI.getOperand(1).getIndex();
230 return MI.getOperand(0).getReg();
240 case RISCV::VFMV_V_F:
243 case RISCV::VFMV_S_F:
245 return MI.getOperand(1).isUndef();
253 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
264 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
265 "Unexpected COPY instruction.");
269 bool FoundDef =
false;
270 bool FirstVSetVLI =
false;
271 unsigned FirstSEW = 0;
274 if (
MBBI->isMetaInstruction())
277 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
287 unsigned FirstVType =
MBBI->getOperand(2).getImm();
292 if (FirstLMul != LMul)
297 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
303 unsigned VType =
MBBI->getOperand(2).getImm();
321 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
323 }
else if (
MBBI->getNumDefs()) {
326 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
332 if (!MO.isReg() || !MO.isDef())
334 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
349 if (MO.getReg() != SrcReg)
390 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
391 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
393 assert(!Fractional &&
"It is impossible be fractional lmul here.");
394 unsigned NumRegs = NF * LMulVal;
400 SrcEncoding += NumRegs - 1;
401 DstEncoding += NumRegs - 1;
407 unsigned,
unsigned> {
415 uint16_t Diff = DstEncoding - SrcEncoding;
416 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
417 DstEncoding % 8 == 7)
419 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
420 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
421 DstEncoding % 4 == 3)
423 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
424 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
425 DstEncoding % 2 == 1)
427 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
430 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
435 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
437 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
438 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
440 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
441 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
443 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
446 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
449 while (
I != NumRegs) {
454 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
455 GetCopyInfo(SrcEncoding, DstEncoding);
459 if (LMul == LMulCopied &&
462 if (DefMBBI->getOpcode() == VIOpc)
469 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
471 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
479 MIB = MIB.add(DefMBBI->getOperand(2));
487 MIB.addImm(Log2SEW ? Log2SEW : 3);
499 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
500 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
509 bool RenamableDest,
bool RenamableSrc)
const {
513 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
520 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
526 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
532 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
534 if (
STI.hasStdExtZdinx()) {
543 if (
STI.hasStdExtP()) {
552 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
553 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
555 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
557 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
561 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
562 .
addReg(EvenReg, KillFlag)
565 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
572 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
573 RISCV::GPRRegClass.
contains(DstReg)) {
575 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
580 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
582 if (
STI.hasStdExtZfh()) {
583 Opc = RISCV::FSGNJ_H;
586 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
587 "Unexpected extensions");
589 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
590 &RISCV::FPR32RegClass);
591 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
592 &RISCV::FPR32RegClass);
593 Opc = RISCV::FSGNJ_S;
597 .
addReg(SrcReg, KillFlag);
601 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
604 .
addReg(SrcReg, KillFlag);
608 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
611 .
addReg(SrcReg, KillFlag);
615 if (RISCV::FPR32RegClass.
contains(DstReg) &&
616 RISCV::GPRRegClass.
contains(SrcReg)) {
618 .
addReg(SrcReg, KillFlag);
622 if (RISCV::GPRRegClass.
contains(DstReg) &&
623 RISCV::FPR32RegClass.
contains(SrcReg)) {
625 .
addReg(SrcReg, KillFlag);
629 if (RISCV::FPR64RegClass.
contains(DstReg) &&
630 RISCV::GPRRegClass.
contains(SrcReg)) {
631 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
633 .
addReg(SrcReg, KillFlag);
637 if (RISCV::GPRRegClass.
contains(DstReg) &&
638 RISCV::FPR64RegClass.
contains(SrcReg)) {
639 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
641 .
addReg(SrcReg, KillFlag);
647 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
658 Register SrcReg,
bool IsKill,
int FI,
667 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
668 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
670 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
671 Opcode = RISCV::SH_INX;
672 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
673 Opcode = RISCV::SW_INX;
674 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
675 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
676 Alignment >=
STI.getZilsdAlign()) {
677 Opcode = RISCV::SD_RV32;
679 Opcode = RISCV::PseudoRV32ZdinxSD;
681 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
683 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
685 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
687 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
688 Opcode = RISCV::VS1R_V;
689 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
690 Opcode = RISCV::VS2R_V;
691 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
692 Opcode = RISCV::VS4R_V;
693 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
694 Opcode = RISCV::VS8R_V;
695 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
696 Opcode = RISCV::PseudoVSPILL2_M1;
697 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
698 Opcode = RISCV::PseudoVSPILL2_M2;
699 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
700 Opcode = RISCV::PseudoVSPILL2_M4;
701 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
702 Opcode = RISCV::PseudoVSPILL3_M1;
703 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
704 Opcode = RISCV::PseudoVSPILL3_M2;
705 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
706 Opcode = RISCV::PseudoVSPILL4_M1;
707 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
708 Opcode = RISCV::PseudoVSPILL4_M2;
709 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
710 Opcode = RISCV::PseudoVSPILL5_M1;
711 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
712 Opcode = RISCV::PseudoVSPILL6_M1;
713 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
714 Opcode = RISCV::PseudoVSPILL7_M1;
715 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
716 Opcode = RISCV::PseudoVSPILL8_M1;
759 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
760 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
762 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
763 Opcode = RISCV::LH_INX;
764 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
765 Opcode = RISCV::LW_INX;
766 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
767 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
768 Alignment >=
STI.getZilsdAlign()) {
769 Opcode = RISCV::LD_RV32;
771 Opcode = RISCV::PseudoRV32ZdinxLD;
773 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
775 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
777 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
779 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
780 Opcode = RISCV::VL1RE8_V;
781 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
782 Opcode = RISCV::VL2RE8_V;
783 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
784 Opcode = RISCV::VL4RE8_V;
785 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
786 Opcode = RISCV::VL8RE8_V;
787 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
788 Opcode = RISCV::PseudoVRELOAD2_M1;
789 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
790 Opcode = RISCV::PseudoVRELOAD2_M2;
791 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
792 Opcode = RISCV::PseudoVRELOAD2_M4;
793 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
794 Opcode = RISCV::PseudoVRELOAD3_M1;
795 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
796 Opcode = RISCV::PseudoVRELOAD3_M2;
797 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
798 Opcode = RISCV::PseudoVRELOAD4_M1;
799 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
800 Opcode = RISCV::PseudoVRELOAD4_M2;
801 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
802 Opcode = RISCV::PseudoVRELOAD5_M1;
803 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
804 Opcode = RISCV::PseudoVRELOAD6_M1;
805 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
806 Opcode = RISCV::PseudoVRELOAD7_M1;
807 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
808 Opcode = RISCV::PseudoVRELOAD8_M1;
846 if (
Ops.size() != 1 ||
Ops[0] != 1)
849 switch (
MI.getOpcode()) {
851 if (RISCVInstrInfo::isSEXT_W(
MI))
853 if (RISCVInstrInfo::isZEXT_W(
MI))
855 if (RISCVInstrInfo::isZEXT_B(
MI))
862 case RISCV::ZEXT_H_RV32:
863 case RISCV::ZEXT_H_RV64:
870 case RISCV::VMV_X_S: {
873 if (ST.getXLen() < (1U << Log2SEW))
888 case RISCV::VFMV_F_S: {
915 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
924 return RISCV::PseudoCCLB;
926 return RISCV::PseudoCCLBU;
928 return RISCV::PseudoCCLH;
930 return RISCV::PseudoCCLHU;
932 return RISCV::PseudoCCLW;
934 return RISCV::PseudoCCLWU;
936 return RISCV::PseudoCCLD;
938 return RISCV::PseudoCCQC_E_LB;
939 case RISCV::QC_E_LBU:
940 return RISCV::PseudoCCQC_E_LBU;
942 return RISCV::PseudoCCQC_E_LH;
943 case RISCV::QC_E_LHU:
944 return RISCV::PseudoCCQC_E_LHU;
946 return RISCV::PseudoCCQC_E_LW;
957 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
962 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
966 if (
Ops.size() != 1 || (
Ops[0] != 1 &&
Ops[0] != 2))
969 bool Invert =
Ops[0] == 2;
978 MI.getDebugLoc(),
get(PredOpc), DestReg);
989 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
995 NewMI.
add({
MI.getOperand(
MI.getNumExplicitOperands() - 2),
996 MI.getOperand(
MI.getNumExplicitOperands() - 1)});
1005 bool DstIsDead)
const {
1021 bool SrcRenamable =
false;
1025 bool LastItem = ++Num == Seq.
size();
1030 switch (Inst.getOpndKind()) {
1040 .
addReg(SrcReg, SrcRegState)
1047 .
addReg(SrcReg, SrcRegState)
1048 .
addReg(SrcReg, SrcRegState)
1054 .
addReg(SrcReg, SrcRegState)
1062 SrcRenamable = DstRenamable;
1072 case RISCV::CV_BEQIMM:
1073 case RISCV::QC_BEQI:
1074 case RISCV::QC_E_BEQI:
1075 case RISCV::NDS_BBC:
1076 case RISCV::NDS_BEQC:
1080 case RISCV::QC_BNEI:
1081 case RISCV::QC_E_BNEI:
1082 case RISCV::CV_BNEIMM:
1083 case RISCV::NDS_BBS:
1084 case RISCV::NDS_BNEC:
1087 case RISCV::QC_BLTI:
1088 case RISCV::QC_E_BLTI:
1091 case RISCV::QC_BGEI:
1092 case RISCV::QC_E_BGEI:
1095 case RISCV::QC_BLTUI:
1096 case RISCV::QC_E_BLTUI:
1099 case RISCV::QC_BGEUI:
1100 case RISCV::QC_E_BGEUI:
1132 "Unknown conditional branch");
1143 case RISCV::QC_MVEQ:
1144 return RISCV::QC_MVNE;
1145 case RISCV::QC_MVNE:
1146 return RISCV::QC_MVEQ;
1147 case RISCV::QC_MVLT:
1148 return RISCV::QC_MVGE;
1149 case RISCV::QC_MVGE:
1150 return RISCV::QC_MVLT;
1151 case RISCV::QC_MVLTU:
1152 return RISCV::QC_MVGEU;
1153 case RISCV::QC_MVGEU:
1154 return RISCV::QC_MVLTU;
1155 case RISCV::QC_MVEQI:
1156 return RISCV::QC_MVNEI;
1157 case RISCV::QC_MVNEI:
1158 return RISCV::QC_MVEQI;
1159 case RISCV::QC_MVLTI:
1160 return RISCV::QC_MVGEI;
1161 case RISCV::QC_MVGEI:
1162 return RISCV::QC_MVLTI;
1163 case RISCV::QC_MVLTUI:
1164 return RISCV::QC_MVGEUI;
1165 case RISCV::QC_MVGEUI:
1166 return RISCV::QC_MVLTUI;
1171 switch (SelectOpc) {
1190 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1200 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1205 return RISCV::CV_BEQIMM;
1207 return RISCV::CV_BNEIMM;
1210 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1215 return RISCV::QC_BEQI;
1217 return RISCV::QC_BNEI;
1219 return RISCV::QC_BLTI;
1221 return RISCV::QC_BGEI;
1224 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1229 return RISCV::QC_BLTUI;
1231 return RISCV::QC_BGEUI;
1234 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1239 return RISCV::QC_E_BEQI;
1241 return RISCV::QC_E_BNEI;
1243 return RISCV::QC_E_BLTI;
1245 return RISCV::QC_E_BGEI;
1248 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1253 return RISCV::QC_E_BLTUI;
1255 return RISCV::QC_E_BGEUI;
1258 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1263 return RISCV::NDS_BBC;
1265 return RISCV::NDS_BBS;
1268 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1273 return RISCV::NDS_BEQC;
1275 return RISCV::NDS_BNEC;
1321 case RISCV::CV_BEQIMM:
1322 return RISCV::CV_BNEIMM;
1323 case RISCV::CV_BNEIMM:
1324 return RISCV::CV_BEQIMM;
1325 case RISCV::QC_BEQI:
1326 return RISCV::QC_BNEI;
1327 case RISCV::QC_BNEI:
1328 return RISCV::QC_BEQI;
1329 case RISCV::QC_BLTI:
1330 return RISCV::QC_BGEI;
1331 case RISCV::QC_BGEI:
1332 return RISCV::QC_BLTI;
1333 case RISCV::QC_BLTUI:
1334 return RISCV::QC_BGEUI;
1335 case RISCV::QC_BGEUI:
1336 return RISCV::QC_BLTUI;
1337 case RISCV::QC_E_BEQI:
1338 return RISCV::QC_E_BNEI;
1339 case RISCV::QC_E_BNEI:
1340 return RISCV::QC_E_BEQI;
1341 case RISCV::QC_E_BLTI:
1342 return RISCV::QC_E_BGEI;
1343 case RISCV::QC_E_BGEI:
1344 return RISCV::QC_E_BLTI;
1345 case RISCV::QC_E_BLTUI:
1346 return RISCV::QC_E_BGEUI;
1347 case RISCV::QC_E_BGEUI:
1348 return RISCV::QC_E_BLTUI;
1349 case RISCV::NDS_BBC:
1350 return RISCV::NDS_BBS;
1351 case RISCV::NDS_BBS:
1352 return RISCV::NDS_BBC;
1353 case RISCV::NDS_BEQC:
1354 return RISCV::NDS_BNEC;
1355 case RISCV::NDS_BNEC:
1356 return RISCV::NDS_BEQC;
1364 bool AllowModify)
const {
1365 TBB = FBB =
nullptr;
1370 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1376 int NumTerminators = 0;
1377 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1380 if (J->getDesc().isUnconditionalBranch() ||
1381 J->getDesc().isIndirectBranch()) {
1388 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1389 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1390 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1393 I = FirstUncondOrIndirectBr;
1397 if (
I->getDesc().isIndirectBranch())
1401 if (
I->isPreISelOpcode())
1405 if (NumTerminators > 2)
1409 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1415 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1421 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1422 I->getDesc().isUnconditionalBranch()) {
1433 int *BytesRemoved)
const {
1440 if (!
I->getDesc().isUnconditionalBranch() &&
1441 !
I->getDesc().isConditionalBranch())
1447 I->eraseFromParent();
1451 if (
I ==
MBB.begin())
1454 if (!
I->getDesc().isConditionalBranch())
1460 I->eraseFromParent();
1473 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1475 "RISC-V branch conditions have two components!");
1509 assert(RS &&
"RegScavenger required for long branching");
1511 "new block should be inserted for expanding unconditional branch");
1514 "restore block should be inserted for restoring clobbered registers");
1523 "Branch offsets outside of the signed 32-bit range not supported");
1529 auto II =
MBB.end();
1535 RS->enterBasicBlockEnd(
MBB);
1537 if (
STI.hasStdExtZicfilp())
1538 RC = &RISCV::GPRX7RegClass;
1540 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1544 RS->setRegUsed(TmpGPR);
1549 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1551 if (
STI.hasStdExtZicfilp())
1555 if (FrameIndex == -1)
1560 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1563 MI.getOperand(1).setMBB(&RestoreBB);
1567 TRI->eliminateFrameIndex(RestoreBB.
back(),
1577 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1587 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1588 MI->getOperand(1).getReg() == RISCV::X0) {
1589 Imm =
MI->getOperand(2).getImm();
1594 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1595 MI->getOperand(1).getReg() == RISCV::X0 &&
1596 MI->getOperand(2).getImm() == 11) {
1610 if (Reg == RISCV::X0) {
1618 bool IsSigned =
false;
1619 bool IsEquality =
false;
1620 switch (
MI.getOpcode()) {
1656 MI.eraseFromParent();
1682 auto searchConst = [&](int64_t C1) ->
Register {
1684 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1687 I.getOperand(0).getReg().isVirtual();
1690 return DefC1->getOperand(0).getReg();
1702 if (
isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1703 MRI.
hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1705 if (
Register RegZ = searchConst(C0 + 1)) {
1713 MI.eraseFromParent();
1723 if (
isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1726 if (
Register RegZ = searchConst(C0 - 1)) {
1734 MI.eraseFromParent();
1744 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1746 int NumOp =
MI.getNumExplicitOperands();
1747 return MI.getOperand(NumOp - 1).getMBB();
1751 int64_t BrOffset)
const {
1752 unsigned XLen =
STI.getXLen();
1759 case RISCV::NDS_BBC:
1760 case RISCV::NDS_BBS:
1761 case RISCV::NDS_BEQC:
1762 case RISCV::NDS_BNEC:
1772 case RISCV::CV_BEQIMM:
1773 case RISCV::CV_BNEIMM:
1774 case RISCV::QC_BEQI:
1775 case RISCV::QC_BNEI:
1776 case RISCV::QC_BGEI:
1777 case RISCV::QC_BLTI:
1778 case RISCV::QC_BLTUI:
1779 case RISCV::QC_BGEUI:
1780 case RISCV::QC_E_BEQI:
1781 case RISCV::QC_E_BNEI:
1782 case RISCV::QC_E_BGEI:
1783 case RISCV::QC_E_BLTI:
1784 case RISCV::QC_E_BLTUI:
1785 case RISCV::QC_E_BGEUI:
1788 case RISCV::PseudoBR:
1790 case RISCV::PseudoJump:
1801 case RISCV::ADD:
return RISCV::PseudoCCADD;
1802 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1803 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1804 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1805 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1806 case RISCV::AND:
return RISCV::PseudoCCAND;
1807 case RISCV::OR:
return RISCV::PseudoCCOR;
1808 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1809 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1810 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1811 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1812 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1813 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1814 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1815 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1816 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1818 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1819 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1820 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1821 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1822 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1823 case RISCV::ORI:
return RISCV::PseudoCCORI;
1824 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1826 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1827 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1828 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1829 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1830 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1832 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1833 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1834 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1835 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1837 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1838 case RISCV::ORN:
return RISCV::PseudoCCORN;
1839 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1841 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1842 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1846 return RISCV::INSTRUCTION_LIST_END;
1855 if (!
Reg.isVirtual())
1863 if (!STI.hasShortForwardBranchIMinMax() &&
1864 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1865 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1868 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1875 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1876 MI->getOperand(1).getReg() == RISCV::X0)
1881 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1894 bool DontMoveAcrossStores =
true;
1895 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1903 bool PreferFalse)
const {
1904 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1905 "Unknown select instruction");
1906 if (!
STI.hasShortForwardBranchIALU())
1912 bool Invert = !
DefMI;
1920 Register DestReg =
MI.getOperand(0).getReg();
1926 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1933 NewMI.
add(FalseReg);
1941 unsigned BCCOpcode =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1947 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 2));
1948 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 1));
1958 if (
DefMI->getParent() !=
MI.getParent())
1962 DefMI->eraseFromParent();
1967 if (
MI.isMetaInstruction())
1970 unsigned Opcode =
MI.getOpcode();
1972 if (Opcode == TargetOpcode::INLINEASM ||
1973 Opcode == TargetOpcode::INLINEASM_BR) {
1975 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1980 if (
STI.hasStdExtZca()) {
1981 if (isCompressibleInst(
MI,
STI))
1988 if (Opcode == TargetOpcode::BUNDLE)
1989 return getInstBundleLength(
MI);
1991 if (
MI.getParent() &&
MI.getParent()->getParent()) {
1992 if (isCompressibleInst(
MI,
STI))
1997 case RISCV::PseudoMV_FPR16INX:
1998 case RISCV::PseudoMV_FPR32INX:
2000 return STI.hasStdExtZca() ? 2 : 4;
2002 case RISCV::PseudoCCMOVGPRNoX0:
2003 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2006 case RISCV::PseudoCCMOVGPR:
2007 case RISCV::PseudoCCADD:
2008 case RISCV::PseudoCCSUB:
2009 case RISCV::PseudoCCSLL:
2010 case RISCV::PseudoCCSRL:
2011 case RISCV::PseudoCCSRA:
2012 case RISCV::PseudoCCAND:
2013 case RISCV::PseudoCCOR:
2014 case RISCV::PseudoCCXOR:
2015 case RISCV::PseudoCCADDI:
2016 case RISCV::PseudoCCANDI:
2017 case RISCV::PseudoCCORI:
2018 case RISCV::PseudoCCXORI:
2019 case RISCV::PseudoCCLUI:
2020 case RISCV::PseudoCCSLLI:
2021 case RISCV::PseudoCCSRLI:
2022 case RISCV::PseudoCCSRAI:
2023 case RISCV::PseudoCCADDW:
2024 case RISCV::PseudoCCSUBW:
2025 case RISCV::PseudoCCSLLW:
2026 case RISCV::PseudoCCSRLW:
2027 case RISCV::PseudoCCSRAW:
2028 case RISCV::PseudoCCADDIW:
2029 case RISCV::PseudoCCSLLIW:
2030 case RISCV::PseudoCCSRLIW:
2031 case RISCV::PseudoCCSRAIW:
2032 case RISCV::PseudoCCANDN:
2033 case RISCV::PseudoCCORN:
2034 case RISCV::PseudoCCXNOR:
2035 case RISCV::PseudoCCMAX:
2036 case RISCV::PseudoCCMIN:
2037 case RISCV::PseudoCCMAXU:
2038 case RISCV::PseudoCCMINU:
2039 case RISCV::PseudoCCMUL:
2040 case RISCV::PseudoCCLB:
2041 case RISCV::PseudoCCLH:
2042 case RISCV::PseudoCCLW:
2043 case RISCV::PseudoCCLHU:
2044 case RISCV::PseudoCCLBU:
2045 case RISCV::PseudoCCLWU:
2046 case RISCV::PseudoCCLD:
2047 case RISCV::PseudoCCQC_LI:
2048 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2051 case RISCV::PseudoCCQC_E_LI:
2052 case RISCV::PseudoCCQC_E_LB:
2053 case RISCV::PseudoCCQC_E_LH:
2054 case RISCV::PseudoCCQC_E_LW:
2055 case RISCV::PseudoCCQC_E_LHU:
2056 case RISCV::PseudoCCQC_E_LBU:
2057 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2060 case TargetOpcode::STACKMAP:
2063 case TargetOpcode::PATCHPOINT:
2066 case TargetOpcode::STATEPOINT: {
2070 return std::max(NumBytes, 8U);
2072 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2073 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2074 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2077 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2078 F.hasFnAttribute(
"patchable-function-entry")) {
2080 if (
F.getFnAttribute(
"patchable-function-entry")
2082 .getAsInteger(10, Num))
2083 return get(Opcode).getSize();
2086 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2090 return STI.is64Bit() ? 68 : 44;
2093 return get(Opcode).getSize();
2097unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
2101 while (++
I != E &&
I->isInsideBundle()) {
2102 assert(!
I->isBundle() &&
"No nested bundle!");
2109 const unsigned Opcode =
MI.getOpcode();
2113 case RISCV::FSGNJ_D:
2114 case RISCV::FSGNJ_S:
2115 case RISCV::FSGNJ_H:
2116 case RISCV::FSGNJ_D_INX:
2117 case RISCV::FSGNJ_D_IN32X:
2118 case RISCV::FSGNJ_S_INX:
2119 case RISCV::FSGNJ_H_INX:
2121 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2122 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2126 return (
MI.getOperand(1).isReg() &&
2127 MI.getOperand(1).getReg() == RISCV::X0) ||
2128 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2130 return MI.isAsCheapAsAMove();
2133std::optional<DestSourcePair>
2137 switch (
MI.getOpcode()) {
2143 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2144 MI.getOperand(2).isReg())
2146 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2147 MI.getOperand(1).isReg())
2152 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2153 MI.getOperand(2).getImm() == 0)
2157 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2158 MI.getOperand(1).isReg())
2162 case RISCV::SH1ADD_UW:
2164 case RISCV::SH2ADD_UW:
2166 case RISCV::SH3ADD_UW:
2167 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2168 MI.getOperand(2).isReg())
2171 case RISCV::FSGNJ_D:
2172 case RISCV::FSGNJ_S:
2173 case RISCV::FSGNJ_H:
2174 case RISCV::FSGNJ_D_INX:
2175 case RISCV::FSGNJ_D_IN32X:
2176 case RISCV::FSGNJ_S_INX:
2177 case RISCV::FSGNJ_H_INX:
2179 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2180 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2184 return std::nullopt;
2192 const auto &SchedModel =
STI.getSchedModel();
2193 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2205 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2209 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2210 RISCV::OpName::frm) < 0;
2212 "New instructions require FRM whereas the old one does not have it");
2219 for (
auto *NewMI : InsInstrs) {
2221 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2222 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2264bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2265 bool Invert)
const {
2266#define OPCODE_LMUL_CASE(OPC) \
2267 case RISCV::OPC##_M1: \
2268 case RISCV::OPC##_M2: \
2269 case RISCV::OPC##_M4: \
2270 case RISCV::OPC##_M8: \
2271 case RISCV::OPC##_MF2: \
2272 case RISCV::OPC##_MF4: \
2273 case RISCV::OPC##_MF8
2275#define OPCODE_LMUL_MASK_CASE(OPC) \
2276 case RISCV::OPC##_M1_MASK: \
2277 case RISCV::OPC##_M2_MASK: \
2278 case RISCV::OPC##_M4_MASK: \
2279 case RISCV::OPC##_M8_MASK: \
2280 case RISCV::OPC##_MF2_MASK: \
2281 case RISCV::OPC##_MF4_MASK: \
2282 case RISCV::OPC##_MF8_MASK
2287 Opcode = *InvOpcode;
2304#undef OPCODE_LMUL_MASK_CASE
2305#undef OPCODE_LMUL_CASE
2308bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2319 const uint64_t TSFlags =
Desc.TSFlags;
2321 auto checkImmOperand = [&](
unsigned OpIdx) {
2325 auto checkRegOperand = [&](
unsigned OpIdx) {
2333 if (!checkRegOperand(1))
2348 bool SeenMI2 =
false;
2349 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2358 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2359 Register SrcReg = It->getOperand(1).getReg();
2377 if (MI1VReg != SrcReg)
2386 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2426bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2427 bool &Commuted)
const {
2431 "Expect the present of passthrough operand.");
2437 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2438 areRVVInstsReassociable(Inst, *MI2);
2442 return areRVVInstsReassociable(Inst, *MI1) &&
2443 (isVectorAssociativeAndCommutative(*MI1) ||
2444 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2451 if (!isVectorAssociativeAndCommutative(Inst) &&
2452 !isVectorAssociativeAndCommutative(Inst,
true))
2478 for (
unsigned I = 0;
I < 5; ++
I)
2484 bool &Commuted)
const {
2485 if (isVectorAssociativeAndCommutative(Inst) ||
2486 isVectorAssociativeAndCommutative(Inst,
true))
2487 return hasReassociableVectorSibling(Inst, Commuted);
2493 unsigned OperandIdx = Commuted ? 2 : 1;
2497 int16_t InstFrmOpIdx =
2498 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2499 int16_t SiblingFrmOpIdx =
2500 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2502 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2507 bool Invert)
const {
2508 if (isVectorAssociativeAndCommutative(Inst, Invert))
2516 Opc = *InverseOpcode;
2561std::optional<unsigned>
2563#define RVV_OPC_LMUL_CASE(OPC, INV) \
2564 case RISCV::OPC##_M1: \
2565 return RISCV::INV##_M1; \
2566 case RISCV::OPC##_M2: \
2567 return RISCV::INV##_M2; \
2568 case RISCV::OPC##_M4: \
2569 return RISCV::INV##_M4; \
2570 case RISCV::OPC##_M8: \
2571 return RISCV::INV##_M8; \
2572 case RISCV::OPC##_MF2: \
2573 return RISCV::INV##_MF2; \
2574 case RISCV::OPC##_MF4: \
2575 return RISCV::INV##_MF4; \
2576 case RISCV::OPC##_MF8: \
2577 return RISCV::INV##_MF8
2579#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2580 case RISCV::OPC##_M1_MASK: \
2581 return RISCV::INV##_M1_MASK; \
2582 case RISCV::OPC##_M2_MASK: \
2583 return RISCV::INV##_M2_MASK; \
2584 case RISCV::OPC##_M4_MASK: \
2585 return RISCV::INV##_M4_MASK; \
2586 case RISCV::OPC##_M8_MASK: \
2587 return RISCV::INV##_M8_MASK; \
2588 case RISCV::OPC##_MF2_MASK: \
2589 return RISCV::INV##_MF2_MASK; \
2590 case RISCV::OPC##_MF4_MASK: \
2591 return RISCV::INV##_MF4_MASK; \
2592 case RISCV::OPC##_MF8_MASK: \
2593 return RISCV::INV##_MF8_MASK
2597 return std::nullopt;
2599 return RISCV::FSUB_H;
2601 return RISCV::FSUB_S;
2603 return RISCV::FSUB_D;
2605 return RISCV::FADD_H;
2607 return RISCV::FADD_S;
2609 return RISCV::FADD_D;
2626#undef RVV_OPC_LMUL_MASK_CASE
2627#undef RVV_OPC_LMUL_CASE
2632 bool DoRegPressureReduce) {
2659 bool DoRegPressureReduce) {
2666 DoRegPressureReduce)) {
2672 DoRegPressureReduce)) {
2682 bool DoRegPressureReduce) {
2690 unsigned CombineOpc) {
2697 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2711 unsigned OuterShiftAmt) {
2717 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2744 case RISCV::SH1ADD_UW:
2746 case RISCV::SH2ADD_UW:
2748 case RISCV::SH3ADD_UW:
2794 bool DoRegPressureReduce)
const {
2803 DoRegPressureReduce);
2811 return RISCV::FMADD_H;
2813 return RISCV::FMADD_S;
2815 return RISCV::FMADD_D;
2860 bool Mul1IsKill = Mul1.
isKill();
2861 bool Mul2IsKill = Mul2.
isKill();
2862 bool AddendIsKill = Addend.
isKill();
2871 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2896 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2903 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2906 switch (InnerShiftAmt - OuterShiftAmt) {
2910 InnerOpc = RISCV::ADD;
2913 InnerOpc = RISCV::SH1ADD;
2916 InnerOpc = RISCV::SH2ADD;
2919 InnerOpc = RISCV::SH3ADD;
2937 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2954 DelInstrs, InstrIdxForVirtReg);
2981 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2983 unsigned OpType = Operand.OperandType;
2989 ErrInfo =
"Expected an immediate operand.";
2992 int64_t Imm = MO.
getImm();
2998#define CASE_OPERAND_UIMM(NUM) \
2999 case RISCVOp::OPERAND_UIMM##NUM: \
3000 Ok = isUInt<NUM>(Imm); \
3002#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
3003 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
3004 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
3005 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
3008#define CASE_OPERAND_SIMM(NUM) \
3009 case RISCVOp::OPERAND_SIMM##NUM: \
3010 Ok = isInt<NUM>(Imm); \
3044 Ok = Imm >= 1 && Imm <= 32;
3065 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3076 Ok = Imm >= -15 && Imm <= 16;
3104 Ok = Ok && Imm != 0;
3107 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3110 Ok = Imm >= 0 && Imm <= 10;
3113 Ok = Imm >= 0 && Imm <= 7;
3116 Ok = Imm >= 1 && Imm <= 10;
3119 Ok = Imm >= 2 && Imm <= 14;
3128 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3163 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3167 ErrInfo =
"Invalid immediate";
3176 ErrInfo =
"Expected a non-register operand.";
3180 ErrInfo =
"Invalid immediate";
3189 ErrInfo =
"Expected a non-register operand.";
3193 ErrInfo =
"Invalid immediate";
3201 ErrInfo =
"Expected a non-register operand.";
3205 ErrInfo =
"Invalid immediate";
3211 int64_t Imm = MO.
getImm();
3214 ErrInfo =
"Invalid immediate";
3217 }
else if (!MO.
isReg()) {
3218 ErrInfo =
"Expected a register or immediate operand.";
3224 ErrInfo =
"Expected a register or immediate operand.";
3234 if (!
Op.isImm() && !
Op.isReg()) {
3235 ErrInfo =
"Invalid operand type for VL operand";
3238 if (
Op.isReg() &&
Op.getReg().isValid()) {
3241 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3242 ErrInfo =
"Invalid register class for VL operand";
3247 ErrInfo =
"VL operand w/o SEW operand?";
3253 if (!
MI.getOperand(
OpIdx).isImm()) {
3254 ErrInfo =
"SEW value expected to be an immediate";
3259 ErrInfo =
"Unexpected SEW value";
3262 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3264 ErrInfo =
"Unexpected SEW value";
3270 if (!
MI.getOperand(
OpIdx).isImm()) {
3271 ErrInfo =
"Policy operand expected to be an immediate";
3276 ErrInfo =
"Invalid Policy Value";
3280 ErrInfo =
"policy operand w/o VL operand?";
3288 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3289 ErrInfo =
"policy operand w/o tied operand?";
3296 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3297 ErrInfo =
"dynamic rounding mode should read FRM";
3319 case RISCV::LD_RV32:
3329 case RISCV::SD_RV32:
3345 int64_t NewOffset = OldOffset + Disp;
3367 "Addressing mode not supported for folding");
3440 case RISCV::LD_RV32:
3443 case RISCV::SD_RV32:
3450 OffsetIsScalable =
false;
3466 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3474 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3477 auto Base1 = MO1->getValue();
3478 auto Base2 = MO2->getValue();
3479 if (!Base1 || !Base2)
3487 return Base1 == Base2;
3493 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3494 unsigned NumBytes)
const {
3497 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3502 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3508 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3514 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3564 int64_t OffsetA = 0, OffsetB = 0;
3570 int LowOffset = std::min(OffsetA, OffsetB);
3571 int HighOffset = std::max(OffsetA, OffsetB);
3572 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3574 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3581std::pair<unsigned, unsigned>
3584 return std::make_pair(TF & Mask, TF & ~Mask);
3590 static const std::pair<unsigned, const char *> TargetFlags[] = {
3591 {MO_CALL,
"riscv-call"},
3592 {MO_LO,
"riscv-lo"},
3593 {MO_HI,
"riscv-hi"},
3594 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3595 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3596 {MO_GOT_HI,
"riscv-got-hi"},
3597 {MO_TPREL_LO,
"riscv-tprel-lo"},
3598 {MO_TPREL_HI,
"riscv-tprel-hi"},
3599 {MO_TPREL_ADD,
"riscv-tprel-add"},
3600 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3601 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3602 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3603 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3604 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3605 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3613 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3626 unsigned &Flags)
const {
3645 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3646 F.hasFnAttribute(
"patchable-function-entry");
3651 return MI.readsRegister(RegNo,
TRI) ||
3652 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3657 return MI.modifiesRegister(RegNo,
TRI) ||
3658 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3662 if (!
MBB.back().isReturn())
3688 if (
C.back().isReturn() &&
3689 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3691 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3693 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3694 "the proposed outlined function call\n");
3700 if (
C.back().isReturn()) {
3702 "The candidate who uses return instruction must be outlined "
3714 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo);
3717std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3720 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3721 unsigned MinRepeats)
const {
3729 if (RepeatedSequenceLocs.size() < MinRepeats)
3730 return std::nullopt;
3734 unsigned InstrSizeCExt =
3736 unsigned CallOverhead = 0, FrameOverhead = 0;
3739 unsigned CFICount = 0;
3740 for (
auto &
I : Candidate) {
3741 if (
I.isCFIInstruction())
3752 std::vector<MCCFIInstruction> CFIInstructions =
3753 C.getMF()->getFrameInstructions();
3755 if (CFICount > 0 && CFICount != CFIInstructions.size())
3756 return std::nullopt;
3764 CallOverhead = 4 + InstrSizeCExt;
3771 FrameOverhead = InstrSizeCExt;
3777 return std::nullopt;
3779 for (
auto &
C : RepeatedSequenceLocs)
3780 C.setCallInfo(MOCI, CallOverhead);
3782 unsigned SequenceSize = 0;
3783 for (
auto &
MI : Candidate)
3786 return std::make_unique<outliner::OutlinedFunction>(
3787 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3793 unsigned Flags)
const {
3797 MBB->getParent()->getSubtarget().getRegisterInfo();
3798 const auto &
F =
MI.getMF()->getFunction();
3803 if (
MI.isCFIInstruction())
3811 for (
const auto &MO :
MI.operands()) {
3816 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3817 F.hasSection() ||
F.getSectionPrefix()))
3834 MBB.addLiveIn(RISCV::X5);
3849 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3857 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3868 return std::nullopt;
3872 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3873 MI.getOperand(2).isImm())
3874 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3876 return std::nullopt;
3884 std::string GenericComment =
3886 if (!GenericComment.empty())
3887 return GenericComment;
3891 return std::string();
3893 std::string Comment;
3900 switch (OpInfo.OperandType) {
3903 unsigned Imm =
Op.getImm();
3908 unsigned Imm =
Op.getImm();
3913 unsigned Imm =
Op.getImm();
3919 unsigned Log2SEW =
Op.getImm();
3920 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3926 unsigned Policy =
Op.getImm();
3928 "Invalid Policy Value");
3934 if (
Op.isImm() &&
Op.getImm() == -1)
3956#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3957 RISCV::Pseudo##OP##_##LMUL
3959#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3960 RISCV::Pseudo##OP##_##LMUL##_MASK
3962#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3963 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3964 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3966#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3967 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3968 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3969 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3970 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3971 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3972 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3974#define CASE_RVV_OPCODE_UNMASK(OP) \
3975 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3976 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3978#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3979 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3980 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3981 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3982 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3983 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3984 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3986#define CASE_RVV_OPCODE_MASK(OP) \
3987 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3988 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3990#define CASE_RVV_OPCODE_WIDEN(OP) \
3991 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3992 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3994#define CASE_RVV_OPCODE(OP) \
3995 CASE_RVV_OPCODE_UNMASK(OP): \
3996 case CASE_RVV_OPCODE_MASK(OP)
4000#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
4001 RISCV::PseudoV##OP##_##TYPE##_##LMUL
4003#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
4004 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
4005 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
4006 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
4007 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
4008 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
4009 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
4010 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
4013#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
4014 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
4016#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
4017 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
4018 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
4019 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
4020 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
4022#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
4023 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
4024 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
4026#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
4027 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
4028 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
4030#define CASE_VFMA_OPCODE_VV(OP) \
4031 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
4032 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
4033 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
4034 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
4036#define CASE_VFMA_SPLATS(OP) \
4037 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
4038 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
4039 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
4040 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
4044 unsigned &SrcOpIdx1,
4045 unsigned &SrcOpIdx2)
const {
4047 if (!
Desc.isCommutable())
4050 switch (
MI.getOpcode()) {
4051 case RISCV::TH_MVEQZ:
4052 case RISCV::TH_MVNEZ:
4056 if (
MI.getOperand(2).getReg() == RISCV::X0)
4059 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4060 case RISCV::QC_SELECTIEQ:
4061 case RISCV::QC_SELECTINE:
4062 case RISCV::QC_SELECTIIEQ:
4063 case RISCV::QC_SELECTIINE:
4064 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4065 case RISCV::QC_MVEQ:
4066 case RISCV::QC_MVNE:
4067 case RISCV::QC_MVLT:
4068 case RISCV::QC_MVGE:
4069 case RISCV::QC_MVLTU:
4070 case RISCV::QC_MVGEU:
4071 case RISCV::QC_MVEQI:
4072 case RISCV::QC_MVNEI:
4073 case RISCV::QC_MVLTI:
4074 case RISCV::QC_MVGEI:
4075 case RISCV::QC_MVLTUI:
4076 case RISCV::QC_MVGEUI:
4077 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4078 case RISCV::TH_MULA:
4079 case RISCV::TH_MULAW:
4080 case RISCV::TH_MULAH:
4081 case RISCV::TH_MULS:
4082 case RISCV::TH_MULSW:
4083 case RISCV::TH_MULSH:
4085 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4086 case RISCV::PseudoCCMOVGPRNoX0:
4087 case RISCV::PseudoCCMOVGPR:
4089 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4120 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4147 unsigned CommutableOpIdx1 = 1;
4148 unsigned CommutableOpIdx2 = 3;
4149 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4170 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4172 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4176 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4177 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4183 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4184 SrcOpIdx2 == CommuteAnyOperandIndex) {
4187 unsigned CommutableOpIdx1 = SrcOpIdx1;
4188 if (SrcOpIdx1 == SrcOpIdx2) {
4191 CommutableOpIdx1 = 1;
4192 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4194 CommutableOpIdx1 = SrcOpIdx2;
4199 unsigned CommutableOpIdx2;
4200 if (CommutableOpIdx1 != 1) {
4202 CommutableOpIdx2 = 1;
4204 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4209 if (Op1Reg !=
MI.getOperand(2).getReg())
4210 CommutableOpIdx2 = 2;
4212 CommutableOpIdx2 = 3;
4217 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4230#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4231 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4232 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4235#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4236 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4237 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4238 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4239 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4240 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4241 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4242 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4245#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4246 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4247 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4250#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4251 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4252 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4253 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4254 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4256#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4257 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4258 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4260#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4261 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4262 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4264#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4265 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4266 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4267 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4268 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4270#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4271 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4272 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4273 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4274 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4280 unsigned OpIdx2)
const {
4283 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4287 switch (
MI.getOpcode()) {
4288 case RISCV::TH_MVEQZ:
4289 case RISCV::TH_MVNEZ: {
4290 auto &WorkingMI = cloneIfNew(
MI);
4291 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4292 : RISCV::TH_MVEQZ));
4296 case RISCV::QC_SELECTIEQ:
4297 case RISCV::QC_SELECTINE:
4298 case RISCV::QC_SELECTIIEQ:
4299 case RISCV::QC_SELECTIINE:
4301 case RISCV::QC_MVEQ:
4302 case RISCV::QC_MVNE:
4303 case RISCV::QC_MVLT:
4304 case RISCV::QC_MVGE:
4305 case RISCV::QC_MVLTU:
4306 case RISCV::QC_MVGEU:
4307 case RISCV::QC_MVEQI:
4308 case RISCV::QC_MVNEI:
4309 case RISCV::QC_MVLTI:
4310 case RISCV::QC_MVGEI:
4311 case RISCV::QC_MVLTUI:
4312 case RISCV::QC_MVGEUI: {
4313 auto &WorkingMI = cloneIfNew(
MI);
4318 case RISCV::PseudoCCMOVGPRNoX0:
4319 case RISCV::PseudoCCMOVGPR: {
4321 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
4323 auto &WorkingMI = cloneIfNew(
MI);
4324 WorkingMI.getOperand(
MI.getNumExplicitOperands() - 3).setImm(BCC);
4348 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4349 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4351 switch (
MI.getOpcode()) {
4374 auto &WorkingMI = cloneIfNew(
MI);
4375 WorkingMI.setDesc(
get(
Opc));
4385 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4388 if (OpIdx1 == 3 || OpIdx2 == 3) {
4390 switch (
MI.getOpcode()) {
4401 auto &WorkingMI = cloneIfNew(
MI);
4402 WorkingMI.setDesc(
get(
Opc));
4414#undef CASE_VMA_CHANGE_OPCODE_COMMON
4415#undef CASE_VMA_CHANGE_OPCODE_LMULS
4416#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4417#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4418#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4419#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4420#undef CASE_VFMA_CHANGE_OPCODE_VV
4421#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4423#undef CASE_RVV_OPCODE_UNMASK_LMUL
4424#undef CASE_RVV_OPCODE_MASK_LMUL
4425#undef CASE_RVV_OPCODE_LMUL
4426#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4427#undef CASE_RVV_OPCODE_UNMASK
4428#undef CASE_RVV_OPCODE_MASK_WIDEN
4429#undef CASE_RVV_OPCODE_MASK
4430#undef CASE_RVV_OPCODE_WIDEN
4431#undef CASE_RVV_OPCODE
4433#undef CASE_VMA_OPCODE_COMMON
4434#undef CASE_VMA_OPCODE_LMULS
4435#undef CASE_VFMA_OPCODE_COMMON
4436#undef CASE_VFMA_OPCODE_LMULS_M1
4437#undef CASE_VFMA_OPCODE_LMULS_MF2
4438#undef CASE_VFMA_OPCODE_LMULS_MF4
4439#undef CASE_VFMA_OPCODE_VV
4440#undef CASE_VFMA_SPLATS
4443 switch (
MI.getOpcode()) {
4451 if (
MI.getOperand(1).getReg() == RISCV::X0)
4452 commuteInstruction(
MI);
4454 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4455 MI.getOperand(2).ChangeToImmediate(0);
4456 MI.setDesc(
get(RISCV::ADDI));
4460 if (
MI.getOpcode() == RISCV::XOR &&
4461 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4462 MI.getOperand(1).setReg(RISCV::X0);
4463 MI.getOperand(2).ChangeToImmediate(0);
4464 MI.setDesc(
get(RISCV::ADDI));
4471 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4472 MI.setDesc(
get(RISCV::ADDI));
4478 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4479 MI.getOperand(2).ChangeToImmediate(0);
4480 MI.setDesc(
get(RISCV::ADDI));
4486 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4487 MI.getOperand(2).ChangeToImmediate(0);
4488 MI.setDesc(
get(RISCV::ADDIW));
4495 if (
MI.getOperand(1).getReg() == RISCV::X0)
4496 commuteInstruction(
MI);
4498 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4499 MI.getOperand(2).ChangeToImmediate(0);
4500 MI.setDesc(
get(RISCV::ADDIW));
4505 case RISCV::SH1ADD_UW:
4507 case RISCV::SH2ADD_UW:
4509 case RISCV::SH3ADD_UW:
4511 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4512 MI.removeOperand(1);
4514 MI.setDesc(
get(RISCV::ADDI));
4518 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4519 MI.removeOperand(2);
4520 unsigned Opc =
MI.getOpcode();
4521 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4522 Opc == RISCV::SH3ADD_UW) {
4524 MI.setDesc(
get(RISCV::SLLI_UW));
4528 MI.setDesc(
get(RISCV::SLLI));
4542 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4543 MI.getOperand(2).getReg() == RISCV::X0) {
4544 MI.getOperand(1).setReg(RISCV::X0);
4545 MI.getOperand(2).ChangeToImmediate(0);
4546 MI.setDesc(
get(RISCV::ADDI));
4552 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4553 MI.getOperand(2).setImm(0);
4554 MI.setDesc(
get(RISCV::ADDI));
4562 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4563 MI.getOperand(2).ChangeToImmediate(0);
4564 MI.setDesc(
get(RISCV::ADDI));
4568 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4569 MI.getOperand(2).ChangeToImmediate(0);
4570 MI.setDesc(
get(RISCV::ADDI));
4578 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4579 MI.getOperand(2).ChangeToImmediate(0);
4580 MI.setDesc(
get(RISCV::ADDI));
4590 case RISCV::SLLI_UW:
4592 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4593 MI.getOperand(2).setImm(0);
4594 MI.setDesc(
get(RISCV::ADDI));
4602 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4603 MI.getOperand(2).getReg() == RISCV::X0) {
4604 MI.getOperand(2).ChangeToImmediate(0);
4605 MI.setDesc(
get(RISCV::ADDI));
4609 if (
MI.getOpcode() == RISCV::ADD_UW &&
4610 MI.getOperand(1).getReg() == RISCV::X0) {
4611 MI.removeOperand(1);
4613 MI.setDesc(
get(RISCV::ADDI));
4619 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4620 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4621 MI.setDesc(
get(RISCV::ADDI));
4627 case RISCV::ZEXT_H_RV32:
4628 case RISCV::ZEXT_H_RV64:
4631 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4633 MI.setDesc(
get(RISCV::ADDI));
4642 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4643 MI.getOperand(2).ChangeToImmediate(0);
4644 MI.setDesc(
get(RISCV::ADDI));
4651 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4653 MI.removeOperand(0);
4654 MI.insert(
MI.operands_begin() + 1, {MO0});
4659 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4661 MI.removeOperand(0);
4662 MI.insert(
MI.operands_begin() + 1, {MO0});
4663 MI.setDesc(
get(RISCV::BNE));
4668 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4670 MI.removeOperand(0);
4671 MI.insert(
MI.operands_begin() + 1, {MO0});
4672 MI.setDesc(
get(RISCV::BEQ));
4680#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4681 RISCV::PseudoV##OP##_##LMUL##_TIED
4683#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4684 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4685 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4686 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4687 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4688 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4689 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4691#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4692 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4693 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4696#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4697 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4698 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4699 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4700 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4701 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4702 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4705#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4706 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4708#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4709 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4710 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4711 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4712 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4713 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4714 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4715 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4716 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4717 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4719#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4720 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4721 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4724#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4725 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4726 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4727 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4728 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4729 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4730 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4731 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4732 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4733 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4735#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4736 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4737 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4738 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4739 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4740 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4742#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4743 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4744 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4745 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4746 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4747 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4754 switch (
MI.getOpcode()) {
4762 MI.getNumExplicitOperands() == 7 &&
4763 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4770 switch (
MI.getOpcode()) {
4782 .
add(
MI.getOperand(0))
4784 .
add(
MI.getOperand(1))
4785 .
add(
MI.getOperand(2))
4786 .
add(
MI.getOperand(3))
4787 .
add(
MI.getOperand(4))
4788 .
add(
MI.getOperand(5))
4789 .
add(
MI.getOperand(6));
4798 MI.getNumExplicitOperands() == 6);
4805 switch (
MI.getOpcode()) {
4817 .
add(
MI.getOperand(0))
4819 .
add(
MI.getOperand(1))
4820 .
add(
MI.getOperand(2))
4821 .
add(
MI.getOperand(3))
4822 .
add(
MI.getOperand(4))
4823 .
add(
MI.getOperand(5));
4830 unsigned NumOps =
MI.getNumOperands();
4833 if (
Op.isReg() &&
Op.isKill())
4841 if (
MI.getOperand(0).isEarlyClobber()) {
4855#undef CASE_WIDEOP_OPCODE_COMMON
4856#undef CASE_WIDEOP_OPCODE_LMULS
4857#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4858#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4859#undef CASE_FP_WIDEOP_OPCODE_COMMON
4860#undef CASE_FP_WIDEOP_OPCODE_LMULS
4861#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4862#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4871 if (ShiftAmount == 0)
4877 }
else if (
int ShXAmount, ShiftAmount;
4879 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4882 switch (ShXAmount) {
4884 Opc = RISCV::SH1ADD;
4887 Opc = RISCV::SH2ADD;
4890 Opc = RISCV::SH3ADD;
4926 }
else if (
STI.hasStdExtZmmul()) {
4936 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
4937 if (Amount & (1U << ShiftAmount)) {
4941 .
addImm(ShiftAmount - PrevShiftAmount)
4943 if (Amount >> (ShiftAmount + 1)) {
4957 PrevShiftAmount = ShiftAmount;
4960 assert(Acc &&
"Expected valid accumulator");
4970 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4978 ?
STI.getTailDupAggressiveThreshold()
4985 unsigned Opcode =
MI.getOpcode();
4986 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
4995 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
4997 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
5000std::optional<std::pair<unsigned, unsigned>>
5004 return std::nullopt;
5005 case RISCV::PseudoVSPILL2_M1:
5006 case RISCV::PseudoVRELOAD2_M1:
5007 return std::make_pair(2u, 1u);
5008 case RISCV::PseudoVSPILL2_M2:
5009 case RISCV::PseudoVRELOAD2_M2:
5010 return std::make_pair(2u, 2u);
5011 case RISCV::PseudoVSPILL2_M4:
5012 case RISCV::PseudoVRELOAD2_M4:
5013 return std::make_pair(2u, 4u);
5014 case RISCV::PseudoVSPILL3_M1:
5015 case RISCV::PseudoVRELOAD3_M1:
5016 return std::make_pair(3u, 1u);
5017 case RISCV::PseudoVSPILL3_M2:
5018 case RISCV::PseudoVRELOAD3_M2:
5019 return std::make_pair(3u, 2u);
5020 case RISCV::PseudoVSPILL4_M1:
5021 case RISCV::PseudoVRELOAD4_M1:
5022 return std::make_pair(4u, 1u);
5023 case RISCV::PseudoVSPILL4_M2:
5024 case RISCV::PseudoVRELOAD4_M2:
5025 return std::make_pair(4u, 2u);
5026 case RISCV::PseudoVSPILL5_M1:
5027 case RISCV::PseudoVRELOAD5_M1:
5028 return std::make_pair(5u, 1u);
5029 case RISCV::PseudoVSPILL6_M1:
5030 case RISCV::PseudoVRELOAD6_M1:
5031 return std::make_pair(6u, 1u);
5032 case RISCV::PseudoVSPILL7_M1:
5033 case RISCV::PseudoVRELOAD7_M1:
5034 return std::make_pair(7u, 1u);
5035 case RISCV::PseudoVSPILL8_M1:
5036 case RISCV::PseudoVRELOAD8_M1:
5037 return std::make_pair(8u, 1u);
5042 int16_t MI1FrmOpIdx =
5043 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
5044 int16_t MI2FrmOpIdx =
5045 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
5046 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5053std::optional<unsigned>
5057 return std::nullopt;
5060 case RISCV::VSLL_VX:
5061 case RISCV::VSRL_VX:
5062 case RISCV::VSRA_VX:
5064 case RISCV::VSSRL_VX:
5065 case RISCV::VSSRA_VX:
5067 case RISCV::VROL_VX:
5068 case RISCV::VROR_VX:
5073 case RISCV::VNSRL_WX:
5074 case RISCV::VNSRA_WX:
5076 case RISCV::VNCLIPU_WX:
5077 case RISCV::VNCLIP_WX:
5079 case RISCV::VWSLL_VX:
5084 case RISCV::VADD_VX:
5085 case RISCV::VSUB_VX:
5086 case RISCV::VRSUB_VX:
5088 case RISCV::VWADDU_VX:
5089 case RISCV::VWSUBU_VX:
5090 case RISCV::VWADD_VX:
5091 case RISCV::VWSUB_VX:
5092 case RISCV::VWADDU_WX:
5093 case RISCV::VWSUBU_WX:
5094 case RISCV::VWADD_WX:
5095 case RISCV::VWSUB_WX:
5097 case RISCV::VADC_VXM:
5098 case RISCV::VADC_VIM:
5099 case RISCV::VMADC_VXM:
5100 case RISCV::VMADC_VIM:
5101 case RISCV::VMADC_VX:
5102 case RISCV::VSBC_VXM:
5103 case RISCV::VMSBC_VXM:
5104 case RISCV::VMSBC_VX:
5106 case RISCV::VAND_VX:
5108 case RISCV::VXOR_VX:
5110 case RISCV::VMSEQ_VX:
5111 case RISCV::VMSNE_VX:
5112 case RISCV::VMSLTU_VX:
5113 case RISCV::VMSLT_VX:
5114 case RISCV::VMSLEU_VX:
5115 case RISCV::VMSLE_VX:
5116 case RISCV::VMSGTU_VX:
5117 case RISCV::VMSGT_VX:
5119 case RISCV::VMINU_VX:
5120 case RISCV::VMIN_VX:
5121 case RISCV::VMAXU_VX:
5122 case RISCV::VMAX_VX:
5124 case RISCV::VMUL_VX:
5125 case RISCV::VMULH_VX:
5126 case RISCV::VMULHU_VX:
5127 case RISCV::VMULHSU_VX:
5129 case RISCV::VDIVU_VX:
5130 case RISCV::VDIV_VX:
5131 case RISCV::VREMU_VX:
5132 case RISCV::VREM_VX:
5134 case RISCV::VWMUL_VX:
5135 case RISCV::VWMULU_VX:
5136 case RISCV::VWMULSU_VX:
5138 case RISCV::VMACC_VX:
5139 case RISCV::VNMSAC_VX:
5140 case RISCV::VMADD_VX:
5141 case RISCV::VNMSUB_VX:
5143 case RISCV::VWMACCU_VX:
5144 case RISCV::VWMACC_VX:
5145 case RISCV::VWMACCSU_VX:
5146 case RISCV::VWMACCUS_VX:
5148 case RISCV::VMERGE_VXM:
5150 case RISCV::VMV_V_X:
5152 case RISCV::VSADDU_VX:
5153 case RISCV::VSADD_VX:
5154 case RISCV::VSSUBU_VX:
5155 case RISCV::VSSUB_VX:
5157 case RISCV::VAADDU_VX:
5158 case RISCV::VAADD_VX:
5159 case RISCV::VASUBU_VX:
5160 case RISCV::VASUB_VX:
5162 case RISCV::VSMUL_VX:
5164 case RISCV::VMV_S_X:
5166 case RISCV::VANDN_VX:
5167 return 1U << Log2SEW;
5173 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5176 return RVV->BaseInstr;
5186 unsigned Scaled = Log2SEW + (DestEEW - 1);
5200 return std::nullopt;
5205 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5206 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5207 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5208 LHS.getReg() == RHS.getReg())
5212 if (LHS.isImm() && LHS.getImm() == 0)
5218 if (!LHSImm || !RHSImm)
5220 return LHSImm <= RHSImm;
5232 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5234 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5244 std::optional<bool> createTripCountGreaterCondition(
5245 int TC, MachineBasicBlock &
MBB,
5246 SmallVectorImpl<MachineOperand> &CondParam)
override {
5254 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5256 void adjustTripCount(
int TripCountAdjust)
override {}
5260std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5268 if (
TBB == LoopBB && FBB == LoopBB)
5275 assert((
TBB == LoopBB || FBB == LoopBB) &&
5276 "The Loop must be a single-basic-block loop");
5287 if (!Reg.isVirtual())
5294 if (LHS && LHS->isPHI())
5296 if (RHS && RHS->isPHI())
5299 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5305 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5322 case RISCV::FDIV_H_INX:
5323 case RISCV::FDIV_S_INX:
5324 case RISCV::FDIV_D_INX:
5325 case RISCV::FDIV_D_IN32X:
5326 case RISCV::FSQRT_H:
5327 case RISCV::FSQRT_S:
5328 case RISCV::FSQRT_D:
5329 case RISCV::FSQRT_H_INX:
5330 case RISCV::FSQRT_S_INX:
5331 case RISCV::FSQRT_D_INX:
5332 case RISCV::FSQRT_D_IN32X:
5334 case RISCV::VDIV_VV:
5335 case RISCV::VDIV_VX:
5336 case RISCV::VDIVU_VV:
5337 case RISCV::VDIVU_VX:
5338 case RISCV::VREM_VV:
5339 case RISCV::VREM_VX:
5340 case RISCV::VREMU_VV:
5341 case RISCV::VREMU_VX:
5343 case RISCV::VFDIV_VV:
5344 case RISCV::VFDIV_VF:
5345 case RISCV::VFRDIV_VF:
5346 case RISCV::VFSQRT_V:
5347 case RISCV::VFRSQRT7_V:
5353 if (
MI->getOpcode() != TargetOpcode::COPY)
5358 Register DstReg =
MI->getOperand(0).getReg();
5361 :
TRI->getMinimalPhysRegClass(DstReg);
5371 auto [RCLMul, RCFractional] =
5373 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5377 if (
MI.memoperands_empty())
5392 if (MO.getReg().isPhysical())
5395 if (MO.getReg().isPhysical())
5397 bool SawStore =
false;
5400 if (
II->definesRegister(PhysReg,
nullptr))
5403 if (
II->definesRegister(PhysReg,
nullptr) ||
5404 II->readsRegister(PhysReg,
nullptr))
5406 if (
II->mayStore()) {
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To)
Return true if moving From down to To won't cause any physical register reads or writes to be clobber...
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
self_iterator getIterator()
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getInverseBranchOpcode(unsigned BCC)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
constexpr bool isShiftedUInt(uint64_t x)
Checks if a unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.