41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
69 cl::desc(
"Enable RegSave strategy in machine outliner (save X5 to a "
70 "temporary register when X5 is live across outlined calls)."));
76#define GET_RISCVVPseudosTable_IMPL
77#include "RISCVGenSearchableTables.inc"
83#define GET_RISCVMaskedPseudosTable_IMPL
84#include "RISCVGenSearchableTables.inc"
90 RISCV::ADJCALLSTACKUP),
93#define GET_INSTRINFO_HELPERS
94#include "RISCVGenInstrInfo.inc"
97 if (
STI.hasStdExtZca())
106 int &FrameIndex)
const {
116 case RISCV::VL1RE8_V:
117 case RISCV::VL1RE16_V:
118 case RISCV::VL1RE32_V:
119 case RISCV::VL1RE64_V:
122 case RISCV::VL2RE8_V:
123 case RISCV::VL2RE16_V:
124 case RISCV::VL2RE32_V:
125 case RISCV::VL2RE64_V:
128 case RISCV::VL4RE8_V:
129 case RISCV::VL4RE16_V:
130 case RISCV::VL4RE32_V:
131 case RISCV::VL4RE64_V:
134 case RISCV::VL8RE8_V:
135 case RISCV::VL8RE16_V:
136 case RISCV::VL8RE32_V:
137 case RISCV::VL8RE64_V:
145 switch (
MI.getOpcode()) {
169 case RISCV::VL1RE8_V:
170 case RISCV::VL2RE8_V:
171 case RISCV::VL4RE8_V:
172 case RISCV::VL8RE8_V:
173 if (!
MI.getOperand(1).isFI())
175 FrameIndex =
MI.getOperand(1).getIndex();
178 return MI.getOperand(0).getReg();
181 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
182 MI.getOperand(2).getImm() == 0) {
183 FrameIndex =
MI.getOperand(1).getIndex();
184 return MI.getOperand(0).getReg();
191 int &FrameIndex)
const {
199 switch (
MI.getOpcode()) {
224 if (!
MI.getOperand(1).isFI())
226 FrameIndex =
MI.getOperand(1).getIndex();
229 return MI.getOperand(0).getReg();
232 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
233 MI.getOperand(2).getImm() == 0) {
234 FrameIndex =
MI.getOperand(1).getIndex();
235 return MI.getOperand(0).getReg();
245 case RISCV::VFMV_V_F:
248 case RISCV::VFMV_S_F:
250 return MI.getOperand(1).isUndef();
258 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
269 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
270 "Unexpected COPY instruction.");
274 bool FoundDef =
false;
275 bool FirstVSetVLI =
false;
276 unsigned FirstSEW = 0;
279 if (
MBBI->isMetaInstruction())
282 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
292 unsigned FirstVType =
MBBI->getOperand(2).getImm();
297 if (FirstLMul != LMul)
302 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
308 unsigned VType =
MBBI->getOperand(2).getImm();
326 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
328 }
else if (
MBBI->getNumDefs()) {
331 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
337 if (!MO.isReg() || !MO.isDef())
339 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
354 if (MO.getReg() != SrcReg)
395 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
396 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
398 assert(!Fractional &&
"It is impossible be fractional lmul here.");
399 unsigned NumRegs = NF * LMulVal;
405 SrcEncoding += NumRegs - 1;
406 DstEncoding += NumRegs - 1;
412 unsigned,
unsigned> {
420 uint16_t Diff = DstEncoding - SrcEncoding;
421 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
422 DstEncoding % 8 == 7)
424 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
425 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
426 DstEncoding % 4 == 3)
428 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
429 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
430 DstEncoding % 2 == 1)
432 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
435 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
440 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
442 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
443 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
445 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
446 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
448 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
451 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
454 while (
I != NumRegs) {
459 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
460 GetCopyInfo(SrcEncoding, DstEncoding);
464 if (LMul == LMulCopied &&
467 if (DefMBBI->getOpcode() == VIOpc)
474 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
476 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
484 MIB = MIB.add(DefMBBI->getOperand(2));
492 MIB.addImm(Log2SEW ? Log2SEW : 3);
504 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
505 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
514 bool RenamableDest,
bool RenamableSrc)
const {
518 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
525 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
531 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
537 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
539 if (
STI.hasStdExtZdinx()) {
548 if (
STI.hasStdExtP()) {
557 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
558 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
560 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
562 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
566 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
567 .
addReg(EvenReg, KillFlag)
570 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
577 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
578 RISCV::GPRRegClass.
contains(DstReg)) {
580 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
585 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
587 if (
STI.hasStdExtZfh()) {
588 Opc = RISCV::FSGNJ_H;
591 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
592 "Unexpected extensions");
594 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
595 &RISCV::FPR32RegClass);
596 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
597 &RISCV::FPR32RegClass);
598 Opc = RISCV::FSGNJ_S;
602 .
addReg(SrcReg, KillFlag);
606 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
609 .
addReg(SrcReg, KillFlag);
613 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
616 .
addReg(SrcReg, KillFlag);
620 if (RISCV::FPR32RegClass.
contains(DstReg) &&
621 RISCV::GPRRegClass.
contains(SrcReg)) {
623 .
addReg(SrcReg, KillFlag);
627 if (RISCV::GPRRegClass.
contains(DstReg) &&
628 RISCV::FPR32RegClass.
contains(SrcReg)) {
630 .
addReg(SrcReg, KillFlag);
634 if (RISCV::FPR64RegClass.
contains(DstReg) &&
635 RISCV::GPRRegClass.
contains(SrcReg)) {
636 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
638 .
addReg(SrcReg, KillFlag);
642 if (RISCV::GPRRegClass.
contains(DstReg) &&
643 RISCV::FPR64RegClass.
contains(SrcReg)) {
644 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
646 .
addReg(SrcReg, KillFlag);
652 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
663 Register SrcReg,
bool IsKill,
int FI,
672 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
673 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
675 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
676 Opcode = RISCV::SH_INX;
677 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
678 Opcode = RISCV::SW_INX;
679 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
680 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
681 Alignment >=
STI.getZilsdAlign()) {
682 Opcode = RISCV::SD_RV32;
684 Opcode = RISCV::PseudoRV32ZdinxSD;
686 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
688 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
690 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
692 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
693 Opcode = RISCV::VS1R_V;
694 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
695 Opcode = RISCV::VS2R_V;
696 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
697 Opcode = RISCV::VS4R_V;
698 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
699 Opcode = RISCV::VS8R_V;
700 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
701 Opcode = RISCV::PseudoVSPILL2_M1;
702 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
703 Opcode = RISCV::PseudoVSPILL2_M2;
704 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
705 Opcode = RISCV::PseudoVSPILL2_M4;
706 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
707 Opcode = RISCV::PseudoVSPILL3_M1;
708 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
709 Opcode = RISCV::PseudoVSPILL3_M2;
710 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
711 Opcode = RISCV::PseudoVSPILL4_M1;
712 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
713 Opcode = RISCV::PseudoVSPILL4_M2;
714 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
715 Opcode = RISCV::PseudoVSPILL5_M1;
716 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
717 Opcode = RISCV::PseudoVSPILL6_M1;
718 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
719 Opcode = RISCV::PseudoVSPILL7_M1;
720 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
721 Opcode = RISCV::PseudoVSPILL8_M1;
764 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
765 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
767 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
768 Opcode = RISCV::LH_INX;
769 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
770 Opcode = RISCV::LW_INX;
771 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
772 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
773 Alignment >=
STI.getZilsdAlign()) {
774 Opcode = RISCV::LD_RV32;
776 Opcode = RISCV::PseudoRV32ZdinxLD;
778 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
780 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
782 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
784 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
785 Opcode = RISCV::VL1RE8_V;
786 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
787 Opcode = RISCV::VL2RE8_V;
788 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
789 Opcode = RISCV::VL4RE8_V;
790 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
791 Opcode = RISCV::VL8RE8_V;
792 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
793 Opcode = RISCV::PseudoVRELOAD2_M1;
794 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
795 Opcode = RISCV::PseudoVRELOAD2_M2;
796 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
797 Opcode = RISCV::PseudoVRELOAD2_M4;
798 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
799 Opcode = RISCV::PseudoVRELOAD3_M1;
800 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
801 Opcode = RISCV::PseudoVRELOAD3_M2;
802 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
803 Opcode = RISCV::PseudoVRELOAD4_M1;
804 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
805 Opcode = RISCV::PseudoVRELOAD4_M2;
806 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
807 Opcode = RISCV::PseudoVRELOAD5_M1;
808 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
809 Opcode = RISCV::PseudoVRELOAD6_M1;
810 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
811 Opcode = RISCV::PseudoVRELOAD7_M1;
812 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
813 Opcode = RISCV::PseudoVRELOAD8_M1;
851 if (
Ops.size() != 1 ||
Ops[0] != 1)
854 switch (
MI.getOpcode()) {
856 if (RISCVInstrInfo::isSEXT_W(
MI))
858 if (RISCVInstrInfo::isZEXT_W(
MI))
860 if (RISCVInstrInfo::isZEXT_B(
MI))
867 case RISCV::ZEXT_H_RV32:
868 case RISCV::ZEXT_H_RV64:
875 case RISCV::VMV_X_S: {
878 if (ST.getXLen() < (1U << Log2SEW))
893 case RISCV::VFMV_F_S: {
921 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
930 return RISCV::PseudoCCLB;
932 return RISCV::PseudoCCLBU;
934 return RISCV::PseudoCCLH;
936 return RISCV::PseudoCCLHU;
938 return RISCV::PseudoCCLW;
940 return RISCV::PseudoCCLWU;
942 return RISCV::PseudoCCLD;
944 return RISCV::PseudoCCQC_E_LB;
945 case RISCV::QC_E_LBU:
946 return RISCV::PseudoCCQC_E_LBU;
948 return RISCV::PseudoCCQC_E_LH;
949 case RISCV::QC_E_LHU:
950 return RISCV::PseudoCCQC_E_LHU;
952 return RISCV::PseudoCCQC_E_LW;
963 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
968 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
972 if (
Ops.size() != 1 || (
Ops[0] != 1 &&
Ops[0] != 2))
975 bool Invert =
Ops[0] == 2;
984 MI.getDebugLoc(),
get(PredOpc), DestReg);
995 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1001 NewMI.
add({
MI.getOperand(
MI.getNumExplicitOperands() - 2),
1002 MI.getOperand(
MI.getNumExplicitOperands() - 1)});
1011 bool DstIsDead)
const {
1027 bool SrcRenamable =
false;
1031 bool LastItem = ++Num == Seq.
size();
1036 switch (Inst.getOpndKind()) {
1046 .
addReg(SrcReg, SrcRegState)
1053 .
addReg(SrcReg, SrcRegState)
1054 .
addReg(SrcReg, SrcRegState)
1060 .
addReg(SrcReg, SrcRegState)
1068 SrcRenamable = DstRenamable;
1078 case RISCV::CV_BEQIMM:
1079 case RISCV::QC_BEQI:
1080 case RISCV::QC_E_BEQI:
1081 case RISCV::NDS_BBC:
1082 case RISCV::NDS_BEQC:
1086 case RISCV::QC_BNEI:
1087 case RISCV::QC_E_BNEI:
1088 case RISCV::CV_BNEIMM:
1089 case RISCV::NDS_BBS:
1090 case RISCV::NDS_BNEC:
1093 case RISCV::QC_BLTI:
1094 case RISCV::QC_E_BLTI:
1097 case RISCV::QC_BGEI:
1098 case RISCV::QC_E_BGEI:
1101 case RISCV::QC_BLTUI:
1102 case RISCV::QC_E_BLTUI:
1105 case RISCV::QC_BGEUI:
1106 case RISCV::QC_E_BGEUI:
1138 "Unknown conditional branch");
1149 case RISCV::QC_MVEQ:
1150 return RISCV::QC_MVNE;
1151 case RISCV::QC_MVNE:
1152 return RISCV::QC_MVEQ;
1153 case RISCV::QC_MVLT:
1154 return RISCV::QC_MVGE;
1155 case RISCV::QC_MVGE:
1156 return RISCV::QC_MVLT;
1157 case RISCV::QC_MVLTU:
1158 return RISCV::QC_MVGEU;
1159 case RISCV::QC_MVGEU:
1160 return RISCV::QC_MVLTU;
1161 case RISCV::QC_MVEQI:
1162 return RISCV::QC_MVNEI;
1163 case RISCV::QC_MVNEI:
1164 return RISCV::QC_MVEQI;
1165 case RISCV::QC_MVLTI:
1166 return RISCV::QC_MVGEI;
1167 case RISCV::QC_MVGEI:
1168 return RISCV::QC_MVLTI;
1169 case RISCV::QC_MVLTUI:
1170 return RISCV::QC_MVGEUI;
1171 case RISCV::QC_MVGEUI:
1172 return RISCV::QC_MVLTUI;
1177 switch (SelectOpc) {
1196 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1206 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1211 return RISCV::CV_BEQIMM;
1213 return RISCV::CV_BNEIMM;
1216 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1221 return RISCV::QC_BEQI;
1223 return RISCV::QC_BNEI;
1225 return RISCV::QC_BLTI;
1227 return RISCV::QC_BGEI;
1230 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1235 return RISCV::QC_BLTUI;
1237 return RISCV::QC_BGEUI;
1240 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1245 return RISCV::QC_E_BEQI;
1247 return RISCV::QC_E_BNEI;
1249 return RISCV::QC_E_BLTI;
1251 return RISCV::QC_E_BGEI;
1254 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1259 return RISCV::QC_E_BLTUI;
1261 return RISCV::QC_E_BGEUI;
1264 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1269 return RISCV::NDS_BBC;
1271 return RISCV::NDS_BBS;
1274 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1279 return RISCV::NDS_BEQC;
1281 return RISCV::NDS_BNEC;
1327 case RISCV::CV_BEQIMM:
1328 return RISCV::CV_BNEIMM;
1329 case RISCV::CV_BNEIMM:
1330 return RISCV::CV_BEQIMM;
1331 case RISCV::QC_BEQI:
1332 return RISCV::QC_BNEI;
1333 case RISCV::QC_BNEI:
1334 return RISCV::QC_BEQI;
1335 case RISCV::QC_BLTI:
1336 return RISCV::QC_BGEI;
1337 case RISCV::QC_BGEI:
1338 return RISCV::QC_BLTI;
1339 case RISCV::QC_BLTUI:
1340 return RISCV::QC_BGEUI;
1341 case RISCV::QC_BGEUI:
1342 return RISCV::QC_BLTUI;
1343 case RISCV::QC_E_BEQI:
1344 return RISCV::QC_E_BNEI;
1345 case RISCV::QC_E_BNEI:
1346 return RISCV::QC_E_BEQI;
1347 case RISCV::QC_E_BLTI:
1348 return RISCV::QC_E_BGEI;
1349 case RISCV::QC_E_BGEI:
1350 return RISCV::QC_E_BLTI;
1351 case RISCV::QC_E_BLTUI:
1352 return RISCV::QC_E_BGEUI;
1353 case RISCV::QC_E_BGEUI:
1354 return RISCV::QC_E_BLTUI;
1355 case RISCV::NDS_BBC:
1356 return RISCV::NDS_BBS;
1357 case RISCV::NDS_BBS:
1358 return RISCV::NDS_BBC;
1359 case RISCV::NDS_BEQC:
1360 return RISCV::NDS_BNEC;
1361 case RISCV::NDS_BNEC:
1362 return RISCV::NDS_BEQC;
1370 bool AllowModify)
const {
1371 TBB = FBB =
nullptr;
1376 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1382 int NumTerminators = 0;
1383 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1386 if (J->getDesc().isUnconditionalBranch() ||
1387 J->getDesc().isIndirectBranch()) {
1394 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1395 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1396 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1399 I = FirstUncondOrIndirectBr;
1403 if (
I->getDesc().isIndirectBranch())
1407 if (
I->isPreISelOpcode())
1411 if (NumTerminators > 2)
1415 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1421 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1427 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1428 I->getDesc().isUnconditionalBranch()) {
1439 int *BytesRemoved)
const {
1446 if (!
I->getDesc().isUnconditionalBranch() &&
1447 !
I->getDesc().isConditionalBranch())
1453 I->eraseFromParent();
1457 if (
I ==
MBB.begin())
1460 if (!
I->getDesc().isConditionalBranch())
1466 I->eraseFromParent();
1479 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1481 "RISC-V branch conditions have two components!");
1515 assert(RS &&
"RegScavenger required for long branching");
1517 "new block should be inserted for expanding unconditional branch");
1520 "restore block should be inserted for restoring clobbered registers");
1529 "Branch offsets outside of the signed 32-bit range not supported");
1535 auto II =
MBB.end();
1541 RS->enterBasicBlockEnd(
MBB);
1548 RC = &RISCV::GPRX7RegClass;
1550 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1554 RS->setRegUsed(TmpGPR);
1559 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1565 if (FrameIndex == -1)
1570 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1573 MI.getOperand(1).setMBB(&RestoreBB);
1577 TRI->eliminateFrameIndex(RestoreBB.
back(),
1587 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1597 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1598 MI->getOperand(1).getReg() == RISCV::X0) {
1599 Imm =
MI->getOperand(2).getImm();
1604 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1605 MI->getOperand(1).getReg() == RISCV::X0 &&
1606 MI->getOperand(2).getImm() == 11) {
1620 if (Reg == RISCV::X0) {
1628 bool IsSigned =
false;
1629 bool IsEquality =
false;
1630 switch (
MI.getOpcode()) {
1666 MI.eraseFromParent();
1692 auto searchConst = [&](int64_t C1) ->
Register {
1694 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1697 I.getOperand(0).getReg().isVirtual();
1700 return DefC1->getOperand(0).getReg();
1712 if (
isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1713 MRI.
hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1715 if (
Register RegZ = searchConst(C0 + 1)) {
1723 MI.eraseFromParent();
1733 if (
isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1736 if (
Register RegZ = searchConst(C0 - 1)) {
1744 MI.eraseFromParent();
1754 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1756 int NumOp =
MI.getNumExplicitOperands();
1757 return MI.getOperand(NumOp - 1).getMBB();
1761 int64_t BrOffset)
const {
1762 unsigned XLen =
STI.getXLen();
1769 case RISCV::NDS_BBC:
1770 case RISCV::NDS_BBS:
1771 case RISCV::NDS_BEQC:
1772 case RISCV::NDS_BNEC:
1782 case RISCV::CV_BEQIMM:
1783 case RISCV::CV_BNEIMM:
1784 case RISCV::QC_BEQI:
1785 case RISCV::QC_BNEI:
1786 case RISCV::QC_BGEI:
1787 case RISCV::QC_BLTI:
1788 case RISCV::QC_BLTUI:
1789 case RISCV::QC_BGEUI:
1790 case RISCV::QC_E_BEQI:
1791 case RISCV::QC_E_BNEI:
1792 case RISCV::QC_E_BGEI:
1793 case RISCV::QC_E_BLTI:
1794 case RISCV::QC_E_BLTUI:
1795 case RISCV::QC_E_BGEUI:
1798 case RISCV::PseudoBR:
1800 case RISCV::PseudoJump:
1811 case RISCV::ADD:
return RISCV::PseudoCCADD;
1812 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1813 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1814 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1815 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1816 case RISCV::AND:
return RISCV::PseudoCCAND;
1817 case RISCV::OR:
return RISCV::PseudoCCOR;
1818 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1819 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1820 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1821 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1822 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1823 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1824 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1825 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1826 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1828 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1829 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1830 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1831 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1832 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1833 case RISCV::ORI:
return RISCV::PseudoCCORI;
1834 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1836 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1837 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1838 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1839 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1840 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1842 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1843 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1844 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1845 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1847 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1848 case RISCV::ORN:
return RISCV::PseudoCCORN;
1849 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1851 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1852 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1856 return RISCV::INSTRUCTION_LIST_END;
1865 if (!
Reg.isVirtual())
1873 if (!STI.hasShortForwardBranchIMinMax() &&
1874 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1875 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1878 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1885 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1886 MI->getOperand(1).getReg() == RISCV::X0)
1891 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1904 bool DontMoveAcrossStores =
true;
1905 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1913 bool PreferFalse)
const {
1914 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1915 "Unknown select instruction");
1916 if (!
STI.hasShortForwardBranchIALU())
1922 bool Invert = !
DefMI;
1930 Register DestReg =
MI.getOperand(0).getReg();
1936 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1943 NewMI.
add(FalseReg);
1951 unsigned BCCOpcode =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1957 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 2));
1958 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 1));
1968 if (
DefMI->getParent() !=
MI.getParent())
1972 DefMI->eraseFromParent();
1977 if (
MI.isMetaInstruction())
1980 unsigned Opcode =
MI.getOpcode();
1982 if (Opcode == TargetOpcode::INLINEASM ||
1983 Opcode == TargetOpcode::INLINEASM_BR) {
1985 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1990 if (
STI.hasStdExtZca()) {
1991 if (isCompressibleInst(
MI,
STI))
1998 if (Opcode == TargetOpcode::BUNDLE)
1999 return getInstBundleSize(
MI);
2001 if (
MI.getParent() &&
MI.getParent()->getParent()) {
2002 if (isCompressibleInst(
MI,
STI))
2007 case RISCV::PseudoMV_FPR16INX:
2008 case RISCV::PseudoMV_FPR32INX:
2010 return STI.hasStdExtZca() ? 2 : 4;
2012 case RISCV::PseudoCCMOVGPRNoX0:
2013 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2016 case RISCV::PseudoCCMOVGPR:
2017 case RISCV::PseudoCCADD:
2018 case RISCV::PseudoCCSUB:
2019 case RISCV::PseudoCCSLL:
2020 case RISCV::PseudoCCSRL:
2021 case RISCV::PseudoCCSRA:
2022 case RISCV::PseudoCCAND:
2023 case RISCV::PseudoCCOR:
2024 case RISCV::PseudoCCXOR:
2025 case RISCV::PseudoCCADDI:
2026 case RISCV::PseudoCCANDI:
2027 case RISCV::PseudoCCORI:
2028 case RISCV::PseudoCCXORI:
2029 case RISCV::PseudoCCLUI:
2030 case RISCV::PseudoCCSLLI:
2031 case RISCV::PseudoCCSRLI:
2032 case RISCV::PseudoCCSRAI:
2033 case RISCV::PseudoCCADDW:
2034 case RISCV::PseudoCCSUBW:
2035 case RISCV::PseudoCCSLLW:
2036 case RISCV::PseudoCCSRLW:
2037 case RISCV::PseudoCCSRAW:
2038 case RISCV::PseudoCCADDIW:
2039 case RISCV::PseudoCCSLLIW:
2040 case RISCV::PseudoCCSRLIW:
2041 case RISCV::PseudoCCSRAIW:
2042 case RISCV::PseudoCCANDN:
2043 case RISCV::PseudoCCORN:
2044 case RISCV::PseudoCCXNOR:
2045 case RISCV::PseudoCCMAX:
2046 case RISCV::PseudoCCMIN:
2047 case RISCV::PseudoCCMAXU:
2048 case RISCV::PseudoCCMINU:
2049 case RISCV::PseudoCCMUL:
2050 case RISCV::PseudoCCLB:
2051 case RISCV::PseudoCCLH:
2052 case RISCV::PseudoCCLW:
2053 case RISCV::PseudoCCLHU:
2054 case RISCV::PseudoCCLBU:
2055 case RISCV::PseudoCCLWU:
2056 case RISCV::PseudoCCLD:
2057 case RISCV::PseudoCCQC_LI:
2058 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2061 case RISCV::PseudoCCQC_E_LI:
2062 case RISCV::PseudoCCQC_E_LB:
2063 case RISCV::PseudoCCQC_E_LH:
2064 case RISCV::PseudoCCQC_E_LW:
2065 case RISCV::PseudoCCQC_E_LHU:
2066 case RISCV::PseudoCCQC_E_LBU:
2067 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2070 case TargetOpcode::STACKMAP:
2073 case TargetOpcode::PATCHPOINT:
2076 case TargetOpcode::STATEPOINT: {
2080 return std::max(NumBytes, 8U);
2082 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2083 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2084 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2087 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2088 F.hasFnAttribute(
"patchable-function-entry")) {
2090 if (
F.getFnAttribute(
"patchable-function-entry")
2092 .getAsInteger(10, Num))
2093 return get(Opcode).getSize();
2096 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2100 return STI.is64Bit() ? 68 : 44;
2103 return get(Opcode).getSize();
2108 const unsigned Opcode =
MI.getOpcode();
2112 case RISCV::FSGNJ_D:
2113 case RISCV::FSGNJ_S:
2114 case RISCV::FSGNJ_H:
2115 case RISCV::FSGNJ_D_INX:
2116 case RISCV::FSGNJ_D_IN32X:
2117 case RISCV::FSGNJ_S_INX:
2118 case RISCV::FSGNJ_H_INX:
2120 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2121 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2125 return (
MI.getOperand(1).isReg() &&
2126 MI.getOperand(1).getReg() == RISCV::X0) ||
2127 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2129 return MI.isAsCheapAsAMove();
2132std::optional<DestSourcePair>
2136 switch (
MI.getOpcode()) {
2142 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2143 MI.getOperand(2).isReg())
2145 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2146 MI.getOperand(1).isReg())
2151 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2152 MI.getOperand(2).getImm() == 0)
2156 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2157 MI.getOperand(1).isReg())
2161 case RISCV::SH1ADD_UW:
2163 case RISCV::SH2ADD_UW:
2165 case RISCV::SH3ADD_UW:
2166 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2167 MI.getOperand(2).isReg())
2170 case RISCV::FSGNJ_D:
2171 case RISCV::FSGNJ_S:
2172 case RISCV::FSGNJ_H:
2173 case RISCV::FSGNJ_D_INX:
2174 case RISCV::FSGNJ_D_IN32X:
2175 case RISCV::FSGNJ_S_INX:
2176 case RISCV::FSGNJ_H_INX:
2178 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2179 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2183 return std::nullopt;
2191 const auto &SchedModel =
STI.getSchedModel();
2192 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2204 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2208 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2209 RISCV::OpName::frm) < 0;
2211 "New instructions require FRM whereas the old one does not have it");
2218 for (
auto *NewMI : InsInstrs) {
2220 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2221 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2263bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2264 bool Invert)
const {
2265#define OPCODE_LMUL_CASE(OPC) \
2266 case RISCV::OPC##_M1: \
2267 case RISCV::OPC##_M2: \
2268 case RISCV::OPC##_M4: \
2269 case RISCV::OPC##_M8: \
2270 case RISCV::OPC##_MF2: \
2271 case RISCV::OPC##_MF4: \
2272 case RISCV::OPC##_MF8
2274#define OPCODE_LMUL_MASK_CASE(OPC) \
2275 case RISCV::OPC##_M1_MASK: \
2276 case RISCV::OPC##_M2_MASK: \
2277 case RISCV::OPC##_M4_MASK: \
2278 case RISCV::OPC##_M8_MASK: \
2279 case RISCV::OPC##_MF2_MASK: \
2280 case RISCV::OPC##_MF4_MASK: \
2281 case RISCV::OPC##_MF8_MASK
2286 Opcode = *InvOpcode;
2303#undef OPCODE_LMUL_MASK_CASE
2304#undef OPCODE_LMUL_CASE
2307bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2318 const uint64_t TSFlags =
Desc.TSFlags;
2320 auto checkImmOperand = [&](
unsigned OpIdx) {
2324 auto checkRegOperand = [&](
unsigned OpIdx) {
2332 if (!checkRegOperand(1))
2347 bool SeenMI2 =
false;
2348 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2357 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2358 Register SrcReg = It->getOperand(1).getReg();
2376 if (MI1VReg != SrcReg)
2385 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2425bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2426 bool &Commuted)
const {
2430 "Expect the present of passthrough operand.");
2436 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2437 areRVVInstsReassociable(Inst, *MI2);
2441 return areRVVInstsReassociable(Inst, *MI1) &&
2442 (isVectorAssociativeAndCommutative(*MI1) ||
2443 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2450 if (!isVectorAssociativeAndCommutative(Inst) &&
2451 !isVectorAssociativeAndCommutative(Inst,
true))
2477 for (
unsigned I = 0;
I < 5; ++
I)
2483 bool &Commuted)
const {
2484 if (isVectorAssociativeAndCommutative(Inst) ||
2485 isVectorAssociativeAndCommutative(Inst,
true))
2486 return hasReassociableVectorSibling(Inst, Commuted);
2492 unsigned OperandIdx = Commuted ? 2 : 1;
2496 int16_t InstFrmOpIdx =
2497 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2498 int16_t SiblingFrmOpIdx =
2499 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2501 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2506 bool Invert)
const {
2507 if (isVectorAssociativeAndCommutative(Inst, Invert))
2515 Opc = *InverseOpcode;
2560std::optional<unsigned>
2562#define RVV_OPC_LMUL_CASE(OPC, INV) \
2563 case RISCV::OPC##_M1: \
2564 return RISCV::INV##_M1; \
2565 case RISCV::OPC##_M2: \
2566 return RISCV::INV##_M2; \
2567 case RISCV::OPC##_M4: \
2568 return RISCV::INV##_M4; \
2569 case RISCV::OPC##_M8: \
2570 return RISCV::INV##_M8; \
2571 case RISCV::OPC##_MF2: \
2572 return RISCV::INV##_MF2; \
2573 case RISCV::OPC##_MF4: \
2574 return RISCV::INV##_MF4; \
2575 case RISCV::OPC##_MF8: \
2576 return RISCV::INV##_MF8
2578#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2579 case RISCV::OPC##_M1_MASK: \
2580 return RISCV::INV##_M1_MASK; \
2581 case RISCV::OPC##_M2_MASK: \
2582 return RISCV::INV##_M2_MASK; \
2583 case RISCV::OPC##_M4_MASK: \
2584 return RISCV::INV##_M4_MASK; \
2585 case RISCV::OPC##_M8_MASK: \
2586 return RISCV::INV##_M8_MASK; \
2587 case RISCV::OPC##_MF2_MASK: \
2588 return RISCV::INV##_MF2_MASK; \
2589 case RISCV::OPC##_MF4_MASK: \
2590 return RISCV::INV##_MF4_MASK; \
2591 case RISCV::OPC##_MF8_MASK: \
2592 return RISCV::INV##_MF8_MASK
2596 return std::nullopt;
2598 return RISCV::FSUB_H;
2600 return RISCV::FSUB_S;
2602 return RISCV::FSUB_D;
2604 return RISCV::FADD_H;
2606 return RISCV::FADD_S;
2608 return RISCV::FADD_D;
2625#undef RVV_OPC_LMUL_MASK_CASE
2626#undef RVV_OPC_LMUL_CASE
2631 bool DoRegPressureReduce) {
2658 bool DoRegPressureReduce) {
2665 DoRegPressureReduce)) {
2671 DoRegPressureReduce)) {
2681 bool DoRegPressureReduce) {
2689 unsigned CombineOpc) {
2696 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2710 unsigned OuterShiftAmt) {
2716 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2743 case RISCV::SH1ADD_UW:
2745 case RISCV::SH2ADD_UW:
2747 case RISCV::SH3ADD_UW:
2793 bool DoRegPressureReduce)
const {
2802 DoRegPressureReduce);
2810 return RISCV::FMADD_H;
2812 return RISCV::FMADD_S;
2814 return RISCV::FMADD_D;
2859 bool Mul1IsKill = Mul1.
isKill();
2860 bool Mul2IsKill = Mul2.
isKill();
2861 bool AddendIsKill = Addend.
isKill();
2870 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2895 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2902 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2905 switch (InnerShiftAmt - OuterShiftAmt) {
2909 InnerOpc = RISCV::ADD;
2912 InnerOpc = RISCV::SH1ADD;
2915 InnerOpc = RISCV::SH2ADD;
2918 InnerOpc = RISCV::SH3ADD;
2936 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2953 DelInstrs, InstrIdxForVirtReg);
2980 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2982 unsigned OpType = Operand.OperandType;
2988 ErrInfo =
"Expected an immediate operand.";
2991 int64_t Imm = MO.
getImm();
2997#define CASE_OPERAND_UIMM(NUM) \
2998 case RISCVOp::OPERAND_UIMM##NUM: \
2999 Ok = isUInt<NUM>(Imm); \
3001#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
3002 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
3003 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
3004 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
3007#define CASE_OPERAND_SIMM(NUM) \
3008 case RISCVOp::OPERAND_SIMM##NUM: \
3009 Ok = isInt<NUM>(Imm); \
3043 Ok = Imm >= 1 && Imm <= 32;
3046 Ok = Imm >= 1 && Imm <= 64;
3067 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3078 Ok = Imm >= -15 && Imm <= 16;
3106 Ok = Ok && Imm != 0;
3109 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3112 Ok = Imm >= 0 && Imm <= 10;
3115 Ok = Imm >= 0 && Imm <= 7;
3118 Ok = Imm >= 1 && Imm <= 10;
3121 Ok = Imm >= 2 && Imm <= 14;
3130 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3165 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3169 ErrInfo =
"Invalid immediate";
3178 ErrInfo =
"Expected a non-register operand.";
3182 ErrInfo =
"Invalid immediate";
3191 ErrInfo =
"Expected a non-register operand.";
3195 ErrInfo =
"Invalid immediate";
3203 ErrInfo =
"Expected a non-register operand.";
3207 ErrInfo =
"Invalid immediate";
3213 int64_t Imm = MO.
getImm();
3216 ErrInfo =
"Invalid immediate";
3219 }
else if (!MO.
isReg()) {
3220 ErrInfo =
"Expected a register or immediate operand.";
3226 ErrInfo =
"Expected a register or immediate operand.";
3236 if (!
Op.isImm() && !
Op.isReg()) {
3237 ErrInfo =
"Invalid operand type for VL operand";
3240 if (
Op.isReg() &&
Op.getReg().isValid()) {
3243 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3244 ErrInfo =
"Invalid register class for VL operand";
3249 ErrInfo =
"VL operand w/o SEW operand?";
3255 if (!
MI.getOperand(
OpIdx).isImm()) {
3256 ErrInfo =
"SEW value expected to be an immediate";
3261 ErrInfo =
"Unexpected SEW value";
3264 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3266 ErrInfo =
"Unexpected SEW value";
3272 if (!
MI.getOperand(
OpIdx).isImm()) {
3273 ErrInfo =
"Policy operand expected to be an immediate";
3278 ErrInfo =
"Invalid Policy Value";
3282 ErrInfo =
"policy operand w/o VL operand?";
3290 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3291 ErrInfo =
"policy operand w/o tied operand?";
3298 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3299 ErrInfo =
"dynamic rounding mode should read FRM";
3321 case RISCV::LD_RV32:
3331 case RISCV::SD_RV32:
3347 int64_t NewOffset = OldOffset + Disp;
3369 "Addressing mode not supported for folding");
3442 case RISCV::LD_RV32:
3445 case RISCV::SD_RV32:
3452 OffsetIsScalable =
false;
3468 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3476 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3479 auto Base1 = MO1->getValue();
3480 auto Base2 = MO2->getValue();
3481 if (!Base1 || !Base2)
3489 return Base1 == Base2;
3495 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3496 unsigned NumBytes)
const {
3499 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3504 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3510 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3516 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3566 int64_t OffsetA = 0, OffsetB = 0;
3572 int LowOffset = std::min(OffsetA, OffsetB);
3573 int HighOffset = std::max(OffsetA, OffsetB);
3574 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3576 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3583std::pair<unsigned, unsigned>
3586 return std::make_pair(TF & Mask, TF & ~Mask);
3592 static const std::pair<unsigned, const char *> TargetFlags[] = {
3593 {MO_CALL,
"riscv-call"},
3594 {MO_LO,
"riscv-lo"},
3595 {MO_HI,
"riscv-hi"},
3596 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3597 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3598 {MO_GOT_HI,
"riscv-got-hi"},
3599 {MO_TPREL_LO,
"riscv-tprel-lo"},
3600 {MO_TPREL_HI,
"riscv-tprel-hi"},
3601 {MO_TPREL_ADD,
"riscv-tprel-add"},
3602 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3603 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3604 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3605 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3606 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3607 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3615 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3628 unsigned &Flags)
const {
3648 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3649 F.hasFnAttribute(
"patchable-function-entry");
3654 return MI.readsRegister(RegNo,
TRI) ||
3655 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3660 return MI.modifiesRegister(RegNo,
TRI) ||
3661 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3665 if (!
MBB.back().isReturn())
3704 if (
C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
3705 C.isAvailableInsideSeq(
Reg,
TRI)) {
3719 if (
C.back().isReturn() &&
3720 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3722 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3724 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3725 "the proposed outlined function call\n");
3731 if (
C.back().isReturn()) {
3733 "The candidate who uses return instruction must be outlined "
3740 if (!
C.isAvailableInsideSeq(RISCV::X5, RegInfo))
3744 if (
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo))
3754std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3757 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3758 unsigned MinRepeats)
const {
3766 if (RepeatedSequenceLocs.size() < MinRepeats)
3767 return std::nullopt;
3771 unsigned InstrSizeCExt =
3773 unsigned CallOverhead = 0, FrameOverhead = 0;
3776 unsigned CFICount = 0;
3777 for (
auto &
I : Candidate) {
3778 if (
I.isCFIInstruction())
3789 std::vector<MCCFIInstruction> CFIInstructions =
3790 C.getMF()->getFrameInstructions();
3792 if (CFICount > 0 && CFICount != CFIInstructions.size())
3793 return std::nullopt;
3801 CallOverhead = 4 + InstrSizeCExt;
3808 FrameOverhead = InstrSizeCExt;
3814 return std::nullopt;
3818 for (
auto &
C : RepeatedSequenceLocs) {
3820 if (
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo)) {
3822 unsigned CandCallOverhead = 8;
3827 unsigned CandCallOverhead = InstrSizeCExt + 8 + InstrSizeCExt;
3832 for (
auto &
C : RepeatedSequenceLocs)
3833 C.setCallInfo(MOCI, CallOverhead);
3836 unsigned SequenceSize = 0;
3837 for (
auto &
MI : Candidate)
3840 return std::make_unique<outliner::OutlinedFunction>(
3841 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3847 unsigned Flags)
const {
3851 MBB->getParent()->getSubtarget().getRegisterInfo();
3852 const auto &
F =
MI.getMF()->getFunction();
3857 if (
MI.isCFIInstruction())
3865 for (
const auto &MO :
MI.operands()) {
3870 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3871 F.hasSection() ||
F.getSectionPrefix()))
3888 MBB.addLiveIn(RISCV::X5);
3903 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3910 assert(SaveReg &&
"Cannot find an available register to save/restore X5.");
3921 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3937 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3948 return std::nullopt;
3952 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3953 MI.getOperand(2).isImm())
3954 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3956 return std::nullopt;
3964 std::string GenericComment =
3966 if (!GenericComment.empty())
3967 return GenericComment;
3971 return std::string();
3973 std::string Comment;
3980 switch (OpInfo.OperandType) {
3983 unsigned Imm =
Op.getImm();
3988 unsigned Imm =
Op.getImm();
3993 unsigned Imm =
Op.getImm();
3999 unsigned Log2SEW =
Op.getImm();
4000 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
4006 unsigned Policy =
Op.getImm();
4008 "Invalid Policy Value");
4014 if (
Op.isImm() &&
Op.getImm() == -1)
4036#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
4037 RISCV::Pseudo##OP##_##LMUL
4039#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
4040 RISCV::Pseudo##OP##_##LMUL##_MASK
4042#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
4043 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
4044 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
4046#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
4047 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
4048 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
4049 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
4050 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
4051 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
4052 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
4054#define CASE_RVV_OPCODE_UNMASK(OP) \
4055 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4056 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
4058#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
4059 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
4060 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
4061 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
4062 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
4063 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
4064 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
4066#define CASE_RVV_OPCODE_MASK(OP) \
4067 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
4068 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
4070#define CASE_RVV_OPCODE_WIDEN(OP) \
4071 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4072 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
4074#define CASE_RVV_OPCODE(OP) \
4075 CASE_RVV_OPCODE_UNMASK(OP): \
4076 case CASE_RVV_OPCODE_MASK(OP)
4080#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
4081 RISCV::PseudoV##OP##_##TYPE##_##LMUL
4083#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
4084 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
4085 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
4086 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
4087 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
4088 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
4089 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
4090 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
4093#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
4094 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
4096#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
4097 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
4098 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
4099 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
4100 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
4102#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
4103 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
4104 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
4106#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
4107 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
4108 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
4110#define CASE_VFMA_OPCODE_VV(OP) \
4111 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
4112 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
4113 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
4114 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
4116#define CASE_VFMA_SPLATS(OP) \
4117 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
4118 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
4119 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
4120 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
4124 unsigned &SrcOpIdx1,
4125 unsigned &SrcOpIdx2)
const {
4127 if (!
Desc.isCommutable())
4130 switch (
MI.getOpcode()) {
4131 case RISCV::TH_MVEQZ:
4132 case RISCV::TH_MVNEZ:
4136 if (
MI.getOperand(2).getReg() == RISCV::X0)
4139 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4140 case RISCV::QC_SELECTIEQ:
4141 case RISCV::QC_SELECTINE:
4142 case RISCV::QC_SELECTIIEQ:
4143 case RISCV::QC_SELECTIINE:
4144 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4145 case RISCV::QC_MVEQ:
4146 case RISCV::QC_MVNE:
4147 case RISCV::QC_MVLT:
4148 case RISCV::QC_MVGE:
4149 case RISCV::QC_MVLTU:
4150 case RISCV::QC_MVGEU:
4151 case RISCV::QC_MVEQI:
4152 case RISCV::QC_MVNEI:
4153 case RISCV::QC_MVLTI:
4154 case RISCV::QC_MVGEI:
4155 case RISCV::QC_MVLTUI:
4156 case RISCV::QC_MVGEUI:
4157 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4158 case RISCV::TH_MULA:
4159 case RISCV::TH_MULAW:
4160 case RISCV::TH_MULAH:
4161 case RISCV::TH_MULS:
4162 case RISCV::TH_MULSW:
4163 case RISCV::TH_MULSH:
4165 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4166 case RISCV::PseudoCCMOVGPRNoX0:
4167 case RISCV::PseudoCCMOVGPR:
4169 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4210 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4237 unsigned CommutableOpIdx1 = 1;
4238 unsigned CommutableOpIdx2 = 3;
4239 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4260 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4262 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4266 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4267 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4273 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4274 SrcOpIdx2 == CommuteAnyOperandIndex) {
4277 unsigned CommutableOpIdx1 = SrcOpIdx1;
4278 if (SrcOpIdx1 == SrcOpIdx2) {
4281 CommutableOpIdx1 = 1;
4282 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4284 CommutableOpIdx1 = SrcOpIdx2;
4289 unsigned CommutableOpIdx2;
4290 if (CommutableOpIdx1 != 1) {
4292 CommutableOpIdx2 = 1;
4294 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4299 if (Op1Reg !=
MI.getOperand(2).getReg())
4300 CommutableOpIdx2 = 2;
4302 CommutableOpIdx2 = 3;
4307 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4320#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4321 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4322 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4325#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4326 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4327 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4328 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4329 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4330 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4331 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4332 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4335#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4336 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4337 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4340#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4341 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4342 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4343 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4344 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4346#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4347 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4348 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4350#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4351 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4352 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4354#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4355 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4356 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4357 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4358 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4360#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4361 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4362 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4363 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4364 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4370 unsigned OpIdx2)
const {
4373 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4377 switch (
MI.getOpcode()) {
4378 case RISCV::TH_MVEQZ:
4379 case RISCV::TH_MVNEZ: {
4380 auto &WorkingMI = cloneIfNew(
MI);
4381 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4382 : RISCV::TH_MVEQZ));
4386 case RISCV::QC_SELECTIEQ:
4387 case RISCV::QC_SELECTINE:
4388 case RISCV::QC_SELECTIIEQ:
4389 case RISCV::QC_SELECTIINE:
4391 case RISCV::QC_MVEQ:
4392 case RISCV::QC_MVNE:
4393 case RISCV::QC_MVLT:
4394 case RISCV::QC_MVGE:
4395 case RISCV::QC_MVLTU:
4396 case RISCV::QC_MVGEU:
4397 case RISCV::QC_MVEQI:
4398 case RISCV::QC_MVNEI:
4399 case RISCV::QC_MVLTI:
4400 case RISCV::QC_MVGEI:
4401 case RISCV::QC_MVLTUI:
4402 case RISCV::QC_MVGEUI: {
4403 auto &WorkingMI = cloneIfNew(
MI);
4408 case RISCV::PseudoCCMOVGPRNoX0:
4409 case RISCV::PseudoCCMOVGPR: {
4411 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
4413 auto &WorkingMI = cloneIfNew(
MI);
4414 WorkingMI.getOperand(
MI.getNumExplicitOperands() - 3).setImm(BCC);
4438 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4439 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4441 switch (
MI.getOpcode()) {
4464 auto &WorkingMI = cloneIfNew(
MI);
4465 WorkingMI.setDesc(
get(
Opc));
4475 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4478 if (OpIdx1 == 3 || OpIdx2 == 3) {
4480 switch (
MI.getOpcode()) {
4491 auto &WorkingMI = cloneIfNew(
MI);
4492 WorkingMI.setDesc(
get(
Opc));
4504#undef CASE_VMA_CHANGE_OPCODE_COMMON
4505#undef CASE_VMA_CHANGE_OPCODE_LMULS
4506#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4507#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4508#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4509#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4510#undef CASE_VFMA_CHANGE_OPCODE_VV
4511#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4513#undef CASE_RVV_OPCODE_UNMASK_LMUL
4514#undef CASE_RVV_OPCODE_MASK_LMUL
4515#undef CASE_RVV_OPCODE_LMUL
4516#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4517#undef CASE_RVV_OPCODE_UNMASK
4518#undef CASE_RVV_OPCODE_MASK_WIDEN
4519#undef CASE_RVV_OPCODE_MASK
4520#undef CASE_RVV_OPCODE_WIDEN
4521#undef CASE_RVV_OPCODE
4523#undef CASE_VMA_OPCODE_COMMON
4524#undef CASE_VMA_OPCODE_LMULS
4525#undef CASE_VFMA_OPCODE_COMMON
4526#undef CASE_VFMA_OPCODE_LMULS_M1
4527#undef CASE_VFMA_OPCODE_LMULS_MF2
4528#undef CASE_VFMA_OPCODE_LMULS_MF4
4529#undef CASE_VFMA_OPCODE_VV
4530#undef CASE_VFMA_SPLATS
4533 switch (
MI.getOpcode()) {
4541 if (
MI.getOperand(1).getReg() == RISCV::X0)
4542 commuteInstruction(
MI);
4544 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4545 MI.getOperand(2).ChangeToImmediate(0);
4546 MI.setDesc(
get(RISCV::ADDI));
4550 if (
MI.getOpcode() == RISCV::XOR &&
4551 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4552 MI.getOperand(1).setReg(RISCV::X0);
4553 MI.getOperand(2).ChangeToImmediate(0);
4554 MI.setDesc(
get(RISCV::ADDI));
4561 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4562 MI.setDesc(
get(RISCV::ADDI));
4568 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4569 MI.getOperand(2).ChangeToImmediate(0);
4570 MI.setDesc(
get(RISCV::ADDI));
4576 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4577 MI.getOperand(2).ChangeToImmediate(0);
4578 MI.setDesc(
get(RISCV::ADDIW));
4585 if (
MI.getOperand(1).getReg() == RISCV::X0)
4586 commuteInstruction(
MI);
4588 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4589 MI.getOperand(2).ChangeToImmediate(0);
4590 MI.setDesc(
get(RISCV::ADDIW));
4595 case RISCV::SH1ADD_UW:
4597 case RISCV::SH2ADD_UW:
4599 case RISCV::SH3ADD_UW:
4601 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4602 MI.removeOperand(1);
4604 MI.setDesc(
get(RISCV::ADDI));
4608 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4609 MI.removeOperand(2);
4610 unsigned Opc =
MI.getOpcode();
4611 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4612 Opc == RISCV::SH3ADD_UW) {
4614 MI.setDesc(
get(RISCV::SLLI_UW));
4618 MI.setDesc(
get(RISCV::SLLI));
4632 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4633 MI.getOperand(2).getReg() == RISCV::X0) {
4634 MI.getOperand(1).setReg(RISCV::X0);
4635 MI.getOperand(2).ChangeToImmediate(0);
4636 MI.setDesc(
get(RISCV::ADDI));
4642 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4643 MI.getOperand(2).setImm(0);
4644 MI.setDesc(
get(RISCV::ADDI));
4652 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4653 MI.getOperand(2).ChangeToImmediate(0);
4654 MI.setDesc(
get(RISCV::ADDI));
4658 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4659 MI.getOperand(2).ChangeToImmediate(0);
4660 MI.setDesc(
get(RISCV::ADDI));
4668 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4669 MI.getOperand(2).ChangeToImmediate(0);
4670 MI.setDesc(
get(RISCV::ADDI));
4680 case RISCV::SLLI_UW:
4682 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4683 MI.getOperand(2).setImm(0);
4684 MI.setDesc(
get(RISCV::ADDI));
4692 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4693 MI.getOperand(2).getReg() == RISCV::X0) {
4694 MI.getOperand(2).ChangeToImmediate(0);
4695 MI.setDesc(
get(RISCV::ADDI));
4699 if (
MI.getOpcode() == RISCV::ADD_UW &&
4700 MI.getOperand(1).getReg() == RISCV::X0) {
4701 MI.removeOperand(1);
4703 MI.setDesc(
get(RISCV::ADDI));
4709 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4710 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4711 MI.setDesc(
get(RISCV::ADDI));
4717 case RISCV::ZEXT_H_RV32:
4718 case RISCV::ZEXT_H_RV64:
4721 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4723 MI.setDesc(
get(RISCV::ADDI));
4732 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4733 MI.getOperand(2).ChangeToImmediate(0);
4734 MI.setDesc(
get(RISCV::ADDI));
4741 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4743 MI.removeOperand(0);
4744 MI.insert(
MI.operands_begin() + 1, {MO0});
4749 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4751 MI.removeOperand(0);
4752 MI.insert(
MI.operands_begin() + 1, {MO0});
4753 MI.setDesc(
get(RISCV::BNE));
4758 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4760 MI.removeOperand(0);
4761 MI.insert(
MI.operands_begin() + 1, {MO0});
4762 MI.setDesc(
get(RISCV::BEQ));
4770#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4771 RISCV::PseudoV##OP##_##LMUL##_TIED
4773#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4774 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4775 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4776 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4777 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4778 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4779 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4781#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4782 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4783 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4786#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4787 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4788 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4789 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4790 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4791 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4792 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4795#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4796 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4798#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4799 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4800 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4801 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4802 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4803 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4804 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4805 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4806 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4807 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4809#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4810 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4811 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4814#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4815 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4816 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4817 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4818 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4819 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4820 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4821 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4822 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4823 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4825#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4826 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4827 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4828 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4829 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4830 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4832#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4833 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4834 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4835 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4836 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4837 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4844 switch (
MI.getOpcode()) {
4852 MI.getNumExplicitOperands() == 7 &&
4853 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4860 switch (
MI.getOpcode()) {
4872 .
add(
MI.getOperand(0))
4874 .
add(
MI.getOperand(1))
4875 .
add(
MI.getOperand(2))
4876 .
add(
MI.getOperand(3))
4877 .
add(
MI.getOperand(4))
4878 .
add(
MI.getOperand(5))
4879 .
add(
MI.getOperand(6));
4888 MI.getNumExplicitOperands() == 6);
4895 switch (
MI.getOpcode()) {
4907 .
add(
MI.getOperand(0))
4909 .
add(
MI.getOperand(1))
4910 .
add(
MI.getOperand(2))
4911 .
add(
MI.getOperand(3))
4912 .
add(
MI.getOperand(4))
4913 .
add(
MI.getOperand(5));
4920 unsigned NumOps =
MI.getNumOperands();
4923 if (
Op.isReg() &&
Op.isKill())
4931 if (
MI.getOperand(0).isEarlyClobber()) {
4945#undef CASE_WIDEOP_OPCODE_COMMON
4946#undef CASE_WIDEOP_OPCODE_LMULS
4947#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4948#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4949#undef CASE_FP_WIDEOP_OPCODE_COMMON
4950#undef CASE_FP_WIDEOP_OPCODE_LMULS
4951#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4952#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4961 if (ShiftAmount == 0)
4967 }
else if (
int ShXAmount, ShiftAmount;
4969 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4972 switch (ShXAmount) {
4974 Opc = RISCV::SH1ADD;
4977 Opc = RISCV::SH2ADD;
4980 Opc = RISCV::SH3ADD;
5016 }
else if (
STI.hasStdExtZmmul()) {
5026 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
5027 if (Amount & (1U << ShiftAmount)) {
5031 .
addImm(ShiftAmount - PrevShiftAmount)
5033 if (Amount >> (ShiftAmount + 1)) {
5047 PrevShiftAmount = ShiftAmount;
5050 assert(Acc &&
"Expected valid accumulator");
5060 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
5068 ?
STI.getTailDupAggressiveThreshold()
5075 unsigned Opcode =
MI.getOpcode();
5076 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
5085 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
5087 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
5090std::optional<std::pair<unsigned, unsigned>>
5094 return std::nullopt;
5095 case RISCV::PseudoVSPILL2_M1:
5096 case RISCV::PseudoVRELOAD2_M1:
5097 return std::make_pair(2u, 1u);
5098 case RISCV::PseudoVSPILL2_M2:
5099 case RISCV::PseudoVRELOAD2_M2:
5100 return std::make_pair(2u, 2u);
5101 case RISCV::PseudoVSPILL2_M4:
5102 case RISCV::PseudoVRELOAD2_M4:
5103 return std::make_pair(2u, 4u);
5104 case RISCV::PseudoVSPILL3_M1:
5105 case RISCV::PseudoVRELOAD3_M1:
5106 return std::make_pair(3u, 1u);
5107 case RISCV::PseudoVSPILL3_M2:
5108 case RISCV::PseudoVRELOAD3_M2:
5109 return std::make_pair(3u, 2u);
5110 case RISCV::PseudoVSPILL4_M1:
5111 case RISCV::PseudoVRELOAD4_M1:
5112 return std::make_pair(4u, 1u);
5113 case RISCV::PseudoVSPILL4_M2:
5114 case RISCV::PseudoVRELOAD4_M2:
5115 return std::make_pair(4u, 2u);
5116 case RISCV::PseudoVSPILL5_M1:
5117 case RISCV::PseudoVRELOAD5_M1:
5118 return std::make_pair(5u, 1u);
5119 case RISCV::PseudoVSPILL6_M1:
5120 case RISCV::PseudoVRELOAD6_M1:
5121 return std::make_pair(6u, 1u);
5122 case RISCV::PseudoVSPILL7_M1:
5123 case RISCV::PseudoVRELOAD7_M1:
5124 return std::make_pair(7u, 1u);
5125 case RISCV::PseudoVSPILL8_M1:
5126 case RISCV::PseudoVRELOAD8_M1:
5127 return std::make_pair(8u, 1u);
5132 int16_t MI1FrmOpIdx =
5133 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
5134 int16_t MI2FrmOpIdx =
5135 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
5136 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5143std::optional<unsigned>
5147 return std::nullopt;
5150 case RISCV::VSLL_VX:
5151 case RISCV::VSRL_VX:
5152 case RISCV::VSRA_VX:
5154 case RISCV::VSSRL_VX:
5155 case RISCV::VSSRA_VX:
5157 case RISCV::VROL_VX:
5158 case RISCV::VROR_VX:
5163 case RISCV::VNSRL_WX:
5164 case RISCV::VNSRA_WX:
5166 case RISCV::VNCLIPU_WX:
5167 case RISCV::VNCLIP_WX:
5169 case RISCV::VWSLL_VX:
5174 case RISCV::VADD_VX:
5175 case RISCV::VSUB_VX:
5176 case RISCV::VRSUB_VX:
5178 case RISCV::VWADDU_VX:
5179 case RISCV::VWSUBU_VX:
5180 case RISCV::VWADD_VX:
5181 case RISCV::VWSUB_VX:
5182 case RISCV::VWADDU_WX:
5183 case RISCV::VWSUBU_WX:
5184 case RISCV::VWADD_WX:
5185 case RISCV::VWSUB_WX:
5187 case RISCV::VADC_VXM:
5188 case RISCV::VADC_VIM:
5189 case RISCV::VMADC_VXM:
5190 case RISCV::VMADC_VIM:
5191 case RISCV::VMADC_VX:
5192 case RISCV::VSBC_VXM:
5193 case RISCV::VMSBC_VXM:
5194 case RISCV::VMSBC_VX:
5196 case RISCV::VAND_VX:
5198 case RISCV::VXOR_VX:
5200 case RISCV::VMSEQ_VX:
5201 case RISCV::VMSNE_VX:
5202 case RISCV::VMSLTU_VX:
5203 case RISCV::VMSLT_VX:
5204 case RISCV::VMSLEU_VX:
5205 case RISCV::VMSLE_VX:
5206 case RISCV::VMSGTU_VX:
5207 case RISCV::VMSGT_VX:
5209 case RISCV::VMINU_VX:
5210 case RISCV::VMIN_VX:
5211 case RISCV::VMAXU_VX:
5212 case RISCV::VMAX_VX:
5214 case RISCV::VMUL_VX:
5215 case RISCV::VMULH_VX:
5216 case RISCV::VMULHU_VX:
5217 case RISCV::VMULHSU_VX:
5219 case RISCV::VDIVU_VX:
5220 case RISCV::VDIV_VX:
5221 case RISCV::VREMU_VX:
5222 case RISCV::VREM_VX:
5224 case RISCV::VWMUL_VX:
5225 case RISCV::VWMULU_VX:
5226 case RISCV::VWMULSU_VX:
5228 case RISCV::VMACC_VX:
5229 case RISCV::VNMSAC_VX:
5230 case RISCV::VMADD_VX:
5231 case RISCV::VNMSUB_VX:
5233 case RISCV::VWMACCU_VX:
5234 case RISCV::VWMACC_VX:
5235 case RISCV::VWMACCSU_VX:
5236 case RISCV::VWMACCUS_VX:
5238 case RISCV::VMERGE_VXM:
5240 case RISCV::VMV_V_X:
5242 case RISCV::VSADDU_VX:
5243 case RISCV::VSADD_VX:
5244 case RISCV::VSSUBU_VX:
5245 case RISCV::VSSUB_VX:
5247 case RISCV::VAADDU_VX:
5248 case RISCV::VAADD_VX:
5249 case RISCV::VASUBU_VX:
5250 case RISCV::VASUB_VX:
5252 case RISCV::VSMUL_VX:
5254 case RISCV::VMV_S_X:
5256 case RISCV::VANDN_VX:
5257 return 1U << Log2SEW;
5263 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5266 return RVV->BaseInstr;
5276 unsigned Scaled = Log2SEW + (DestEEW - 1);
5290 return std::nullopt;
5295 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5296 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5297 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5298 LHS.getReg() == RHS.getReg())
5302 if (LHS.isImm() && LHS.getImm() == 0)
5308 if (!LHSImm || !RHSImm)
5310 return LHSImm <= RHSImm;
5322 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5324 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5334 std::optional<bool> createTripCountGreaterCondition(
5335 int TC, MachineBasicBlock &
MBB,
5336 SmallVectorImpl<MachineOperand> &CondParam)
override {
5344 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5346 void adjustTripCount(
int TripCountAdjust)
override {}
5350std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5358 if (
TBB == LoopBB && FBB == LoopBB)
5365 assert((
TBB == LoopBB || FBB == LoopBB) &&
5366 "The Loop must be a single-basic-block loop");
5377 if (!Reg.isVirtual())
5384 if (LHS && LHS->isPHI())
5386 if (RHS && RHS->isPHI())
5389 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5395 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5412 case RISCV::FDIV_H_INX:
5413 case RISCV::FDIV_S_INX:
5414 case RISCV::FDIV_D_INX:
5415 case RISCV::FDIV_D_IN32X:
5416 case RISCV::FSQRT_H:
5417 case RISCV::FSQRT_S:
5418 case RISCV::FSQRT_D:
5419 case RISCV::FSQRT_H_INX:
5420 case RISCV::FSQRT_S_INX:
5421 case RISCV::FSQRT_D_INX:
5422 case RISCV::FSQRT_D_IN32X:
5424 case RISCV::VDIV_VV:
5425 case RISCV::VDIV_VX:
5426 case RISCV::VDIVU_VV:
5427 case RISCV::VDIVU_VX:
5428 case RISCV::VREM_VV:
5429 case RISCV::VREM_VX:
5430 case RISCV::VREMU_VV:
5431 case RISCV::VREMU_VX:
5433 case RISCV::VFDIV_VV:
5434 case RISCV::VFDIV_VF:
5435 case RISCV::VFRDIV_VF:
5436 case RISCV::VFSQRT_V:
5437 case RISCV::VFRSQRT7_V:
5443 if (
MI->getOpcode() != TargetOpcode::COPY)
5448 Register DstReg =
MI->getOperand(0).getReg();
5451 :
TRI->getMinimalPhysRegClass(DstReg);
5461 auto [RCLMul, RCFractional] =
5463 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5467 if (
MI.memoperands_empty())
5482 if (MO.getReg().isPhysical())
5485 if (MO.getReg().isPhysical())
5487 bool SawStore =
false;
5490 if (
II->definesRegister(PhysReg,
nullptr))
5493 if (
II->definesRegister(PhysReg,
nullptr) ||
5494 II->readsRegister(PhysReg,
nullptr))
5496 if (
II->mayStore()) {
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
static cl::opt< bool > OutlinerEnableRegSave("riscv-outliner-regsave", cl::init(true), cl::Hidden, cl::desc("Enable RegSave strategy in machine outliner (save X5 to a " "temporary register when X5 is live across outlined calls)."))
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static Register findRegisterToSaveX5To(outliner::Candidate &C, const TargetRegisterInfo &TRI)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define CASE_RVV_OPCODE_LMUL(OP, LMUL)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To)
Return true if moving From down to To won't cause any physical register reads or writes to be clobber...
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
self_iterator getIterator()
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getInverseBranchOpcode(unsigned BCC)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
static bool isValidVType(unsigned VType)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
unsigned M1(unsigned Val)
constexpr bool has_single_bit(T Value) noexcept
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
constexpr bool isShiftedUInt(uint64_t x)
Checks if an unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.