41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
69 cl::desc(
"Enable RegSave strategy in machine outliner (save X5 to a "
70 "temporary register when X5 is live across outlined calls)."));
76#define GET_RISCVVPseudosTable_IMPL
77#include "RISCVGenSearchableTables.inc"
83#define GET_RISCVMaskedPseudosTable_IMPL
84#include "RISCVGenSearchableTables.inc"
90 RISCV::ADJCALLSTACKUP),
93#define GET_INSTRINFO_HELPERS
94#include "RISCVGenInstrInfo.inc"
97 if (
STI.hasStdExtZca())
106 int &FrameIndex)
const {
116 case RISCV::VL1RE8_V:
117 case RISCV::VL1RE16_V:
118 case RISCV::VL1RE32_V:
119 case RISCV::VL1RE64_V:
122 case RISCV::VL2RE8_V:
123 case RISCV::VL2RE16_V:
124 case RISCV::VL2RE32_V:
125 case RISCV::VL2RE64_V:
128 case RISCV::VL4RE8_V:
129 case RISCV::VL4RE16_V:
130 case RISCV::VL4RE32_V:
131 case RISCV::VL4RE64_V:
134 case RISCV::VL8RE8_V:
135 case RISCV::VL8RE16_V:
136 case RISCV::VL8RE32_V:
137 case RISCV::VL8RE64_V:
145 switch (
MI.getOpcode()) {
169 case RISCV::VL1RE8_V:
170 case RISCV::VL2RE8_V:
171 case RISCV::VL4RE8_V:
172 case RISCV::VL8RE8_V:
173 if (!
MI.getOperand(1).isFI())
175 FrameIndex =
MI.getOperand(1).getIndex();
178 return MI.getOperand(0).getReg();
181 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
182 MI.getOperand(2).getImm() == 0) {
183 FrameIndex =
MI.getOperand(1).getIndex();
184 return MI.getOperand(0).getReg();
191 int &FrameIndex)
const {
199 switch (
MI.getOpcode()) {
224 if (!
MI.getOperand(1).isFI())
226 FrameIndex =
MI.getOperand(1).getIndex();
229 return MI.getOperand(0).getReg();
232 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
233 MI.getOperand(2).getImm() == 0) {
234 FrameIndex =
MI.getOperand(1).getIndex();
235 return MI.getOperand(0).getReg();
245 case RISCV::VFMV_V_F:
248 case RISCV::VFMV_S_F:
250 return MI.getOperand(1).isUndef();
258 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
269 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
270 "Unexpected COPY instruction.");
274 bool FoundDef =
false;
275 bool FirstVSetVLI =
false;
276 unsigned FirstSEW = 0;
279 if (
MBBI->isMetaInstruction())
282 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
292 unsigned FirstVType =
MBBI->getOperand(2).getImm();
297 if (FirstLMul != LMul)
302 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
308 unsigned VType =
MBBI->getOperand(2).getImm();
326 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
328 }
else if (
MBBI->getNumDefs()) {
331 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
337 if (!MO.isReg() || !MO.isDef())
339 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
354 if (MO.getReg() != SrcReg)
395 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
396 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
398 assert(!Fractional &&
"It is impossible be fractional lmul here.");
399 unsigned NumRegs = NF * LMulVal;
405 SrcEncoding += NumRegs - 1;
406 DstEncoding += NumRegs - 1;
412 unsigned,
unsigned> {
420 uint16_t Diff = DstEncoding - SrcEncoding;
421 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
422 DstEncoding % 8 == 7)
424 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
425 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
426 DstEncoding % 4 == 3)
428 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
429 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
430 DstEncoding % 2 == 1)
432 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
435 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
440 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
442 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
443 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
445 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
446 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
448 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
451 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
454 while (
I != NumRegs) {
459 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
460 GetCopyInfo(SrcEncoding, DstEncoding);
464 if (LMul == LMulCopied &&
467 if (DefMBBI->getOpcode() == VIOpc)
474 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
476 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
484 MIB = MIB.add(DefMBBI->getOperand(2));
492 MIB.addImm(Log2SEW ? Log2SEW : 3);
504 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
505 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
514 bool RenamableDest,
bool RenamableSrc)
const {
518 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
525 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
531 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
537 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
539 if (
STI.hasStdExtZdinx()) {
548 if (
STI.hasStdExtP()) {
557 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
558 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
560 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
562 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
566 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
567 .
addReg(EvenReg, KillFlag)
570 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
577 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
578 RISCV::GPRRegClass.
contains(DstReg)) {
580 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
585 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
587 if (
STI.hasStdExtZfh()) {
588 Opc = RISCV::FSGNJ_H;
591 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
592 "Unexpected extensions");
594 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
595 &RISCV::FPR32RegClass);
596 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
597 &RISCV::FPR32RegClass);
598 Opc = RISCV::FSGNJ_S;
602 .
addReg(SrcReg, KillFlag);
606 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
609 .
addReg(SrcReg, KillFlag);
613 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
616 .
addReg(SrcReg, KillFlag);
620 if (RISCV::FPR32RegClass.
contains(DstReg) &&
621 RISCV::GPRRegClass.
contains(SrcReg)) {
623 .
addReg(SrcReg, KillFlag);
627 if (RISCV::GPRRegClass.
contains(DstReg) &&
628 RISCV::FPR32RegClass.
contains(SrcReg)) {
630 .
addReg(SrcReg, KillFlag);
634 if (RISCV::FPR64RegClass.
contains(DstReg) &&
635 RISCV::GPRRegClass.
contains(SrcReg)) {
636 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
638 .
addReg(SrcReg, KillFlag);
642 if (RISCV::GPRRegClass.
contains(DstReg) &&
643 RISCV::FPR64RegClass.
contains(SrcReg)) {
644 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
646 .
addReg(SrcReg, KillFlag);
652 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
663 Register SrcReg,
bool IsKill,
int FI,
672 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
673 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
675 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
676 Opcode = RISCV::SH_INX;
677 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
678 Opcode = RISCV::SW_INX;
679 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
680 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
681 Alignment >=
STI.getZilsdAlign()) {
682 Opcode = RISCV::SD_RV32;
684 Opcode = RISCV::PseudoRV32ZdinxSD;
686 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
688 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
690 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
692 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
693 Opcode = RISCV::VS1R_V;
694 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
695 Opcode = RISCV::VS2R_V;
696 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
697 Opcode = RISCV::VS4R_V;
698 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
699 Opcode = RISCV::VS8R_V;
700 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
701 Opcode = RISCV::PseudoVSPILL2_M1;
702 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
703 Opcode = RISCV::PseudoVSPILL2_M2;
704 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
705 Opcode = RISCV::PseudoVSPILL2_M4;
706 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
707 Opcode = RISCV::PseudoVSPILL3_M1;
708 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
709 Opcode = RISCV::PseudoVSPILL3_M2;
710 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
711 Opcode = RISCV::PseudoVSPILL4_M1;
712 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
713 Opcode = RISCV::PseudoVSPILL4_M2;
714 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
715 Opcode = RISCV::PseudoVSPILL5_M1;
716 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
717 Opcode = RISCV::PseudoVSPILL6_M1;
718 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
719 Opcode = RISCV::PseudoVSPILL7_M1;
720 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
721 Opcode = RISCV::PseudoVSPILL8_M1;
764 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
765 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
767 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
768 Opcode = RISCV::LH_INX;
769 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
770 Opcode = RISCV::LW_INX;
771 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
772 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
773 Alignment >=
STI.getZilsdAlign()) {
774 Opcode = RISCV::LD_RV32;
776 Opcode = RISCV::PseudoRV32ZdinxLD;
778 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
780 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
782 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
784 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
785 Opcode = RISCV::VL1RE8_V;
786 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
787 Opcode = RISCV::VL2RE8_V;
788 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
789 Opcode = RISCV::VL4RE8_V;
790 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
791 Opcode = RISCV::VL8RE8_V;
792 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
793 Opcode = RISCV::PseudoVRELOAD2_M1;
794 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
795 Opcode = RISCV::PseudoVRELOAD2_M2;
796 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
797 Opcode = RISCV::PseudoVRELOAD2_M4;
798 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
799 Opcode = RISCV::PseudoVRELOAD3_M1;
800 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
801 Opcode = RISCV::PseudoVRELOAD3_M2;
802 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
803 Opcode = RISCV::PseudoVRELOAD4_M1;
804 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
805 Opcode = RISCV::PseudoVRELOAD4_M2;
806 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
807 Opcode = RISCV::PseudoVRELOAD5_M1;
808 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
809 Opcode = RISCV::PseudoVRELOAD6_M1;
810 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
811 Opcode = RISCV::PseudoVRELOAD7_M1;
812 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
813 Opcode = RISCV::PseudoVRELOAD8_M1;
851 if (
Ops.size() != 1 ||
Ops[0] != 1)
854 switch (
MI.getOpcode()) {
856 if (RISCVInstrInfo::isSEXT_W(
MI))
858 if (RISCVInstrInfo::isZEXT_W(
MI))
860 if (RISCVInstrInfo::isZEXT_B(
MI))
867 case RISCV::ZEXT_H_RV32:
868 case RISCV::ZEXT_H_RV64:
875 case RISCV::VMV_X_S: {
878 if (ST.getXLen() < (1U << Log2SEW))
893 case RISCV::VFMV_F_S: {
921 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
930 return RISCV::PseudoCCLB;
932 return RISCV::PseudoCCLBU;
934 return RISCV::PseudoCCLH;
936 return RISCV::PseudoCCLHU;
938 return RISCV::PseudoCCLW;
940 return RISCV::PseudoCCLWU;
942 return RISCV::PseudoCCLD;
944 return RISCV::PseudoCCQC_E_LB;
945 case RISCV::QC_E_LBU:
946 return RISCV::PseudoCCQC_E_LBU;
948 return RISCV::PseudoCCQC_E_LH;
949 case RISCV::QC_E_LHU:
950 return RISCV::PseudoCCQC_E_LHU;
952 return RISCV::PseudoCCQC_E_LW;
964 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
969 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
973 if (
Ops.size() != 1 || (
Ops[0] != 1 &&
Ops[0] != 2))
976 bool Invert =
Ops[0] == 2;
985 MI.getDebugLoc(),
get(PredOpc), DestReg);
996 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1002 NewMI.
add({
MI.getOperand(
MI.getNumExplicitOperands() - 2),
1003 MI.getOperand(
MI.getNumExplicitOperands() - 1)});
1012 bool DstIsDead)
const {
1028 bool SrcRenamable =
false;
1032 bool LastItem = ++Num == Seq.
size();
1037 switch (Inst.getOpndKind()) {
1047 .
addReg(SrcReg, SrcRegState)
1054 .
addReg(SrcReg, SrcRegState)
1055 .
addReg(SrcReg, SrcRegState)
1061 .
addReg(SrcReg, SrcRegState)
1069 SrcRenamable = DstRenamable;
1079 case RISCV::CV_BEQIMM:
1080 case RISCV::QC_BEQI:
1081 case RISCV::QC_E_BEQI:
1082 case RISCV::NDS_BBC:
1083 case RISCV::NDS_BEQC:
1087 case RISCV::QC_BNEI:
1088 case RISCV::QC_E_BNEI:
1089 case RISCV::CV_BNEIMM:
1090 case RISCV::NDS_BBS:
1091 case RISCV::NDS_BNEC:
1094 case RISCV::QC_BLTI:
1095 case RISCV::QC_E_BLTI:
1098 case RISCV::QC_BGEI:
1099 case RISCV::QC_E_BGEI:
1102 case RISCV::QC_BLTUI:
1103 case RISCV::QC_E_BLTUI:
1106 case RISCV::QC_BGEUI:
1107 case RISCV::QC_E_BGEUI:
1139 "Unknown conditional branch");
1150 case RISCV::QC_MVEQ:
1151 return RISCV::QC_MVNE;
1152 case RISCV::QC_MVNE:
1153 return RISCV::QC_MVEQ;
1154 case RISCV::QC_MVLT:
1155 return RISCV::QC_MVGE;
1156 case RISCV::QC_MVGE:
1157 return RISCV::QC_MVLT;
1158 case RISCV::QC_MVLTU:
1159 return RISCV::QC_MVGEU;
1160 case RISCV::QC_MVGEU:
1161 return RISCV::QC_MVLTU;
1162 case RISCV::QC_MVEQI:
1163 return RISCV::QC_MVNEI;
1164 case RISCV::QC_MVNEI:
1165 return RISCV::QC_MVEQI;
1166 case RISCV::QC_MVLTI:
1167 return RISCV::QC_MVGEI;
1168 case RISCV::QC_MVGEI:
1169 return RISCV::QC_MVLTI;
1170 case RISCV::QC_MVLTUI:
1171 return RISCV::QC_MVGEUI;
1172 case RISCV::QC_MVGEUI:
1173 return RISCV::QC_MVLTUI;
1178 switch (SelectOpc) {
1197 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1207 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1212 return RISCV::CV_BEQIMM;
1214 return RISCV::CV_BNEIMM;
1217 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1222 return RISCV::QC_BEQI;
1224 return RISCV::QC_BNEI;
1226 return RISCV::QC_BLTI;
1228 return RISCV::QC_BGEI;
1231 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1236 return RISCV::QC_BLTUI;
1238 return RISCV::QC_BGEUI;
1241 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1246 return RISCV::QC_E_BEQI;
1248 return RISCV::QC_E_BNEI;
1250 return RISCV::QC_E_BLTI;
1252 return RISCV::QC_E_BGEI;
1255 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1260 return RISCV::QC_E_BLTUI;
1262 return RISCV::QC_E_BGEUI;
1265 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1270 return RISCV::NDS_BBC;
1272 return RISCV::NDS_BBS;
1275 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1280 return RISCV::NDS_BEQC;
1282 return RISCV::NDS_BNEC;
1328 case RISCV::CV_BEQIMM:
1329 return RISCV::CV_BNEIMM;
1330 case RISCV::CV_BNEIMM:
1331 return RISCV::CV_BEQIMM;
1332 case RISCV::QC_BEQI:
1333 return RISCV::QC_BNEI;
1334 case RISCV::QC_BNEI:
1335 return RISCV::QC_BEQI;
1336 case RISCV::QC_BLTI:
1337 return RISCV::QC_BGEI;
1338 case RISCV::QC_BGEI:
1339 return RISCV::QC_BLTI;
1340 case RISCV::QC_BLTUI:
1341 return RISCV::QC_BGEUI;
1342 case RISCV::QC_BGEUI:
1343 return RISCV::QC_BLTUI;
1344 case RISCV::QC_E_BEQI:
1345 return RISCV::QC_E_BNEI;
1346 case RISCV::QC_E_BNEI:
1347 return RISCV::QC_E_BEQI;
1348 case RISCV::QC_E_BLTI:
1349 return RISCV::QC_E_BGEI;
1350 case RISCV::QC_E_BGEI:
1351 return RISCV::QC_E_BLTI;
1352 case RISCV::QC_E_BLTUI:
1353 return RISCV::QC_E_BGEUI;
1354 case RISCV::QC_E_BGEUI:
1355 return RISCV::QC_E_BLTUI;
1356 case RISCV::NDS_BBC:
1357 return RISCV::NDS_BBS;
1358 case RISCV::NDS_BBS:
1359 return RISCV::NDS_BBC;
1360 case RISCV::NDS_BEQC:
1361 return RISCV::NDS_BNEC;
1362 case RISCV::NDS_BNEC:
1363 return RISCV::NDS_BEQC;
1371 bool AllowModify)
const {
1372 TBB = FBB =
nullptr;
1377 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1383 int NumTerminators = 0;
1384 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1387 if (J->getDesc().isUnconditionalBranch() ||
1388 J->getDesc().isIndirectBranch()) {
1395 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1396 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1397 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1400 I = FirstUncondOrIndirectBr;
1404 if (
I->getDesc().isIndirectBranch())
1408 if (
I->isPreISelOpcode())
1412 if (NumTerminators > 2)
1416 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1422 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1428 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1429 I->getDesc().isUnconditionalBranch()) {
1440 int *BytesRemoved)
const {
1447 if (!
I->getDesc().isUnconditionalBranch() &&
1448 !
I->getDesc().isConditionalBranch())
1454 I->eraseFromParent();
1458 if (
I ==
MBB.begin())
1461 if (!
I->getDesc().isConditionalBranch())
1467 I->eraseFromParent();
1480 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1482 "RISC-V branch conditions have two components!");
1516 assert(RS &&
"RegScavenger required for long branching");
1518 "new block should be inserted for expanding unconditional branch");
1521 "restore block should be inserted for restoring clobbered registers");
1530 "Branch offsets outside of the signed 32-bit range not supported");
1536 auto II =
MBB.end();
1542 RS->enterBasicBlockEnd(
MBB);
1549 RC = &RISCV::GPRX7RegClass;
1551 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1555 RS->setRegUsed(TmpGPR);
1560 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1566 if (FrameIndex == -1)
1571 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1574 MI.getOperand(1).setMBB(&RestoreBB);
1578 TRI->eliminateFrameIndex(RestoreBB.
back(),
1588 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1598 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1599 MI->getOperand(1).getReg() == RISCV::X0) {
1600 Imm =
MI->getOperand(2).getImm();
1605 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1606 MI->getOperand(1).getReg() == RISCV::X0 &&
1607 MI->getOperand(2).getImm() == 11) {
1621 if (Reg == RISCV::X0) {
1629 bool IsSigned =
false;
1630 bool IsEquality =
false;
1631 switch (
MI.getOpcode()) {
1667 MI.eraseFromParent();
1693 auto searchConst = [&](int64_t C1) ->
Register {
1695 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1698 I.getOperand(0).getReg().isVirtual();
1701 return DefC1->getOperand(0).getReg();
1713 if (
isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1714 MRI.
hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1716 if (
Register RegZ = searchConst(C0 + 1)) {
1724 MI.eraseFromParent();
1734 if (
isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1737 if (
Register RegZ = searchConst(C0 - 1)) {
1745 MI.eraseFromParent();
1755 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1757 int NumOp =
MI.getNumExplicitOperands();
1758 return MI.getOperand(NumOp - 1).getMBB();
1762 int64_t BrOffset)
const {
1763 unsigned XLen =
STI.getXLen();
1770 case RISCV::NDS_BBC:
1771 case RISCV::NDS_BBS:
1772 case RISCV::NDS_BEQC:
1773 case RISCV::NDS_BNEC:
1783 case RISCV::CV_BEQIMM:
1784 case RISCV::CV_BNEIMM:
1785 case RISCV::QC_BEQI:
1786 case RISCV::QC_BNEI:
1787 case RISCV::QC_BGEI:
1788 case RISCV::QC_BLTI:
1789 case RISCV::QC_BLTUI:
1790 case RISCV::QC_BGEUI:
1791 case RISCV::QC_E_BEQI:
1792 case RISCV::QC_E_BNEI:
1793 case RISCV::QC_E_BGEI:
1794 case RISCV::QC_E_BLTI:
1795 case RISCV::QC_E_BLTUI:
1796 case RISCV::QC_E_BGEUI:
1799 case RISCV::PseudoBR:
1801 case RISCV::PseudoJump:
1812 case RISCV::ADD:
return RISCV::PseudoCCADD;
1813 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1814 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1815 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1816 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1817 case RISCV::AND:
return RISCV::PseudoCCAND;
1818 case RISCV::OR:
return RISCV::PseudoCCOR;
1819 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1820 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1821 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1822 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1823 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1824 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1825 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1826 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1827 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1829 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1830 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1831 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1832 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1833 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1834 case RISCV::ORI:
return RISCV::PseudoCCORI;
1835 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1837 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1838 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1839 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1840 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1841 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1843 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1844 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1845 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1846 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1848 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1849 case RISCV::ORN:
return RISCV::PseudoCCORN;
1850 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1852 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1853 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1857 return RISCV::INSTRUCTION_LIST_END;
1866 if (!
Reg.isVirtual())
1874 if (!STI.hasShortForwardBranchIMinMax() &&
1875 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1876 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1879 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1886 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1887 MI->getOperand(1).getReg() == RISCV::X0)
1892 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1905 bool DontMoveAcrossStores =
true;
1906 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1914 bool PreferFalse)
const {
1915 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1916 "Unknown select instruction");
1917 if (!
STI.hasShortForwardBranchIALU())
1923 bool Invert = !
DefMI;
1931 Register DestReg =
MI.getOperand(0).getReg();
1937 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1944 NewMI.
add(FalseReg);
1952 unsigned BCCOpcode =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1958 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 2));
1959 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 1));
1969 if (
DefMI->getParent() !=
MI.getParent())
1973 DefMI->eraseFromParent();
1978 if (
MI.isMetaInstruction())
1981 unsigned Opcode =
MI.getOpcode();
1983 if (Opcode == TargetOpcode::INLINEASM ||
1984 Opcode == TargetOpcode::INLINEASM_BR) {
1986 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1991 if (
STI.hasStdExtZca()) {
1992 if (isCompressibleInst(
MI,
STI))
1999 if (Opcode == TargetOpcode::BUNDLE)
2000 return getInstBundleSize(
MI);
2002 if (
MI.getParent() &&
MI.getParent()->getParent()) {
2003 if (isCompressibleInst(
MI,
STI))
2008 case RISCV::PseudoMV_FPR16INX:
2009 case RISCV::PseudoMV_FPR32INX:
2011 return STI.hasStdExtZca() ? 2 : 4;
2013 case RISCV::PseudoCCMOVGPRNoX0:
2014 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2017 case RISCV::PseudoCCMOVGPR:
2018 case RISCV::PseudoCCADD:
2019 case RISCV::PseudoCCSUB:
2020 case RISCV::PseudoCCSLL:
2021 case RISCV::PseudoCCSRL:
2022 case RISCV::PseudoCCSRA:
2023 case RISCV::PseudoCCAND:
2024 case RISCV::PseudoCCOR:
2025 case RISCV::PseudoCCXOR:
2026 case RISCV::PseudoCCADDI:
2027 case RISCV::PseudoCCANDI:
2028 case RISCV::PseudoCCORI:
2029 case RISCV::PseudoCCXORI:
2030 case RISCV::PseudoCCLUI:
2031 case RISCV::PseudoCCSLLI:
2032 case RISCV::PseudoCCSRLI:
2033 case RISCV::PseudoCCSRAI:
2034 case RISCV::PseudoCCADDW:
2035 case RISCV::PseudoCCSUBW:
2036 case RISCV::PseudoCCSLLW:
2037 case RISCV::PseudoCCSRLW:
2038 case RISCV::PseudoCCSRAW:
2039 case RISCV::PseudoCCADDIW:
2040 case RISCV::PseudoCCSLLIW:
2041 case RISCV::PseudoCCSRLIW:
2042 case RISCV::PseudoCCSRAIW:
2043 case RISCV::PseudoCCANDN:
2044 case RISCV::PseudoCCORN:
2045 case RISCV::PseudoCCXNOR:
2046 case RISCV::PseudoCCMAX:
2047 case RISCV::PseudoCCMIN:
2048 case RISCV::PseudoCCMAXU:
2049 case RISCV::PseudoCCMINU:
2050 case RISCV::PseudoCCMUL:
2051 case RISCV::PseudoCCLB:
2052 case RISCV::PseudoCCLH:
2053 case RISCV::PseudoCCLW:
2054 case RISCV::PseudoCCLHU:
2055 case RISCV::PseudoCCLBU:
2056 case RISCV::PseudoCCLWU:
2057 case RISCV::PseudoCCLD:
2058 case RISCV::PseudoCCQC_LI:
2059 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2062 case RISCV::PseudoCCQC_E_LI:
2063 case RISCV::PseudoCCQC_E_LB:
2064 case RISCV::PseudoCCQC_E_LH:
2065 case RISCV::PseudoCCQC_E_LW:
2066 case RISCV::PseudoCCQC_E_LHU:
2067 case RISCV::PseudoCCQC_E_LBU:
2068 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2071 case TargetOpcode::STACKMAP:
2074 case TargetOpcode::PATCHPOINT:
2077 case TargetOpcode::STATEPOINT: {
2081 return std::max(NumBytes, 8U);
2083 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2084 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2085 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2088 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2089 F.hasFnAttribute(
"patchable-function-entry")) {
2091 F.getFnAttributeAsParsedInteger(
"patchable-function-entry");
2093 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2097 return STI.is64Bit() ? 68 : 44;
2100 return get(Opcode).getSize();
2105 const unsigned Opcode =
MI.getOpcode();
2109 case RISCV::FSGNJ_D:
2110 case RISCV::FSGNJ_S:
2111 case RISCV::FSGNJ_H:
2112 case RISCV::FSGNJ_D_INX:
2113 case RISCV::FSGNJ_D_IN32X:
2114 case RISCV::FSGNJ_S_INX:
2115 case RISCV::FSGNJ_H_INX:
2117 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2118 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2122 return (
MI.getOperand(1).isReg() &&
2123 MI.getOperand(1).getReg() == RISCV::X0) ||
2124 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2126 return MI.isAsCheapAsAMove();
2129std::optional<DestSourcePair>
2133 switch (
MI.getOpcode()) {
2139 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2140 MI.getOperand(2).isReg())
2142 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2143 MI.getOperand(1).isReg())
2148 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2149 MI.getOperand(2).getImm() == 0)
2153 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2154 MI.getOperand(1).isReg())
2158 case RISCV::SH1ADD_UW:
2160 case RISCV::SH2ADD_UW:
2162 case RISCV::SH3ADD_UW:
2163 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2164 MI.getOperand(2).isReg())
2167 case RISCV::FSGNJ_D:
2168 case RISCV::FSGNJ_S:
2169 case RISCV::FSGNJ_H:
2170 case RISCV::FSGNJ_D_INX:
2171 case RISCV::FSGNJ_D_IN32X:
2172 case RISCV::FSGNJ_S_INX:
2173 case RISCV::FSGNJ_H_INX:
2175 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2176 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2180 return std::nullopt;
2188 const auto &SchedModel =
STI.getSchedModel();
2189 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2201 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2205 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2206 RISCV::OpName::frm) < 0;
2208 "New instructions require FRM whereas the old one does not have it");
2215 for (
auto *NewMI : InsInstrs) {
2217 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2218 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2260bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2261 bool Invert)
const {
2262#define OPCODE_LMUL_CASE(OPC) \
2263 case RISCV::OPC##_M1: \
2264 case RISCV::OPC##_M2: \
2265 case RISCV::OPC##_M4: \
2266 case RISCV::OPC##_M8: \
2267 case RISCV::OPC##_MF2: \
2268 case RISCV::OPC##_MF4: \
2269 case RISCV::OPC##_MF8
2271#define OPCODE_LMUL_MASK_CASE(OPC) \
2272 case RISCV::OPC##_M1_MASK: \
2273 case RISCV::OPC##_M2_MASK: \
2274 case RISCV::OPC##_M4_MASK: \
2275 case RISCV::OPC##_M8_MASK: \
2276 case RISCV::OPC##_MF2_MASK: \
2277 case RISCV::OPC##_MF4_MASK: \
2278 case RISCV::OPC##_MF8_MASK
2283 Opcode = *InvOpcode;
2300#undef OPCODE_LMUL_MASK_CASE
2301#undef OPCODE_LMUL_CASE
2304bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2315 const uint64_t TSFlags =
Desc.TSFlags;
2317 auto checkImmOperand = [&](
unsigned OpIdx) {
2321 auto checkRegOperand = [&](
unsigned OpIdx) {
2329 if (!checkRegOperand(1))
2344 bool SeenMI2 =
false;
2345 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2354 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2355 Register SrcReg = It->getOperand(1).getReg();
2373 if (MI1VReg != SrcReg)
2382 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2422bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2423 bool &Commuted)
const {
2427 "Expect the present of passthrough operand.");
2433 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2434 areRVVInstsReassociable(Inst, *MI2);
2438 return areRVVInstsReassociable(Inst, *MI1) &&
2439 (isVectorAssociativeAndCommutative(*MI1) ||
2440 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2447 if (!isVectorAssociativeAndCommutative(Inst) &&
2448 !isVectorAssociativeAndCommutative(Inst,
true))
2474 for (
unsigned I = 0;
I < 5; ++
I)
2480 bool &Commuted)
const {
2481 if (isVectorAssociativeAndCommutative(Inst) ||
2482 isVectorAssociativeAndCommutative(Inst,
true))
2483 return hasReassociableVectorSibling(Inst, Commuted);
2489 unsigned OperandIdx = Commuted ? 2 : 1;
2493 int16_t InstFrmOpIdx =
2494 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2495 int16_t SiblingFrmOpIdx =
2496 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2498 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2503 bool Invert)
const {
2504 if (isVectorAssociativeAndCommutative(Inst, Invert))
2512 Opc = *InverseOpcode;
2557std::optional<unsigned>
2559#define RVV_OPC_LMUL_CASE(OPC, INV) \
2560 case RISCV::OPC##_M1: \
2561 return RISCV::INV##_M1; \
2562 case RISCV::OPC##_M2: \
2563 return RISCV::INV##_M2; \
2564 case RISCV::OPC##_M4: \
2565 return RISCV::INV##_M4; \
2566 case RISCV::OPC##_M8: \
2567 return RISCV::INV##_M8; \
2568 case RISCV::OPC##_MF2: \
2569 return RISCV::INV##_MF2; \
2570 case RISCV::OPC##_MF4: \
2571 return RISCV::INV##_MF4; \
2572 case RISCV::OPC##_MF8: \
2573 return RISCV::INV##_MF8
2575#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2576 case RISCV::OPC##_M1_MASK: \
2577 return RISCV::INV##_M1_MASK; \
2578 case RISCV::OPC##_M2_MASK: \
2579 return RISCV::INV##_M2_MASK; \
2580 case RISCV::OPC##_M4_MASK: \
2581 return RISCV::INV##_M4_MASK; \
2582 case RISCV::OPC##_M8_MASK: \
2583 return RISCV::INV##_M8_MASK; \
2584 case RISCV::OPC##_MF2_MASK: \
2585 return RISCV::INV##_MF2_MASK; \
2586 case RISCV::OPC##_MF4_MASK: \
2587 return RISCV::INV##_MF4_MASK; \
2588 case RISCV::OPC##_MF8_MASK: \
2589 return RISCV::INV##_MF8_MASK
2593 return std::nullopt;
2595 return RISCV::FSUB_H;
2597 return RISCV::FSUB_S;
2599 return RISCV::FSUB_D;
2601 return RISCV::FADD_H;
2603 return RISCV::FADD_S;
2605 return RISCV::FADD_D;
2622#undef RVV_OPC_LMUL_MASK_CASE
2623#undef RVV_OPC_LMUL_CASE
2628 bool DoRegPressureReduce) {
2655 bool DoRegPressureReduce) {
2662 DoRegPressureReduce)) {
2668 DoRegPressureReduce)) {
2678 bool DoRegPressureReduce) {
2686 unsigned CombineOpc) {
2693 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2707 unsigned OuterShiftAmt) {
2713 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2740 case RISCV::SH1ADD_UW:
2742 case RISCV::SH2ADD_UW:
2744 case RISCV::SH3ADD_UW:
2790 bool DoRegPressureReduce)
const {
2799 DoRegPressureReduce);
2807 return RISCV::FMADD_H;
2809 return RISCV::FMADD_S;
2811 return RISCV::FMADD_D;
2856 bool Mul1IsKill = Mul1.
isKill();
2857 bool Mul2IsKill = Mul2.
isKill();
2858 bool AddendIsKill = Addend.
isKill();
2867 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2892 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2899 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2902 switch (InnerShiftAmt - OuterShiftAmt) {
2906 InnerOpc = RISCV::ADD;
2909 InnerOpc = RISCV::SH1ADD;
2912 InnerOpc = RISCV::SH2ADD;
2915 InnerOpc = RISCV::SH3ADD;
2933 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2950 DelInstrs, InstrIdxForVirtReg);
2977 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2979 unsigned OpType = Operand.OperandType;
2985 ErrInfo =
"Expected an immediate operand.";
2988 int64_t Imm = MO.
getImm();
2994#define CASE_OPERAND_UIMM(NUM) \
2995 case RISCVOp::OPERAND_UIMM##NUM: \
2996 Ok = isUInt<NUM>(Imm); \
2998#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
2999 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
3000 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
3001 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
3004#define CASE_OPERAND_SIMM(NUM) \
3005 case RISCVOp::OPERAND_SIMM##NUM: \
3006 Ok = isInt<NUM>(Imm); \
3040 Ok = Imm >= 1 && Imm <= 32;
3043 Ok = Imm >= 1 && Imm <= 64;
3064 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3075 Ok = Imm >= -15 && Imm <= 16;
3103 Ok = Ok && Imm != 0;
3106 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3109 Ok = Imm >= 0 && Imm <= 10;
3112 Ok = Imm >= 0 && Imm <= 7;
3115 Ok = Imm >= 1 && Imm <= 10;
3118 Ok = Imm >= 2 && Imm <= 14;
3127 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3162 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3166 ErrInfo =
"Invalid immediate";
3175 ErrInfo =
"Expected a non-register operand.";
3179 ErrInfo =
"Invalid immediate";
3188 ErrInfo =
"Expected a non-register operand.";
3192 ErrInfo =
"Invalid immediate";
3200 ErrInfo =
"Expected a non-register operand.";
3204 ErrInfo =
"Invalid immediate";
3210 int64_t Imm = MO.
getImm();
3213 ErrInfo =
"Invalid immediate";
3216 }
else if (!MO.
isReg()) {
3217 ErrInfo =
"Expected a register or immediate operand.";
3223 ErrInfo =
"Expected a register or immediate operand.";
3233 if (!
Op.isImm() && !
Op.isReg()) {
3234 ErrInfo =
"Invalid operand type for VL operand";
3237 if (
Op.isReg() &&
Op.getReg().isValid()) {
3240 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3241 ErrInfo =
"Invalid register class for VL operand";
3246 ErrInfo =
"VL operand w/o SEW operand?";
3252 if (!
MI.getOperand(
OpIdx).isImm()) {
3253 ErrInfo =
"SEW value expected to be an immediate";
3258 ErrInfo =
"Unexpected SEW value";
3261 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3263 ErrInfo =
"Unexpected SEW value";
3269 if (!
MI.getOperand(
OpIdx).isImm()) {
3270 ErrInfo =
"Policy operand expected to be an immediate";
3275 ErrInfo =
"Invalid Policy Value";
3279 ErrInfo =
"policy operand w/o VL operand?";
3287 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3288 ErrInfo =
"policy operand w/o tied operand?";
3295 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3296 ErrInfo =
"dynamic rounding mode should read FRM";
3318 case RISCV::LD_RV32:
3328 case RISCV::SD_RV32:
3344 int64_t NewOffset = OldOffset + Disp;
3366 "Addressing mode not supported for folding");
3439 case RISCV::LD_RV32:
3442 case RISCV::SD_RV32:
3449 OffsetIsScalable =
false;
3465 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3473 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3476 auto Base1 = MO1->getValue();
3477 auto Base2 = MO2->getValue();
3478 if (!Base1 || !Base2)
3486 return Base1 == Base2;
3492 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3493 unsigned NumBytes)
const {
3496 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3501 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3507 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3513 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3563 int64_t OffsetA = 0, OffsetB = 0;
3569 int LowOffset = std::min(OffsetA, OffsetB);
3570 int HighOffset = std::max(OffsetA, OffsetB);
3571 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3573 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3580std::pair<unsigned, unsigned>
3583 return std::make_pair(TF & Mask, TF & ~Mask);
3589 static const std::pair<unsigned, const char *> TargetFlags[] = {
3590 {MO_CALL,
"riscv-call"},
3591 {MO_LO,
"riscv-lo"},
3592 {MO_HI,
"riscv-hi"},
3593 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3594 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3595 {MO_GOT_HI,
"riscv-got-hi"},
3596 {MO_TPREL_LO,
"riscv-tprel-lo"},
3597 {MO_TPREL_HI,
"riscv-tprel-hi"},
3598 {MO_TPREL_ADD,
"riscv-tprel-add"},
3599 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3600 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3601 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3602 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3603 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3604 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3612 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3625 unsigned &Flags)
const {
3645 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3646 F.hasFnAttribute(
"patchable-function-entry");
3651 return MI.readsRegister(RegNo,
TRI) ||
3652 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3657 return MI.modifiesRegister(RegNo,
TRI) ||
3658 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3662 if (!
MBB.back().isReturn())
3701 if (
C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
3702 C.isAvailableInsideSeq(
Reg,
TRI)) {
3716 if (
C.back().isReturn() &&
3717 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3719 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3721 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3722 "the proposed outlined function call\n");
3728 if (
C.back().isReturn()) {
3730 "The candidate who uses return instruction must be outlined "
3737 if (!
C.isAvailableInsideSeq(RISCV::X5, RegInfo))
3741 if (
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo))
3751std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3754 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3755 unsigned MinRepeats)
const {
3763 if (RepeatedSequenceLocs.size() < MinRepeats)
3764 return std::nullopt;
3768 unsigned InstrSizeCExt =
3770 unsigned CallOverhead = 0, FrameOverhead = 0;
3773 unsigned CFICount = 0;
3774 for (
auto &
I : Candidate) {
3775 if (
I.isCFIInstruction())
3786 std::vector<MCCFIInstruction> CFIInstructions =
3787 C.getMF()->getFrameInstructions();
3789 if (CFICount > 0 && CFICount != CFIInstructions.size())
3790 return std::nullopt;
3798 CallOverhead = 4 + InstrSizeCExt;
3805 FrameOverhead = InstrSizeCExt;
3811 return std::nullopt;
3815 for (
auto &
C : RepeatedSequenceLocs) {
3817 if (
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo)) {
3819 unsigned CandCallOverhead = 8;
3824 unsigned CandCallOverhead = InstrSizeCExt + 8 + InstrSizeCExt;
3829 for (
auto &
C : RepeatedSequenceLocs)
3830 C.setCallInfo(MOCI, CallOverhead);
3833 unsigned SequenceSize = 0;
3834 for (
auto &
MI : Candidate)
3837 return std::make_unique<outliner::OutlinedFunction>(
3838 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3844 unsigned Flags)
const {
3848 MBB->getParent()->getSubtarget().getRegisterInfo();
3849 const auto &
F =
MI.getMF()->getFunction();
3854 if (
MI.isCFIInstruction())
3862 for (
const auto &MO :
MI.operands()) {
3867 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3868 F.hasSection() ||
F.getSectionPrefix()))
3885 MBB.addLiveIn(RISCV::X5);
3900 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3907 assert(SaveReg &&
"Cannot find an available register to save/restore X5.");
3918 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3934 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3945 return std::nullopt;
3949 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3950 MI.getOperand(2).isImm())
3951 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3953 return std::nullopt;
3961 std::string GenericComment =
3963 if (!GenericComment.empty())
3964 return GenericComment;
3968 return std::string();
3970 std::string Comment;
3977 switch (OpInfo.OperandType) {
3980 unsigned Imm =
Op.getImm();
3985 unsigned Imm =
Op.getImm();
3990 unsigned Imm =
Op.getImm();
3996 unsigned Log2SEW =
Op.getImm();
3997 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
4003 unsigned Policy =
Op.getImm();
4005 "Invalid Policy Value");
4011 if (
Op.isImm() &&
Op.getImm() == -1)
4033#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
4034 RISCV::Pseudo##OP##_##LMUL
4036#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
4037 RISCV::Pseudo##OP##_##LMUL##_MASK
4039#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
4040 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
4041 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
4043#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
4044 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
4045 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
4046 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
4047 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
4048 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
4049 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
4051#define CASE_RVV_OPCODE_UNMASK(OP) \
4052 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4053 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
4055#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
4056 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
4057 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
4058 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
4059 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
4060 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
4061 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
4063#define CASE_RVV_OPCODE_MASK(OP) \
4064 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
4065 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
4067#define CASE_RVV_OPCODE_WIDEN(OP) \
4068 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4069 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
4071#define CASE_RVV_OPCODE(OP) \
4072 CASE_RVV_OPCODE_UNMASK(OP): \
4073 case CASE_RVV_OPCODE_MASK(OP)
4077#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
4078 RISCV::PseudoV##OP##_##TYPE##_##LMUL
4080#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
4081 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
4082 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
4083 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
4084 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
4085 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
4086 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
4087 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
4090#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
4091 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
4093#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
4094 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
4095 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
4096 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
4097 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
4099#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
4100 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
4101 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
4103#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
4104 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
4105 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
4107#define CASE_VFMA_OPCODE_VV(OP) \
4108 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
4109 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
4110 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
4111 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
4113#define CASE_VFMA_SPLATS(OP) \
4114 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
4115 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
4116 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
4117 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
4121 unsigned &SrcOpIdx1,
4122 unsigned &SrcOpIdx2)
const {
4124 if (!
Desc.isCommutable())
4127 switch (
MI.getOpcode()) {
4128 case RISCV::TH_MVEQZ:
4129 case RISCV::TH_MVNEZ:
4133 if (
MI.getOperand(2).getReg() == RISCV::X0)
4136 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4137 case RISCV::QC_SELECTIEQ:
4138 case RISCV::QC_SELECTINE:
4139 case RISCV::QC_SELECTIIEQ:
4140 case RISCV::QC_SELECTIINE:
4141 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4142 case RISCV::QC_MVEQ:
4143 case RISCV::QC_MVNE:
4144 case RISCV::QC_MVLT:
4145 case RISCV::QC_MVGE:
4146 case RISCV::QC_MVLTU:
4147 case RISCV::QC_MVGEU:
4148 case RISCV::QC_MVEQI:
4149 case RISCV::QC_MVNEI:
4150 case RISCV::QC_MVLTI:
4151 case RISCV::QC_MVGEI:
4152 case RISCV::QC_MVLTUI:
4153 case RISCV::QC_MVGEUI:
4154 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4155 case RISCV::TH_MULA:
4156 case RISCV::TH_MULAW:
4157 case RISCV::TH_MULAH:
4158 case RISCV::TH_MULS:
4159 case RISCV::TH_MULSW:
4160 case RISCV::TH_MULSH:
4162 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4163 case RISCV::PseudoCCMOVGPRNoX0:
4164 case RISCV::PseudoCCMOVGPR:
4166 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4207 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4234 unsigned CommutableOpIdx1 = 1;
4235 unsigned CommutableOpIdx2 = 3;
4236 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4257 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4259 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4263 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4264 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4270 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4271 SrcOpIdx2 == CommuteAnyOperandIndex) {
4274 unsigned CommutableOpIdx1 = SrcOpIdx1;
4275 if (SrcOpIdx1 == SrcOpIdx2) {
4278 CommutableOpIdx1 = 1;
4279 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4281 CommutableOpIdx1 = SrcOpIdx2;
4286 unsigned CommutableOpIdx2;
4287 if (CommutableOpIdx1 != 1) {
4289 CommutableOpIdx2 = 1;
4291 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4296 if (Op1Reg !=
MI.getOperand(2).getReg())
4297 CommutableOpIdx2 = 2;
4299 CommutableOpIdx2 = 3;
4304 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4317#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4318 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4319 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4322#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4323 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4324 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4325 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4326 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4327 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4328 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4329 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4332#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4333 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4334 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4337#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4338 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4339 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4340 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4341 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4343#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4344 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4345 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4347#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4348 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4349 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4351#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4352 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4353 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4354 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4355 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4357#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4358 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4359 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4360 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4361 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4367 unsigned OpIdx2)
const {
4370 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4374 switch (
MI.getOpcode()) {
4375 case RISCV::TH_MVEQZ:
4376 case RISCV::TH_MVNEZ: {
4377 auto &WorkingMI = cloneIfNew(
MI);
4378 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4379 : RISCV::TH_MVEQZ));
4383 case RISCV::QC_SELECTIEQ:
4384 case RISCV::QC_SELECTINE:
4385 case RISCV::QC_SELECTIIEQ:
4386 case RISCV::QC_SELECTIINE:
4388 case RISCV::QC_MVEQ:
4389 case RISCV::QC_MVNE:
4390 case RISCV::QC_MVLT:
4391 case RISCV::QC_MVGE:
4392 case RISCV::QC_MVLTU:
4393 case RISCV::QC_MVGEU:
4394 case RISCV::QC_MVEQI:
4395 case RISCV::QC_MVNEI:
4396 case RISCV::QC_MVLTI:
4397 case RISCV::QC_MVGEI:
4398 case RISCV::QC_MVLTUI:
4399 case RISCV::QC_MVGEUI: {
4400 auto &WorkingMI = cloneIfNew(
MI);
4405 case RISCV::PseudoCCMOVGPRNoX0:
4406 case RISCV::PseudoCCMOVGPR: {
4408 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
4410 auto &WorkingMI = cloneIfNew(
MI);
4411 WorkingMI.getOperand(
MI.getNumExplicitOperands() - 3).setImm(BCC);
4435 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4436 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4438 switch (
MI.getOpcode()) {
4461 auto &WorkingMI = cloneIfNew(
MI);
4462 WorkingMI.setDesc(
get(
Opc));
4472 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4475 if (OpIdx1 == 3 || OpIdx2 == 3) {
4477 switch (
MI.getOpcode()) {
4488 auto &WorkingMI = cloneIfNew(
MI);
4489 WorkingMI.setDesc(
get(
Opc));
4501#undef CASE_VMA_CHANGE_OPCODE_COMMON
4502#undef CASE_VMA_CHANGE_OPCODE_LMULS
4503#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4504#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4505#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4506#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4507#undef CASE_VFMA_CHANGE_OPCODE_VV
4508#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4510#undef CASE_RVV_OPCODE_UNMASK_LMUL
4511#undef CASE_RVV_OPCODE_MASK_LMUL
4512#undef CASE_RVV_OPCODE_LMUL
4513#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4514#undef CASE_RVV_OPCODE_UNMASK
4515#undef CASE_RVV_OPCODE_MASK_WIDEN
4516#undef CASE_RVV_OPCODE_MASK
4517#undef CASE_RVV_OPCODE_WIDEN
4518#undef CASE_RVV_OPCODE
4520#undef CASE_VMA_OPCODE_COMMON
4521#undef CASE_VMA_OPCODE_LMULS
4522#undef CASE_VFMA_OPCODE_COMMON
4523#undef CASE_VFMA_OPCODE_LMULS_M1
4524#undef CASE_VFMA_OPCODE_LMULS_MF2
4525#undef CASE_VFMA_OPCODE_LMULS_MF4
4526#undef CASE_VFMA_OPCODE_VV
4527#undef CASE_VFMA_SPLATS
4530 switch (
MI.getOpcode()) {
4538 if (
MI.getOperand(1).getReg() == RISCV::X0)
4539 commuteInstruction(
MI);
4541 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4542 MI.getOperand(2).ChangeToImmediate(0);
4543 MI.setDesc(
get(RISCV::ADDI));
4547 if (
MI.getOpcode() == RISCV::XOR &&
4548 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4549 MI.getOperand(1).setReg(RISCV::X0);
4550 MI.getOperand(2).ChangeToImmediate(0);
4551 MI.setDesc(
get(RISCV::ADDI));
4558 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4559 MI.setDesc(
get(RISCV::ADDI));
4565 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4566 MI.getOperand(2).ChangeToImmediate(0);
4567 MI.setDesc(
get(RISCV::ADDI));
4573 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4574 MI.getOperand(2).ChangeToImmediate(0);
4575 MI.setDesc(
get(RISCV::ADDIW));
4582 if (
MI.getOperand(1).getReg() == RISCV::X0)
4583 commuteInstruction(
MI);
4585 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4586 MI.getOperand(2).ChangeToImmediate(0);
4587 MI.setDesc(
get(RISCV::ADDIW));
4592 case RISCV::SH1ADD_UW:
4594 case RISCV::SH2ADD_UW:
4596 case RISCV::SH3ADD_UW:
4598 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4599 MI.removeOperand(1);
4601 MI.setDesc(
get(RISCV::ADDI));
4605 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4606 MI.removeOperand(2);
4607 unsigned Opc =
MI.getOpcode();
4608 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4609 Opc == RISCV::SH3ADD_UW) {
4611 MI.setDesc(
get(RISCV::SLLI_UW));
4615 MI.setDesc(
get(RISCV::SLLI));
4629 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4630 MI.getOperand(2).getReg() == RISCV::X0) {
4631 MI.getOperand(1).setReg(RISCV::X0);
4632 MI.getOperand(2).ChangeToImmediate(0);
4633 MI.setDesc(
get(RISCV::ADDI));
4639 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4640 MI.getOperand(2).setImm(0);
4641 MI.setDesc(
get(RISCV::ADDI));
4649 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4650 MI.getOperand(2).ChangeToImmediate(0);
4651 MI.setDesc(
get(RISCV::ADDI));
4655 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4656 MI.getOperand(2).ChangeToImmediate(0);
4657 MI.setDesc(
get(RISCV::ADDI));
4665 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4666 MI.getOperand(2).ChangeToImmediate(0);
4667 MI.setDesc(
get(RISCV::ADDI));
4677 case RISCV::SLLI_UW:
4679 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4680 MI.getOperand(2).setImm(0);
4681 MI.setDesc(
get(RISCV::ADDI));
4689 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4690 MI.getOperand(2).getReg() == RISCV::X0) {
4691 MI.getOperand(2).ChangeToImmediate(0);
4692 MI.setDesc(
get(RISCV::ADDI));
4696 if (
MI.getOpcode() == RISCV::ADD_UW &&
4697 MI.getOperand(1).getReg() == RISCV::X0) {
4698 MI.removeOperand(1);
4700 MI.setDesc(
get(RISCV::ADDI));
4706 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4707 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4708 MI.setDesc(
get(RISCV::ADDI));
4714 case RISCV::ZEXT_H_RV32:
4715 case RISCV::ZEXT_H_RV64:
4718 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4720 MI.setDesc(
get(RISCV::ADDI));
4729 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4730 MI.getOperand(2).ChangeToImmediate(0);
4731 MI.setDesc(
get(RISCV::ADDI));
4738 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4740 MI.removeOperand(0);
4741 MI.insert(
MI.operands_begin() + 1, {MO0});
4746 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4748 MI.removeOperand(0);
4749 MI.insert(
MI.operands_begin() + 1, {MO0});
4750 MI.setDesc(
get(RISCV::BNE));
4755 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4757 MI.removeOperand(0);
4758 MI.insert(
MI.operands_begin() + 1, {MO0});
4759 MI.setDesc(
get(RISCV::BEQ));
4767#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4768 RISCV::PseudoV##OP##_##LMUL##_TIED
4770#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4771 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4772 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4773 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4774 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4775 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4776 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4778#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4779 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4780 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4783#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4784 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4785 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4786 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4787 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4788 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4789 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4792#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4793 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4795#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4796 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4797 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4798 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4799 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4800 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4801 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4802 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4803 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4804 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4806#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4807 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4808 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4811#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4812 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4813 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4814 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4815 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4816 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4817 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4818 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4819 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4820 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4822#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4823 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4824 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4825 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4826 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4827 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4829#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4830 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4831 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4832 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4833 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4834 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4841 switch (
MI.getOpcode()) {
4849 MI.getNumExplicitOperands() == 7 &&
4850 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4857 switch (
MI.getOpcode()) {
4869 .
add(
MI.getOperand(0))
4871 .
add(
MI.getOperand(1))
4872 .
add(
MI.getOperand(2))
4873 .
add(
MI.getOperand(3))
4874 .
add(
MI.getOperand(4))
4875 .
add(
MI.getOperand(5))
4876 .
add(
MI.getOperand(6));
4885 MI.getNumExplicitOperands() == 6);
4892 switch (
MI.getOpcode()) {
4904 .
add(
MI.getOperand(0))
4906 .
add(
MI.getOperand(1))
4907 .
add(
MI.getOperand(2))
4908 .
add(
MI.getOperand(3))
4909 .
add(
MI.getOperand(4))
4910 .
add(
MI.getOperand(5));
4917 unsigned NumOps =
MI.getNumOperands();
4920 if (
Op.isReg() &&
Op.isKill())
4928 if (
MI.getOperand(0).isEarlyClobber()) {
4942#undef CASE_WIDEOP_OPCODE_COMMON
4943#undef CASE_WIDEOP_OPCODE_LMULS
4944#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4945#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4946#undef CASE_FP_WIDEOP_OPCODE_COMMON
4947#undef CASE_FP_WIDEOP_OPCODE_LMULS
4948#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4949#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4958 if (ShiftAmount == 0)
4964 }
else if (
int ShXAmount, ShiftAmount;
4966 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4969 switch (ShXAmount) {
4971 Opc = RISCV::SH1ADD;
4974 Opc = RISCV::SH2ADD;
4977 Opc = RISCV::SH3ADD;
5013 }
else if (
STI.hasStdExtZmmul()) {
5023 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
5024 if (Amount & (1U << ShiftAmount)) {
5028 .
addImm(ShiftAmount - PrevShiftAmount)
5030 if (Amount >> (ShiftAmount + 1)) {
5044 PrevShiftAmount = ShiftAmount;
5047 assert(Acc &&
"Expected valid accumulator");
5057 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
5065 ?
STI.getTailDupAggressiveThreshold()
5072 unsigned Opcode =
MI.getOpcode();
5073 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
5082 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
5084 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
5087std::optional<std::pair<unsigned, unsigned>>
5091 return std::nullopt;
5092 case RISCV::PseudoVSPILL2_M1:
5093 case RISCV::PseudoVRELOAD2_M1:
5094 return std::make_pair(2u, 1u);
5095 case RISCV::PseudoVSPILL2_M2:
5096 case RISCV::PseudoVRELOAD2_M2:
5097 return std::make_pair(2u, 2u);
5098 case RISCV::PseudoVSPILL2_M4:
5099 case RISCV::PseudoVRELOAD2_M4:
5100 return std::make_pair(2u, 4u);
5101 case RISCV::PseudoVSPILL3_M1:
5102 case RISCV::PseudoVRELOAD3_M1:
5103 return std::make_pair(3u, 1u);
5104 case RISCV::PseudoVSPILL3_M2:
5105 case RISCV::PseudoVRELOAD3_M2:
5106 return std::make_pair(3u, 2u);
5107 case RISCV::PseudoVSPILL4_M1:
5108 case RISCV::PseudoVRELOAD4_M1:
5109 return std::make_pair(4u, 1u);
5110 case RISCV::PseudoVSPILL4_M2:
5111 case RISCV::PseudoVRELOAD4_M2:
5112 return std::make_pair(4u, 2u);
5113 case RISCV::PseudoVSPILL5_M1:
5114 case RISCV::PseudoVRELOAD5_M1:
5115 return std::make_pair(5u, 1u);
5116 case RISCV::PseudoVSPILL6_M1:
5117 case RISCV::PseudoVRELOAD6_M1:
5118 return std::make_pair(6u, 1u);
5119 case RISCV::PseudoVSPILL7_M1:
5120 case RISCV::PseudoVRELOAD7_M1:
5121 return std::make_pair(7u, 1u);
5122 case RISCV::PseudoVSPILL8_M1:
5123 case RISCV::PseudoVRELOAD8_M1:
5124 return std::make_pair(8u, 1u);
5129 int16_t MI1FrmOpIdx =
5130 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
5131 int16_t MI2FrmOpIdx =
5132 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
5133 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5140std::optional<unsigned>
5144 return std::nullopt;
5147 case RISCV::VSLL_VX:
5148 case RISCV::VSRL_VX:
5149 case RISCV::VSRA_VX:
5151 case RISCV::VSSRL_VX:
5152 case RISCV::VSSRA_VX:
5154 case RISCV::VROL_VX:
5155 case RISCV::VROR_VX:
5160 case RISCV::VNSRL_WX:
5161 case RISCV::VNSRA_WX:
5163 case RISCV::VNCLIPU_WX:
5164 case RISCV::VNCLIP_WX:
5166 case RISCV::VWSLL_VX:
5171 case RISCV::VADD_VX:
5172 case RISCV::VSUB_VX:
5173 case RISCV::VRSUB_VX:
5175 case RISCV::VWADDU_VX:
5176 case RISCV::VWSUBU_VX:
5177 case RISCV::VWADD_VX:
5178 case RISCV::VWSUB_VX:
5179 case RISCV::VWADDU_WX:
5180 case RISCV::VWSUBU_WX:
5181 case RISCV::VWADD_WX:
5182 case RISCV::VWSUB_WX:
5184 case RISCV::VADC_VXM:
5185 case RISCV::VADC_VIM:
5186 case RISCV::VMADC_VXM:
5187 case RISCV::VMADC_VIM:
5188 case RISCV::VMADC_VX:
5189 case RISCV::VSBC_VXM:
5190 case RISCV::VMSBC_VXM:
5191 case RISCV::VMSBC_VX:
5193 case RISCV::VAND_VX:
5195 case RISCV::VXOR_VX:
5197 case RISCV::VMSEQ_VX:
5198 case RISCV::VMSNE_VX:
5199 case RISCV::VMSLTU_VX:
5200 case RISCV::VMSLT_VX:
5201 case RISCV::VMSLEU_VX:
5202 case RISCV::VMSLE_VX:
5203 case RISCV::VMSGTU_VX:
5204 case RISCV::VMSGT_VX:
5206 case RISCV::VMINU_VX:
5207 case RISCV::VMIN_VX:
5208 case RISCV::VMAXU_VX:
5209 case RISCV::VMAX_VX:
5211 case RISCV::VMUL_VX:
5212 case RISCV::VMULH_VX:
5213 case RISCV::VMULHU_VX:
5214 case RISCV::VMULHSU_VX:
5216 case RISCV::VDIVU_VX:
5217 case RISCV::VDIV_VX:
5218 case RISCV::VREMU_VX:
5219 case RISCV::VREM_VX:
5221 case RISCV::VWMUL_VX:
5222 case RISCV::VWMULU_VX:
5223 case RISCV::VWMULSU_VX:
5225 case RISCV::VMACC_VX:
5226 case RISCV::VNMSAC_VX:
5227 case RISCV::VMADD_VX:
5228 case RISCV::VNMSUB_VX:
5230 case RISCV::VWMACCU_VX:
5231 case RISCV::VWMACC_VX:
5232 case RISCV::VWMACCSU_VX:
5233 case RISCV::VWMACCUS_VX:
5235 case RISCV::VMERGE_VXM:
5237 case RISCV::VMV_V_X:
5239 case RISCV::VSADDU_VX:
5240 case RISCV::VSADD_VX:
5241 case RISCV::VSSUBU_VX:
5242 case RISCV::VSSUB_VX:
5244 case RISCV::VAADDU_VX:
5245 case RISCV::VAADD_VX:
5246 case RISCV::VASUBU_VX:
5247 case RISCV::VASUB_VX:
5249 case RISCV::VSMUL_VX:
5251 case RISCV::VMV_S_X:
5253 case RISCV::VANDN_VX:
5254 return 1U << Log2SEW;
5260 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5263 return RVV->BaseInstr;
5273 unsigned Scaled = Log2SEW + (DestEEW - 1);
5287 return std::nullopt;
5292 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5293 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5294 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5295 LHS.getReg() == RHS.getReg())
5299 if (LHS.isImm() && LHS.getImm() == 0)
5305 if (!LHSImm || !RHSImm)
5307 return LHSImm <= RHSImm;
5319 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5321 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5331 std::optional<bool> createTripCountGreaterCondition(
5332 int TC, MachineBasicBlock &
MBB,
5333 SmallVectorImpl<MachineOperand> &CondParam)
override {
5341 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5343 void adjustTripCount(
int TripCountAdjust)
override {}
5347std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5355 if (
TBB == LoopBB && FBB == LoopBB)
5362 assert((
TBB == LoopBB || FBB == LoopBB) &&
5363 "The Loop must be a single-basic-block loop");
5374 if (!Reg.isVirtual())
5381 if (LHS && LHS->isPHI())
5383 if (RHS && RHS->isPHI())
5386 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5392 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5409 case RISCV::FDIV_H_INX:
5410 case RISCV::FDIV_S_INX:
5411 case RISCV::FDIV_D_INX:
5412 case RISCV::FDIV_D_IN32X:
5413 case RISCV::FSQRT_H:
5414 case RISCV::FSQRT_S:
5415 case RISCV::FSQRT_D:
5416 case RISCV::FSQRT_H_INX:
5417 case RISCV::FSQRT_S_INX:
5418 case RISCV::FSQRT_D_INX:
5419 case RISCV::FSQRT_D_IN32X:
5421 case RISCV::VDIV_VV:
5422 case RISCV::VDIV_VX:
5423 case RISCV::VDIVU_VV:
5424 case RISCV::VDIVU_VX:
5425 case RISCV::VREM_VV:
5426 case RISCV::VREM_VX:
5427 case RISCV::VREMU_VV:
5428 case RISCV::VREMU_VX:
5430 case RISCV::VFDIV_VV:
5431 case RISCV::VFDIV_VF:
5432 case RISCV::VFRDIV_VF:
5433 case RISCV::VFSQRT_V:
5434 case RISCV::VFRSQRT7_V:
5440 if (
MI->getOpcode() != TargetOpcode::COPY)
5445 Register DstReg =
MI->getOperand(0).getReg();
5448 :
TRI->getMinimalPhysRegClass(DstReg);
5458 auto [RCLMul, RCFractional] =
5460 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5464 if (
MI.memoperands_empty())
5479 if (MO.getReg().isPhysical())
5482 if (MO.getReg().isPhysical())
5484 bool SawStore =
false;
5487 if (
II->definesRegister(PhysReg,
nullptr))
5490 if (
II->definesRegister(PhysReg,
nullptr) ||
5491 II->readsRegister(PhysReg,
nullptr))
5493 if (
II->mayStore()) {
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
static cl::opt< bool > OutlinerEnableRegSave("riscv-outliner-regsave", cl::init(true), cl::Hidden, cl::desc("Enable RegSave strategy in machine outliner (save X5 to a " "temporary register when X5 is live across outlined calls)."))
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static Register findRegisterToSaveX5To(outliner::Candidate &C, const TargetRegisterInfo &TRI)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define CASE_RVV_OPCODE_LMUL(OP, LMUL)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
Get the first element.
bool empty() const
Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isSafeToMove(const MachineInstr &From, const MachineBasicBlock::iterator &To)
Return true if moving From down to To won't cause any physical register reads or writes to be clobber...
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo & getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
self_iterator getIterator()
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getInverseBranchOpcode(unsigned BCC)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
static bool isValidVType(unsigned VType)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
unsigned M1(unsigned Val)
constexpr bool has_single_bit(T Value) noexcept
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to calling C.erase(remove_if(C.begin(), C.end(), pred), C.end()).
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
constexpr bool isShiftedUInt(uint64_t x)
Checks if a unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.