#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"

          "Number of registers within vector register groups spilled");
          "Number of registers within vector register groups reloaded");

    cl::desc("Prefer whole register move for vector registers."));

    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
               "MinInstrCount strategy.")));

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
  if (STI.hasStdExtZca())

                                             int &FrameIndex) const {
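  // All whole-register vector loads (vl1r..vl8r) can implement a stack
  // reload; the EEW suffix does not change the bytes moved, so every
  // element-width variant is accepted below.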
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:

  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:

  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:

  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
  switch (MI.getOpcode()) {

  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())

    FrameIndex = MI.getOperand(1).getIndex();

    return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
                                              int &FrameIndex) const {

  switch (MI.getOpcode()) {

    if (!MI.getOperand(1).isFI())

    FrameIndex = MI.getOperand(1).getIndex();

    return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case RISCV::VFMV_V_F:

  case RISCV::VFMV_S_F:

    return MI.getOperand(1).isUndef();

  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
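// A forward (low-to-high) copy clobbers the source tuple exactly when the
// destination's first register falls inside the source group, i.e.
// SrcReg < DstReg < SrcReg + NumRegs.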
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;

    if (MBBI->isMetaInstruction())

    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {

      unsigned FirstVType = MBBI->getOperand(2).getImm();

        if (FirstLMul != LMul)

      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))

      unsigned VType = MBBI->getOperand(2).getImm();

    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {

    } else if (MBBI->getNumDefs()) {

      if (MBBI->modifiesRegister(RISCV::VL, nullptr))

        if (!MO.isReg() || !MO.isDef())

        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {

        if (MO.getReg() != SrcReg)
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);

  assert(!Fractional && "Fractional LMUL is not possible here.");
  unsigned NumRegs = NF * LMulVal;

    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;
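  // For copies that must run backwards (ReversedCopy), start from the
  // highest-numbered register of each group and walk downwards.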
      unsigned, unsigned> {

    uint16_t Diff = DstEncoding - SrcEncoding;
    if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
        DstEncoding % 8 == 7)
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
        DstEncoding % 4 == 3)
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
        DstEncoding % 2 == 1)
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};

            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};

            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
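  // In both directions the helper picks the largest whole-register chunk
  // (M8 down to M1) that still fits in the remaining registers and whose
  // alignment both encodings satisfy; the modulo checks enforce the
  // register-group alignment the ISA requires for each LMUL.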
  while (I != NumRegs) {

    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);

    if (LMul == LMulCopied &&

      if (DefMBBI->getOpcode() == VIOpc)

        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);

      MIB = MIB.add(DefMBBI->getOperand(2));

      MIB.addImm(Log2SEW ? Log2SEW : 3);

    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
                                 bool RenamableDest, bool RenamableSrc) const {

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);

    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)

    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");

            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)

            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {

        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {

    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;

           (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
               "Unexpected extensions");

      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;

        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {

        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {

        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {

        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {

        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");

        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");

        .addReg(SrcReg, KillFlag);
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);

                                           Register SrcReg, bool IsKill, int FI,

  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {

  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {

  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {

  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
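  // Segment (Zvlsseg) register tuples have no single whole-register store
  // instruction, so they spill through PseudoVSPILL<NF>_M<LMUL> pseudos
  // that are expanded into per-register stores later.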
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {

  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {

  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {

  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
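  // Reloads mirror the spill opcodes one-for-one; whole vector registers
  // reload with the EEW=8 whole-register loads (vl<N>re8.v), since a
  // whole-register load/store pair preserves the bit pattern regardless
  // of element width.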
  if (Ops.size() != 1 || Ops[0] != 1)

  switch (MI.getOpcode()) {

    if (RISCVInstrInfo::isSEXT_W(MI))

    if (RISCVInstrInfo::isZEXT_W(MI))

    if (RISCVInstrInfo::isZEXT_B(MI))

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

  case RISCV::VMV_X_S: {

    if (ST.getXLen() < (1U << Log2SEW))

  case RISCV::VFMV_F_S: {

  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
                               bool DstIsDead) const {

  bool SrcRenamable = false;

    bool LastItem = ++Num == Seq.size();

    switch (Inst.getOpndKind()) {

          .addReg(SrcReg, SrcRegState)

          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)

          .addReg(SrcReg, SrcRegState)

    SrcRenamable = DstRenamable;
  case RISCV::CV_BEQIMM:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BEQC:

  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BNEC:

  case RISCV::QC_E_BLTI:

  case RISCV::QC_E_BGEI:

  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:

  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:

                   "Unknown conditional branch");
  case RISCV::QC_MVEQ:
    return RISCV::QC_MVNE;
  case RISCV::QC_MVNE:
    return RISCV::QC_MVEQ;
  case RISCV::QC_MVLT:
    return RISCV::QC_MVGE;
  case RISCV::QC_MVGE:
    return RISCV::QC_MVLT;
  case RISCV::QC_MVLTU:
    return RISCV::QC_MVGEU;
  case RISCV::QC_MVGEU:
    return RISCV::QC_MVLTU;
  case RISCV::QC_MVEQI:
    return RISCV::QC_MVNEI;
  case RISCV::QC_MVNEI:
    return RISCV::QC_MVEQI;
  case RISCV::QC_MVLTI:
    return RISCV::QC_MVGEI;
  case RISCV::QC_MVGEI:
    return RISCV::QC_MVLTI;
  case RISCV::QC_MVLTUI:
    return RISCV::QC_MVGEUI;
  case RISCV::QC_MVGEUI:
    return RISCV::QC_MVLTUI;
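// Each QC.MV* conditional move maps to the variant with the inverted
// condition; applying the mapping twice round-trips to the original opcode.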
  switch (SelectOpc) {

  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:

  case RISCV::Select_GPR_Using_CC_SImm5_CV:

      return RISCV::CV_BEQIMM;

      return RISCV::CV_BNEIMM;

  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:

      return RISCV::QC_BEQI;

      return RISCV::QC_BNEI;

      return RISCV::QC_BLTI;

      return RISCV::QC_BGEI;

  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:

      return RISCV::QC_BLTUI;

      return RISCV::QC_BGEUI;

  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:

      return RISCV::QC_E_BEQI;

      return RISCV::QC_E_BNEI;

      return RISCV::QC_E_BLTI;

      return RISCV::QC_E_BGEI;

  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:

      return RISCV::QC_E_BLTUI;

      return RISCV::QC_E_BGEUI;

  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:

      return RISCV::NDS_BBC;

      return RISCV::NDS_BBS;

  case RISCV::Select_GPR_Using_CC_UImm7_NDS:

      return RISCV::NDS_BEQC;

      return RISCV::NDS_BNEC;
                                   bool AllowModify) const {
  TBB = FBB = nullptr;

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);

    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {

  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();

    I = FirstUncondOrIndirectBr;

  if (I->getDesc().isIndirectBranch())

  if (I->isPreISelOpcode())

  if (NumTerminators > 2)

  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
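  // analyzeBranch only understands up to two terminators: an optional
  // conditional branch followed by an optional unconditional branch;
  // anything longer is reported as unanalyzable.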
                                      int *BytesRemoved) const {

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  if (I == MBB.begin())

  if (!I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "RISC-V branch conditions have two components!");
  assert(RS && "RegScavenger required for long branching");
         "new block should be inserted for expanding unconditional branch");
         "restore block should be inserted for restoring clobbered registers");

         "Branch offsets outside of the signed 32-bit range not supported");

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  auto II = MBB.end();

  RS->enterBasicBlockEnd(MBB);

      RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),

    RS->setRegUsed(TmpGPR);

    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;

    if (FrameIndex == -1)

    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),

    MI.getOperand(1).setMBB(&RestoreBB);

    TRI->eliminateFrameIndex(RestoreBB.back(),

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");

    Cond[0].setImm(RISCV::BNE);

    Cond[0].setImm(RISCV::BNEI);

    Cond[0].setImm(RISCV::BEQ);

    Cond[0].setImm(RISCV::BEQI);

    Cond[0].setImm(RISCV::BGE);

    Cond[0].setImm(RISCV::BLT);

    Cond[0].setImm(RISCV::BGEU);

    Cond[0].setImm(RISCV::BLTU);

  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);

  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);

  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);

  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);

  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);

  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);

  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);

  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);

  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);

  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);

  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);

  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);

  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);

  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);

  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);

  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);

  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);

  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
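  // The base compares invert to their complements (BEQ<->BNE, BLT<->BGE,
  // BLTU<->BGEU), and the vendor branches (CORE-V, Qualcomm, Andes)
  // invert pairwise in the same way.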
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();

  if (Reg == RISCV::X0) {

  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);

  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {

  MI.eraseFromParent();

  auto searchConst = [&](int64_t C1) -> Register {
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {

             I.getOperand(0).getReg().isVirtual();

      return DefC1->getOperand(0).getReg();

      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {

    if (Register RegZ = searchConst(C0 + 1)) {

      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

      MRI.hasOneUse(RHS.getReg())) {

    if (Register RegZ = searchConst(C0 - 1)) {

      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();

  assert(MI.getDesc().isBranch() && "Unexpected opcode!");

  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();

  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:

  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:

  case RISCV::PseudoBR:

  case RISCV::PseudoJump:
  case RISCV::ADD:
    return RISCV::PseudoCCADD;
  case RISCV::SUB:
    return RISCV::PseudoCCSUB;
  case RISCV::SLL:
    return RISCV::PseudoCCSLL;
  case RISCV::SRL:
    return RISCV::PseudoCCSRL;
  case RISCV::SRA:
    return RISCV::PseudoCCSRA;
  case RISCV::AND:
    return RISCV::PseudoCCAND;
  case RISCV::OR:
    return RISCV::PseudoCCOR;
  case RISCV::XOR:
    return RISCV::PseudoCCXOR;

  case RISCV::ADDI:
    return RISCV::PseudoCCADDI;
  case RISCV::SLLI:
    return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:
    return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:
    return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:
    return RISCV::PseudoCCANDI;
  case RISCV::ORI:
    return RISCV::PseudoCCORI;
  case RISCV::XORI:
    return RISCV::PseudoCCXORI;

  case RISCV::ADDW:
    return RISCV::PseudoCCADDW;
  case RISCV::SUBW:
    return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:
    return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:
    return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:
    return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW:
    return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW:
    return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW:
    return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW:
    return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:
    return RISCV::PseudoCCANDN;
  case RISCV::ORN:
    return RISCV::PseudoCCORN;
  case RISCV::XNOR:
    return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS:
    return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ:
    return RISCV::PseudoCCNDS_BFOZ;

  return RISCV::INSTRUCTION_LIST_END;
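// INSTRUCTION_LIST_END is the sentinel for "no predicated form exists";
// callers must check for it before building a PseudoCC* instruction.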
  if (!Reg.isVirtual())

  if (!MRI.hasOneNonDBGUse(Reg))

  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)

    if (MO.isFI() || MO.isCPI() || MO.isJTI())

    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
                                       unsigned &TrueOp, unsigned &FalseOp,
                                       bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");

  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));

  Optimizable = STI.hasShortForwardBranchOpt();

                                           bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())

  bool Invert = !DefMI;

  Register DestReg = MI.getOperand(0).getReg();

  if (!MRI.constrainRegClass(DestReg, PreviousClass))

  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));

  NewMI.add(FalseReg);

  if (DefMI->getParent() != MI.getParent())

  DefMI->eraseFromParent();
  if (MI.isMetaInstruction())

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {

    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),

  if (!MI.memoperands_empty()) {

    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))

  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:

    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:

  case TargetOpcode::PATCHPOINT:

  case TargetOpcode::STATEPOINT: {

    return std::max(NumBytes, 8U);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {

    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {

      if (F.getFnAttribute("patchable-function-entry")
              .getAsInteger(10, Num))
        return get(Opcode).getSize();

      return (STI.hasStdExtZca() ? 2 : 4) * Num;

    return STI.is64Bit() ? 68 : 44;

  return get(Opcode).getSize();
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {

  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
  const unsigned Opcode = MI.getOpcode();

  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:

    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();

    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);

  return MI.isAsCheapAsAMove();
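// fsgnj rd, rs, rs is the canonical register-to-register FP move, and
// integer ops whose source is x0 or whose immediate is 0 are
// move-equivalent, so all of them count as cheap as a move.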
std::optional<DestSourcePair>

  switch (MI.getOpcode()) {

    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())

    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())

    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)

    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())

  case RISCV::SH1ADD_UW:

  case RISCV::SH2ADD_UW:

  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())

  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:

    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())

  return std::nullopt;
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())

      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);

    return RISCV::getNamedOperandIdx(MI->getOpcode(), RISCV::OpName::frm) < 0;

         "New instructions require FRM whereas the old one does not have it");

  for (auto *NewMI : InsInstrs) {

    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC) \
  case RISCV::OPC##_M1: \
  case RISCV::OPC##_M2: \
  case RISCV::OPC##_M4: \
  case RISCV::OPC##_M8: \
  case RISCV::OPC##_MF2: \
  case RISCV::OPC##_MF4: \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC) \
  case RISCV::OPC##_M1_MASK: \
  case RISCV::OPC##_M2_MASK: \
  case RISCV::OPC##_M4_MASK: \
  case RISCV::OPC##_M8_MASK: \
  case RISCV::OPC##_MF2_MASK: \
  case RISCV::OPC##_MF4_MASK: \
  case RISCV::OPC##_MF8_MASK

    Opcode = *InvOpcode;

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,

  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();

  const uint64_t TSFlags = Desc.TSFlags;

  auto checkImmOperand = [&](unsigned OpIdx) {

  auto checkRegOperand = [&](unsigned OpIdx) {

  if (!checkRegOperand(1))

  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {

    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();

        if (MI1VReg != SrcReg)

  assert(SeenMI2 && "Prev is expected to appear before Root");
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {

         "Expect the presence of a passthrough operand.");

  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&

  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))

  MI1 = MRI.getUniqueVRegDef(Op1.getReg());

  MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  for (unsigned I = 0; I < 5; ++I)

                                             bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);

  unsigned OperandIdx = Commuted ? 2 : 1;

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
                                        bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))

    Opc = *InverseOpcode;

std::optional<unsigned>

#define RVV_OPC_LMUL_CASE(OPC, INV) \
  case RISCV::OPC##_M1: \
    return RISCV::INV##_M1; \
  case RISCV::OPC##_M2: \
    return RISCV::INV##_M2; \
  case RISCV::OPC##_M4: \
    return RISCV::INV##_M4; \
  case RISCV::OPC##_M8: \
    return RISCV::INV##_M8; \
  case RISCV::OPC##_MF2: \
    return RISCV::INV##_MF2; \
  case RISCV::OPC##_MF4: \
    return RISCV::INV##_MF4; \
  case RISCV::OPC##_MF8: \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
  case RISCV::OPC##_M1_MASK: \
    return RISCV::INV##_M1_MASK; \
  case RISCV::OPC##_M2_MASK: \
    return RISCV::INV##_M2_MASK; \
  case RISCV::OPC##_M4_MASK: \
    return RISCV::INV##_M4_MASK; \
  case RISCV::OPC##_M8_MASK: \
    return RISCV::INV##_M8_MASK; \
  case RISCV::OPC##_MF2_MASK: \
    return RISCV::INV##_MF2_MASK; \
  case RISCV::OPC##_MF4_MASK: \
    return RISCV::INV##_MF4_MASK; \
  case RISCV::OPC##_MF8_MASK: \
    return RISCV::INV##_MF8_MASK

    return std::nullopt;

    return RISCV::FSUB_H;

    return RISCV::FSUB_S;

    return RISCV::FSUB_D;

    return RISCV::FADD_H;

    return RISCV::FADD_S;

    return RISCV::FADD_D;

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
                             bool DoRegPressureReduce) {

  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                                  bool DoRegPressureReduce) {

                          DoRegPressureReduce)) {

                          DoRegPressureReduce)) {

                            bool DoRegPressureReduce) {

                          unsigned CombineOpc) {

  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)

  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                               unsigned OuterShiftAmt) {

  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)

  case RISCV::SH1ADD_UW:

  case RISCV::SH2ADD_UW:

  case RISCV::SH3ADD_UW:

                                bool DoRegPressureReduce) const {

                                     DoRegPressureReduce);

    return RISCV::FMADD_H;

    return RISCV::FMADD_S;

    return RISCV::FMADD_D;

  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();

  BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)

  assert(OuterShiftAmt != 0 && "Unexpected opcode");

  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  switch (InnerShiftAmt - OuterShiftAmt) {

    InnerOpc = RISCV::ADD;

    InnerOpc = RISCV::SH1ADD;

    InnerOpc = RISCV::SH2ADD;

    InnerOpc = RISCV::SH3ADD;

  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

                      DelInstrs, InstrIdxForVirtReg);
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;

        ErrInfo = "Expected a non-register operand.";

      int64_t Imm = MO.getImm();

#define CASE_OPERAND_UIMM(NUM) \
  case RISCVOp::OPERAND_UIMM##NUM: \
    Ok = isUInt<NUM>(Imm); \

#define CASE_OPERAND_SIMM(NUM) \
  case RISCVOp::OPERAND_SIMM##NUM: \
    Ok = isInt<NUM>(Imm); \

        Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);

        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;

        Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;

        Ok = Ok && Imm != 0;

             (Imm >= 0xfffe0 && Imm <= 0xfffff);

        Ok = Imm >= 0 && Imm <= 10;

        Ok = Imm >= 0 && Imm <= 7;

        Ok = Imm >= 1 && Imm <= 10;

        Ok = Imm >= 2 && Imm <= 14;

        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;

        ErrInfo = "Invalid immediate";

    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";

    if (Op.isReg() && Op.getReg().isValid()) {

      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";

      ErrInfo = "VL operand w/o SEW operand?";

    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";

      ErrInfo = "Unexpected SEW value";

    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

      ErrInfo = "Unexpected SEW value";

    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";

      ErrInfo = "Invalid Policy Value";

      ErrInfo = "policy operand w/o VL operand?";

    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";

        !MI.readsRegister(RISCV::FRM, nullptr)) {
      ErrInfo = "dynamic rounding mode should read FRM";
  case RISCV::LD_RV32:

  case RISCV::SD_RV32:

  int64_t NewOffset = OldOffset + Disp;

         "Addressing mode not supported for folding");

  case RISCV::LD_RV32:

  case RISCV::SD_RV32:

  OffsetIsScalable = false;

  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

  if (MO1->getAddrSpace() != MO2->getAddrSpace())

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)

  return Base1 == Base2;
                                           int64_t Offset2,
                                           bool OffsetIsScalable2,
                                           unsigned ClusterSize,
                                           unsigned NumBytes) const {

  if (!BaseOps1.empty() && !BaseOps2.empty()) {

  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();

  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
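  // Clustering heuristic: allow at most four memory ops per cluster, and
  // only when their offsets land within one cache line of each other.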
  int64_t OffsetA = 0, OffsetB = 0;

  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;

      LowOffset + (int)LowWidth.getValue() <= HighOffset)

std::pair<unsigned, unsigned>

  return std::make_pair(TF & Mask, TF & ~Mask);

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())

                                      unsigned &Flags) const {

  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");

  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);

  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);

  if (!MBB.back().isReturn())

  if (C.back().isReturn()) {
           "A candidate that uses a return instruction must be outlined "

  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);

std::optional<std::unique_ptr<outliner::OutlinedFunction>>

    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {

  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;

  unsigned InstrSizeCExt =

  unsigned CallOverhead = 0, FrameOverhead = 0;

    CallOverhead = 4 + InstrSizeCExt;

    FrameOverhead = InstrSizeCExt;

  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
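// Cost model: each call site is charged CallOverhead bytes and the
// outlined body adds FrameOverhead once; both terms shrink when the
// compressed instruction size (InstrSizeCExt) applies.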
                                                 unsigned Flags) const {

      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  if (MI.isCFIInstruction())

  for (const auto &MO : MI.operands()) {

        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))

  auto I = MBB.begin();

  for (; I != E; ++I) {
    if (I->isCFIInstruction()) {
      I->removeFromParent();

  MBB.addLiveIn(RISCV::X5);

      .addGlobalAddress(M.getNamedValue(MF.getName()),

      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,

  return std::nullopt;

  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;

  std::string GenericComment =

  if (!GenericComment.empty())
    return GenericComment;

    return std::string();

    return std::string();

  std::string Comment;

  switch (OpInfo.OperandType) {

    unsigned Imm = Op.getImm();

    unsigned Imm = Op.getImm();

    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

    unsigned Policy = Op.getImm();
           "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP) \
  CASE_RVV_OPCODE_MASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP) \
  CASE_RVV_OPCODE_UNMASK(OP): \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {

  if (!Desc.isCommutable())

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:

    if (MI.getOperand(2).getReg() == RISCV::X0)

    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:

    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:

    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);

    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,

    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)

    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)

    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)

    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {

      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {

        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {

        CommutableOpIdx1 = SrcOpIdx2;

      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {

        CommutableOpIdx2 = 1;

        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;

          CommutableOpIdx2 = 3;

      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
                                                     unsigned OpIdx2) const {

      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));

  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:

  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI: {
    auto &WorkingMI = cloneIfNew(MI);

  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");

    switch (MI.getOpcode()) {

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");

    if (OpIdx1 == 3 || OpIdx2 == 3) {

      switch (MI.getOpcode()) {

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));

#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
  switch (MI.getOpcode()) {

    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

  case RISCV::SH1ADD_UW:

  case RISCV::SH2ADD_UW:

  case RISCV::SH3ADD_UW:

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);

      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {

        MI.setDesc(get(RISCV::SLLI_UW));

      MI.setDesc(get(RISCV::SLLI));

    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::SLLI_UW:

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);

      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

    if (MI.getOperand(1).getReg() == RISCV::X0) {

      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(0).getReg() == RISCV::X0) {

      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});

    if (MI.getOperand(0).getReg() == RISCV::X0) {

      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));

    if (MI.getOperand(0).getReg() == RISCV::X0) {

      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP) \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL; \

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
  switch (MI.getOpcode()) {

           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");

    switch (MI.getOpcode()) {

              .add(MI.getOperand(0))
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));

           MI.getNumExplicitOperands() == 6);

    switch (MI.getOpcode()) {

              .add(MI.getOperand(0))
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));

  unsigned NumOps = MI.getNumOperands();

    if (Op.isReg() && Op.isKill())

  if (MI.getOperand(0).isEarlyClobber()) {

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
  if (ShiftAmount == 0)

  } else if (int ShXAmount, ShiftAmount;
             (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {

    switch (ShXAmount) {

      Opc = RISCV::SH1ADD;

      Opc = RISCV::SH2ADD;

      Opc = RISCV::SH3ADD;

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {

            .addImm(ShiftAmount - PrevShiftAmount)

        if (Amount >> (ShiftAmount + 1)) {

          Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);

        PrevShiftAmount = ShiftAmount;

    assert(Acc && "Expected valid accumulator");
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =

             ? STI.getTailDupAggressiveThreshold()

  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
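// Added illustration (not part of the upstream source): the pair is
// (NF, LMUL), so expanding one of these segment spill/reload pseudos touches
// NF * LMUL architectural vector registers; note that every entry above keeps
// that product at most 8, matching the architectural limit on register-group
// usage.
[[maybe_unused]] static unsigned
zvlssegSpillRegCount(std::pair<unsigned, unsigned> NFAndLMul) {
  return NFAndLMul.first * NFAndLMul.second; // e.g. VSPILL4_M2 -> 4 * 2 == 8
}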
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  const MachineOperand &FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  const MachineOperand &FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // 11.6. Vector Single-Width Shift Instructions
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  // 12.4. Vector Single-Width Scaling Shift Instructions
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  // Zvbb
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    // Only the low lg2(SEW) bits of the shift-amount value are used.
    return Log2SEW;

  // 11.7. Vector Narrowing Integer Right Shift Instructions
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  // 12.5. Vector Narrowing Fixed-Point Clip Instructions
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  // Zvbb
  case RISCV::VWSLL_VX:
    // Only the low lg2(2*SEW) bits of the shift-amount value are used.
    return Log2SEW + 1;

  // 11.1. Vector Single-Width Integer Add and Subtract
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  // 11.2. Vector Widening Integer Add/Subtract
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // 11.5. Vector Bitwise Logical Instructions
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  // 11.8. Vector Integer Compare Instructions
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // 11.9. Vector Integer Min/Max Instructions
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // 11.10. Vector Single-Width Integer Multiply Instructions
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  // 11.11. Vector Integer Divide Instructions
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // 11.12. Vector Widening Integer Multiply Instructions
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // 11.13. Vector Single-Width Integer Multiply-Add Instructions
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  // 11.14. Vector Widening Integer Multiply-Add Instructions
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // 11.15. Vector Integer Merge Instructions
  case RISCV::VMERGE_VXM:
  // 11.16. Vector Integer Move Instructions
  case RISCV::VMV_V_X:
  // 12.1. Vector Single-Width Saturating Add and Subtract
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  // 12.2. Vector Single-Width Averaging Add and Subtract
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
  case RISCV::VSMUL_VX:
  // 16.1. Integer Scalar Move Instructions
  case RISCV::VMV_S_X:
  // Zvbb
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
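// Added illustration (not part of the upstream source): for the single-width
// shift cases above only the low lg2(SEW) bits of the scalar are demanded,
// and the narrowing forms demand one extra bit. A standalone model of the
// resulting mask:
[[maybe_unused]] static uint64_t demandedShiftBitsMask(unsigned Log2SEW,
                                                       bool Narrowing) {
  unsigned Bits = Log2SEW + (Narrowing ? 1 : 0);
  return (uint64_t(1) << Bits) - 1; // SEW=32 -> 0x1f; narrowing at SEW=32 -> 0x3f
}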
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}

unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // EEW = 1
  if (DestEEW == 0)
    return 0;
  // EEW = SEW * 2^(DestEEW - 1)
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(RISCVVType::isValidSEW(1 << Scaled) && "Unexpected destination EEW");
  return Scaled;
}
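// Added worked example (commentary, not part of the upstream source): a
// widening instruction encodes DestEEW == 2, so at SEW=32 (Log2SEW == 5) the
// computation above yields Scaled == 6, i.e. a destination EEW of 64 bits,
// while DestEEW == 0 is the mask-producing case with a fixed EEW of 1.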
// Return the constant VL value of MO if it is an immediate or a virtual
// register defined by a load-immediate; otherwise return std::nullopt.
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (MO.isImm())
    return MO.getImm();
  if (MO.isReg() && MO.getReg().isVirtual()) {
    const MachineRegisterInfo &MRI = MO.getParent()->getMF()->getRegInfo();
    int64_t Imm;
    if (isLoadImm(MRI.getVRegDef(MO.getReg()), Imm))
      return Imm;
  }
  return std::nullopt;
}

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  // The same virtual register trivially compares equal to itself.
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // A VL of 0 is <= anything.
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  // VLMAX is >= any other VL, and only VLMAX is >= VLMAX.
  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;
  if (LHS.isImm() && LHS.getImm() == RISCV::VLMaxSentinel)
    return false;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
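// Added illustration (not part of the upstream source): a simplified model of
// the ordering this query implements, with a VL represented as a known count
// or, when std::nullopt, as VLMAX (register operands omitted):
[[maybe_unused]] static bool vlKnownLEModel(std::optional<int64_t> L,
                                            std::optional<int64_t> R) {
  if (!R)
    return true;  // anything <= VLMAX
  if (L && *L == 0)
    return true;  // 0 <= anything
  if (!L)
    return false; // VLMAX <= a known VL is not provable
  return *L <= *R;
}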
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *LHS;
  MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(MachineInstr *LHS, MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Only ignore the instructions that compute the loop-control condition;
    // everything else must be scheduled.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is normalized for such use.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // Must be a conditional branch.
  if (FBB == nullptr)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Find the instructions defining the two condition operands, so the
  // pipeliner can place the loop-control computation in stage 0.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](const MachineOperand &Op) -> MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };

  MachineInstr *LHS = FindRegDef(Cond[1]);
  MachineInstr *RHS = FindRegDef(Cond[2]);
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Scalar integer divide/remainder.
  case RISCV::DIV:
  case RISCV::DIVU:
  case RISCV::DIVW:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMU:
  case RISCV::REMW:
  case RISCV::REMUW:
  // Scalar FP divide/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer divide/remainder.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector FP divide/sqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
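// Added note (commentary, not part of the upstream source): RVV pseudos are
// first mapped back to their MC opcode by getRVVMCOpcode, so e.g. a
// PseudoVDIV_VV_M1 is classified through VDIV_VV and reported as a
// high-latency def alongside the scalar divide and square-root cases.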