#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"
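// Statistics and command-line knobs: counters for RVV register-group
// spills/reloads, plus options tuning whole-register moves and the machine
// combiner's trace-metrics strategy.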
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
  if (STI.hasStdExtZca())

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
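  // Whole vector register loads (vl<N>r.v): one case group per register-group
  // size (LMUL 1, 2, 4, 8), each covering the EEW 8/16/32/64 encodings.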
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:

  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:

  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:

  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
  switch (MI.getOpcode()) {

  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {

  switch (MI.getOpcode()) {

    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
  case RISCV::VFMV_V_F:
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();
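// A forward (ascending) whole-register-group copy clobbers its own source
// exactly when the destination tuple overlaps the source tuple from above.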
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
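// Backward scan from a whole-register COPY looking for the vsetvli that
// configures it; inline asm, calls, and VL/VTYPE writers terminate the scan.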
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;

    if (MBBI->isMetaInstruction())

    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {

      unsigned FirstVType = MBBI->getOperand(2).getImm();

        if (FirstLMul != LMul)

      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))

      unsigned VType = MBBI->getOperand(2).getImm();

    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
    } else if (MBBI->getNumDefs()) {
      if (MBBI->modifiesRegister(RISCV::VL, nullptr))

        if (!MO.isReg() || !MO.isDef())
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {

        if (MO.getReg() != SrcReg)
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  assert(!Fractional && "Fractional LMUL is impossible here.");
  unsigned NumRegs = NF * LMulVal;

    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;

  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
        return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
        return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
        return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
      return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};

    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
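  // Emit the widest aligned whole-register move available at each step, then
  // advance the encodings by the number of registers just copied.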
  while (I != NumRegs) {

    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);

    if (LMul == LMulCopied &&

      if (DefMBBI->getOpcode() == VIOpc)

        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);

      MIB = MIB.add(DefMBBI->getOperand(2));

    MIB.addImm(Log2SEW ? Log2SEW : 3);

    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {
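  // Dispatch on the register classes of DstReg/SrcReg: GPR and GPR-pair
  // moves, VCSR reads, FPR sign-injection moves, then GPR<->FPR transfers.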
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);

    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)

    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");

            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)

            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;

      assert((STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");

      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;

        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);

  const TargetRegisterClass *RC =
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
                                         Register SrcReg, bool IsKill, int FI,

  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
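  // loadRegFromStackSlot mirrors the spill selection above: the same register
  // class dispatch, with loads (LW/LD, FL*, VL<N>RE8_V, PseudoVRELOADn)
  // instead of stores.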
  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
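// foldMemoryOperandImpl fragment: a sign/zero-extend (or a scalar move out of
// a vector) whose operand is being reloaded can, as far as the fragment
// shows, fold into the load opcode itself.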
  if (Ops.size() != 1 || Ops[0] != 1)

  switch (MI.getOpcode()) {

    if (RISCVInstrInfo::isSEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_B(MI))

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:

  case RISCV::VMV_X_S: {
    if (ST.getXLen() < (1U << Log2SEW))

  case RISCV::VFMV_F_S: {

  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
                             bool DstIsDead) const {

  bool SrcRenamable = false;

    bool LastItem = ++Num == Seq.size();

    switch (Inst.getOpndKind()) {

          .addReg(SrcReg, SrcRegState)

          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)

          .addReg(SrcReg, SrcRegState)

    SrcRenamable = DstRenamable;
  case RISCV::CV_BEQIMM:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BEQC:

  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BNEC:

  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:

    llvm_unreachable("Unknown conditional branch");
  case RISCV::QC_MVEQ:
    return RISCV::QC_MVNE;
  case RISCV::QC_MVNE:
    return RISCV::QC_MVEQ;
  case RISCV::QC_MVLT:
    return RISCV::QC_MVGE;
  case RISCV::QC_MVGE:
    return RISCV::QC_MVLT;
  case RISCV::QC_MVLTU:
    return RISCV::QC_MVGEU;
  case RISCV::QC_MVGEU:
    return RISCV::QC_MVLTU;
  case RISCV::QC_MVEQI:
    return RISCV::QC_MVNEI;
  case RISCV::QC_MVNEI:
    return RISCV::QC_MVEQI;
  case RISCV::QC_MVLTI:
    return RISCV::QC_MVGEI;
  case RISCV::QC_MVGEI:
    return RISCV::QC_MVLTI;
  case RISCV::QC_MVLTUI:
    return RISCV::QC_MVGEUI;
  case RISCV::QC_MVGEUI:
    return RISCV::QC_MVLTUI;
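// Map each Select_* pseudo plus condition code to the concrete conditional
// branch opcode of the owning vendor extension (CORE-V, Qualcomm, Andes).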
  switch (SelectOpc) {

  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:

  case RISCV::Select_GPR_Using_CC_SImm5_CV:
      return RISCV::CV_BEQIMM;
      return RISCV::CV_BNEIMM;

  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
      return RISCV::QC_BEQI;
      return RISCV::QC_BNEI;
      return RISCV::QC_BLTI;
      return RISCV::QC_BGEI;

  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
      return RISCV::QC_BLTUI;
      return RISCV::QC_BGEUI;

  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
      return RISCV::QC_E_BEQI;
      return RISCV::QC_E_BNEI;
      return RISCV::QC_E_BLTI;
      return RISCV::QC_E_BGEI;

  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
      return RISCV::QC_E_BLTUI;
      return RISCV::QC_E_BGEUI;

  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
      return RISCV::NDS_BBC;
      return RISCV::NDS_BBS;

  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
      return RISCV::NDS_BEQC;
      return RISCV::NDS_BNEC;
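// analyzeBranch: classify the terminator sequence (at most one conditional
// branch followed by one unconditional branch); with AllowModify, dead
// instructions after the first unconditional/indirect branch are erased.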
                                   bool AllowModify) const {
  TBB = FBB = nullptr;

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);

    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {

  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
    I = FirstUncondOrIndirectBr;

  if (I->getDesc().isIndirectBranch())

  if (I->isPreISelOpcode())

  if (NumTerminators > 2)

  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {

  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {

  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
                                      int *BytesRemoved) const {

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  if (I == MBB.begin())
  if (!I->getDesc().isConditionalBranch())

  I->eraseFromParent();

  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");

  assert(isInt<32>(BrOffset) &&
         "Branch offsets outside of the signed 32-bit range not supported");

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  auto II = MBB.end();

  if (TmpGPR != RISCV::NoRegister)

    TmpGPR = STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;

    if (FrameIndex == -1)

    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),

    MI.getOperand(1).setMBB(&RestoreBB);

    TRI->eliminateFrameIndex(RestoreBB.back(),

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");

    Cond[0].setImm(RISCV::BNE);
    Cond[0].setImm(RISCV::BNEI);
    Cond[0].setImm(RISCV::BEQ);
    Cond[0].setImm(RISCV::BEQI);
    Cond[0].setImm(RISCV::BGE);
    Cond[0].setImm(RISCV::BLT);
    Cond[0].setImm(RISCV::BGEU);
    Cond[0].setImm(RISCV::BLTU);
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
    break;
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
    break;
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
    break;
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
    break;
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
    break;
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
    break;
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
    break;
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
    break;
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
    break;
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
    break;
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
    break;
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
    break;
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
    break;
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
    break;
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
    break;
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
    break;
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
    break;
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
    break;
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();

  if (Reg == RISCV::X0) {
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);

  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {

  MI.eraseFromParent();
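  // Rewrite BLT x, C as BGE C+1, x (and the analogous BGE case) when another
  // instruction in the block already materializes the adjacent constant, so
  // the compare reuses an existing register instead of a fresh LI.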
1575 auto searchConst = [&](int64_t C1) ->
Register {
1577 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1580 I.getOperand(0).getReg().isVirtual();
1583 return DefC1->getOperand(0).getReg();
1596 MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1598 if (
Register RegZ = searchConst(C0 + 1)) {
1605 MRI.clearKillFlags(RegZ);
1606 MI.eraseFromParent();
1617 MRI.hasOneUse(RHS.getReg())) {
1619 if (
Register RegZ = searchConst(C0 - 1)) {
1626 MRI.clearKillFlags(RegZ);
1627 MI.eraseFromParent();
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");

  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();

                                          int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();

  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:

  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:

  case RISCV::PseudoBR:

  case RISCV::PseudoJump:
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;

  return RISCV::INSTRUCTION_LIST_END;
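// INSTRUCTION_LIST_END doubles as the "no predicated form exists" sentinel
// checked by the short-forward-branch folding below.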
  if (!Reg.isVirtual())
  if (!MRI.hasOneNonDBGUse(Reg))

  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)

    if (MO.isFI() || MO.isCPI() || MO.isJTI())

    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))

                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");

  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));

  Optimizable = STI.hasShortForwardBranchOpt();
                                           bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())

  bool Invert = !DefMI;

  Register DestReg = MI.getOperand(0).getReg();

  if (!MRI.constrainRegClass(DestReg, PreviousClass))

  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));

  NewMI.add(FalseReg);

  if (DefMI->getParent() != MI.getParent())

  DefMI->eraseFromParent();
  if (MI.isMetaInstruction())

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),

  if (!MI.memoperands_empty()) {
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))

  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STATEPOINT: {
    return std::max(NumBytes, 8U);
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      return (STI.hasStdExtZca() ? 2 : 4) * Num;

    return STI.is64Bit() ? 68 : 44;

  return get(Opcode).getSize();
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {

  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
  const unsigned Opcode = MI.getOpcode();

  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();

    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);

  return MI.isAsCheapAsAMove();
1973std::optional<DestSourcePair>
1977 switch (
MI.getOpcode()) {
1983 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
1984 MI.getOperand(2).isReg())
1986 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
1987 MI.getOperand(1).isReg())
1992 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
1993 MI.getOperand(2).getImm() == 0)
1997 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
1998 MI.getOperand(1).isReg())
2002 case RISCV::SH1ADD_UW:
2004 case RISCV::SH2ADD_UW:
2006 case RISCV::SH3ADD_UW:
2007 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2008 MI.getOperand(2).isReg())
2011 case RISCV::FSGNJ_D:
2012 case RISCV::FSGNJ_S:
2013 case RISCV::FSGNJ_H:
2014 case RISCV::FSGNJ_D_INX:
2015 case RISCV::FSGNJ_D_IN32X:
2016 case RISCV::FSGNJ_S_INX:
2017 case RISCV::FSGNJ_H_INX:
2019 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2020 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2024 return std::nullopt;
2032 const auto &SchedModel =
STI.getSchedModel();
2033 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2045 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2049 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2050 RISCV::OpName::frm) < 0;
2052 "New instructions require FRM whereas the old one does not have it");
2059 for (
auto *NewMI : InsInstrs) {
2061 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2062 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC) \
  case RISCV::OPC##_M1: \
  case RISCV::OPC##_M2: \
  case RISCV::OPC##_M4: \
  case RISCV::OPC##_M8: \
  case RISCV::OPC##_MF2: \
  case RISCV::OPC##_MF4: \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC) \
  case RISCV::OPC##_M1_MASK: \
  case RISCV::OPC##_M2_MASK: \
  case RISCV::OPC##_M4_MASK: \
  case RISCV::OPC##_M8_MASK: \
  case RISCV::OPC##_MF2_MASK: \
  case RISCV::OPC##_MF4_MASK: \
  case RISCV::OPC##_MF8_MASK

    Opcode = *InvOpcode;

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,

  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();

  const uint64_t TSFlags = Desc.TSFlags;

  auto checkImmOperand = [&](unsigned OpIdx) {

  auto checkRegOperand = [&](unsigned OpIdx) {

  if (!checkRegOperand(1))

  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {

    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();

      if (MI1VReg != SrcReg)

  assert(SeenMI2 && "Prev is expected to appear before Root");
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {

         "Expect the presence of passthrough operand.");

  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&

  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))

  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  for (unsigned I = 0; I < 5; ++I)

                                             bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);

  unsigned OperandIdx = Commuted ? 2 : 1;

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
                                                bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))

  Opc = *InverseOpcode;
2400std::optional<unsigned>
2402#define RVV_OPC_LMUL_CASE(OPC, INV) \
2403 case RISCV::OPC##_M1: \
2404 return RISCV::INV##_M1; \
2405 case RISCV::OPC##_M2: \
2406 return RISCV::INV##_M2; \
2407 case RISCV::OPC##_M4: \
2408 return RISCV::INV##_M4; \
2409 case RISCV::OPC##_M8: \
2410 return RISCV::INV##_M8; \
2411 case RISCV::OPC##_MF2: \
2412 return RISCV::INV##_MF2; \
2413 case RISCV::OPC##_MF4: \
2414 return RISCV::INV##_MF4; \
2415 case RISCV::OPC##_MF8: \
2416 return RISCV::INV##_MF8
2418#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2419 case RISCV::OPC##_M1_MASK: \
2420 return RISCV::INV##_M1_MASK; \
2421 case RISCV::OPC##_M2_MASK: \
2422 return RISCV::INV##_M2_MASK; \
2423 case RISCV::OPC##_M4_MASK: \
2424 return RISCV::INV##_M4_MASK; \
2425 case RISCV::OPC##_M8_MASK: \
2426 return RISCV::INV##_M8_MASK; \
2427 case RISCV::OPC##_MF2_MASK: \
2428 return RISCV::INV##_MF2_MASK; \
2429 case RISCV::OPC##_MF4_MASK: \
2430 return RISCV::INV##_MF4_MASK; \
2431 case RISCV::OPC##_MF8_MASK: \
2432 return RISCV::INV##_MF8_MASK
2436 return std::nullopt;
2438 return RISCV::FSUB_H;
2440 return RISCV::FSUB_S;
2442 return RISCV::FSUB_D;
2444 return RISCV::FADD_H;
2446 return RISCV::FADD_S;
2448 return RISCV::FADD_D;
2465#undef RVV_OPC_LMUL_MASK_CASE
2466#undef RVV_OPC_LMUL_CASE
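// Only FADD<->FSUB pairs (scalar and, via the macros above, the RVV pseudos)
// are treated as invertible for reassociation.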
                                         bool DoRegPressureReduce) {

  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                                   bool DoRegPressureReduce) {
                              DoRegPressureReduce)) {
                              DoRegPressureReduce)) {

                                    bool DoRegPressureReduce) {

                                 unsigned CombineOpc) {

  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)

  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

                                      unsigned OuterShiftAmt) {

  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:

                                            bool DoRegPressureReduce) const {
                                 DoRegPressureReduce);

    return RISCV::FMADD_H;
    return RISCV::FMADD_S;
    return RISCV::FMADD_D;

  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();

      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)

  assert(OuterShiftAmt != 0 && "Unexpected opcode");

  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  switch (InnerShiftAmt - OuterShiftAmt) {
    InnerOpc = RISCV::ADD;
    InnerOpc = RISCV::SH1ADD;
    InnerOpc = RISCV::SH2ADD;
    InnerOpc = RISCV::SH3ADD;
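  // The inner (slli Y, C) is replaced by a smaller shNadd (or a plain add)
  // into a fresh virtual register; this is legal because the earlier check
  // guarantees InnerShiftAmt - OuterShiftAmt <= 3.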
  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

                     DelInstrs, InstrIdxForVirtReg);
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;

      ErrInfo = "Expected a non-register operand.";

      int64_t Imm = MO.getImm();
#define CASE_OPERAND_UIMM(NUM) \
  case RISCVOp::OPERAND_UIMM##NUM: \
    Ok = isUInt<NUM>(Imm); \
    break;

#define CASE_OPERAND_SIMM(NUM) \
  case RISCVOp::OPERAND_SIMM##NUM: \
    Ok = isInt<NUM>(Imm); \
    break;

        Ok = (isUInt<5>(Imm) && (Imm != 0)) || (Imm == 32);
        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;
        Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
        Ok = Ok && Imm != 0;
             (Imm >= 0xfffe0 && Imm <= 0xfffff);
        Ok = Imm >= 0 && Imm <= 10;
        Ok = Imm >= 0 && Imm <= 7;
        Ok = Imm >= 1 && Imm <= 10;
        Ok = Imm >= 2 && Imm <= 14;
        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;

        ErrInfo = "Invalid immediate";
      if (!Op.isImm() && !Op.isReg()) {
        ErrInfo = "Invalid operand type for VL operand";
      if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
        auto *RC = MRI.getRegClass(Op.getReg());
        if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
          ErrInfo = "Invalid register class for VL operand";

        ErrInfo = "VL operand w/o SEW operand?";

      if (!MI.getOperand(OpIdx).isImm()) {
        ErrInfo = "SEW value expected to be an immediate";

        ErrInfo = "Unexpected SEW value";
      unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
        ErrInfo = "Unexpected SEW value";

      if (!MI.getOperand(OpIdx).isImm()) {
        ErrInfo = "Policy operand expected to be an immediate";

        ErrInfo = "Invalid Policy Value";

        ErrInfo = "policy operand w/o VL operand?";

      if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
        ErrInfo = "policy operand w/o tied operand?";

      !MI.readsRegister(RISCV::FRM, nullptr)) {
    ErrInfo = "dynamic rounding mode should read FRM";
  case RISCV::LD_RV32:
  case RISCV::SD_RV32:

  int64_t NewOffset = OldOffset + Disp;

         "Addressing mode not supported for folding");

  case RISCV::LD_RV32:
  case RISCV::SD_RV32:

  OffsetIsScalable = false;

  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))

  if (MO1->getAddrSpace() != MO2->getAddrSpace())
  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
  return Base1 == Base2;

                                      int64_t Offset2, bool OffsetIsScalable2,
                                      unsigned ClusterSize,
                                      unsigned NumBytes) const {

  if (!BaseOps1.empty() && !BaseOps2.empty()) {
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {

      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();

  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;

  int64_t OffsetA = 0, OffsetB = 0;
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
3368std::pair<unsigned, unsigned>
3371 return std::make_pair(TF & Mask, TF & ~Mask);
3377 static const std::pair<unsigned, const char *> TargetFlags[] = {
3378 {MO_CALL,
"riscv-call"},
3379 {MO_LO,
"riscv-lo"},
3380 {MO_HI,
"riscv-hi"},
3381 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3382 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3383 {MO_GOT_HI,
"riscv-got-hi"},
3384 {MO_TPREL_LO,
"riscv-tprel-lo"},
3385 {MO_TPREL_HI,
"riscv-tprel-hi"},
3386 {MO_TPREL_ADD,
"riscv-tprel-add"},
3387 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3388 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3389 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3390 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3391 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3392 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3400 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3413 unsigned &Flags)
const {
3432 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3433 F.hasFnAttribute(
"patchable-function-entry");
3438 return MI.readsRegister(RegNo,
TRI) ||
3439 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3444 return MI.modifiesRegister(RegNo,
TRI) ||
3445 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3449 if (!
MBB.back().isReturn())
3472 if (
C.back().isReturn()) {
3474 "The candidate who uses return instruction must be outlined "
3487 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *
TRI);
3490std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3493 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3494 unsigned MinRepeats)
const {
3500 if (RepeatedSequenceLocs.size() < MinRepeats)
3501 return std::nullopt;
3505 unsigned InstrSizeCExt =
3507 unsigned CallOverhead = 0, FrameOverhead = 0;
3514 CallOverhead = 4 + InstrSizeCExt;
3521 FrameOverhead = InstrSizeCExt;
3524 for (
auto &
C : RepeatedSequenceLocs)
3525 C.setCallInfo(MOCI, CallOverhead);
3527 unsigned SequenceSize = 0;
3528 for (
auto &
MI : Candidate)
3531 return std::make_unique<outliner::OutlinedFunction>(
3532 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
                                          unsigned Flags) const {

      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  if (MI.isCFIInstruction())

  for (const auto &MO : MI.operands()) {
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))

  auto I = MBB.begin();
  for (; I != E; ++I) {
    if (I->isCFIInstruction()) {
      I->removeFromParent();

  MBB.addLiveIn(RISCV::X5);

      .addGlobalAddress(M.getNamedValue(MF.getName()),
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
  return std::nullopt;

  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;

  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

    return std::string();
    return std::string();
  std::string Comment;

  switch (OpInfo.OperandType) {
    unsigned Imm = Op.getImm();

    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

    unsigned Policy = Op.getImm();
           "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP) \
  CASE_RVV_OPCODE_MASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP) \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP) \
  CASE_RVV_OPCODE_UNMASK(OP): \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP) \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3780 unsigned &SrcOpIdx1,
3781 unsigned &SrcOpIdx2)
const {
3783 if (!
Desc.isCommutable())
3786 switch (
MI.getOpcode()) {
3787 case RISCV::TH_MVEQZ:
3788 case RISCV::TH_MVNEZ:
3792 if (
MI.getOperand(2).getReg() == RISCV::X0)
3795 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3796 case RISCV::QC_MVEQ:
3797 case RISCV::QC_MVNE:
3798 case RISCV::QC_MVLT:
3799 case RISCV::QC_MVGE:
3800 case RISCV::QC_MVLTU:
3801 case RISCV::QC_MVGEU:
3802 case RISCV::QC_MVEQI:
3803 case RISCV::QC_MVNEI:
3804 case RISCV::QC_MVLTI:
3805 case RISCV::QC_MVGEI:
3806 case RISCV::QC_MVLTUI:
3807 case RISCV::QC_MVGEUI:
3808 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
3809 case RISCV::TH_MULA:
3810 case RISCV::TH_MULAW:
3811 case RISCV::TH_MULAH:
3812 case RISCV::TH_MULS:
3813 case RISCV::TH_MULSW:
3814 case RISCV::TH_MULSH:
3816 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3817 case RISCV::PseudoCCMOVGPRNoX0:
3818 case RISCV::PseudoCCMOVGPR:
3820 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3847 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3874 unsigned CommutableOpIdx1 = 1;
3875 unsigned CommutableOpIdx2 = 3;
3876 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3897 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3899 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3903 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3904 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3910 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3911 SrcOpIdx2 == CommuteAnyOperandIndex) {
3914 unsigned CommutableOpIdx1 = SrcOpIdx1;
3915 if (SrcOpIdx1 == SrcOpIdx2) {
3918 CommutableOpIdx1 = 1;
3919 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3921 CommutableOpIdx1 = SrcOpIdx2;
3926 unsigned CommutableOpIdx2;
3927 if (CommutableOpIdx1 != 1) {
3929 CommutableOpIdx2 = 1;
3931 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
3936 if (Op1Reg !=
MI.getOperand(2).getReg())
3937 CommutableOpIdx2 = 2;
3939 CommutableOpIdx2 = 3;
3944 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
                                                     unsigned OpIdx2) const {
    return *MI.getParent()->getParent()->CloneMachineInstr(&MI);

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));

  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI: {
    auto &WorkingMI = cloneIfNew(MI);

  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");

    switch (MI.getOpcode()) {

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));

    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");

    if (OpIdx1 == 3 || OpIdx2 == 3) {
      switch (MI.getOpcode()) {

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
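// The switch below canonicalizes instructions whose operands involve X0
// (or a self-XOR) into plain ADDI/ADDIW/SLLI moves and immediates, and
// rewrites branches on X0 by dropping the dead destination operand.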
  switch (MI.getOpcode()) {

    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));

  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {
        MI.setDesc(get(RISCV::SLLI_UW));

      MI.setDesc(get(RISCV::SLLI));

    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::SLLI_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));

  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));

    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP) \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
  case RISCV::PseudoV##OP##_##LMUL##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL; \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)
  switch (MI.getOpcode()) {
    assert(MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");

    switch (MI.getOpcode()) {

        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(5))
        .add(MI.getOperand(6));

    assert(MI.getNumExplicitOperands() == 6);

    switch (MI.getOpcode()) {

        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(5));

  unsigned NumOps = MI.getNumOperands();
    if (Op.isReg() && Op.isKill())

  if (MI.getOperand(0).isEarlyClobber()) {

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
  if (ShiftAmount == 0)

  } else if (STI.hasShlAdd(3) &&

    if (Amount % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(Amount / 9);
    } else if (Amount % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(Amount / 5);
    } else if (Amount % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(Amount / 3);

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
            .addImm(ShiftAmount - PrevShiftAmount)
        if (Amount >> (ShiftAmount + 1)) {
          Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        PrevShiftAmount = ShiftAmount;

    assert(Acc && "Expected valid accumulator");
4673 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4681 ?
STI.getTailDupAggressiveThreshold()
4688 unsigned Opcode =
MI.getOpcode();
4689 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
4695std::optional<std::pair<unsigned, unsigned>>
4699 return std::nullopt;
4700 case RISCV::PseudoVSPILL2_M1:
4701 case RISCV::PseudoVRELOAD2_M1:
4702 return std::make_pair(2u, 1u);
4703 case RISCV::PseudoVSPILL2_M2:
4704 case RISCV::PseudoVRELOAD2_M2:
4705 return std::make_pair(2u, 2u);
4706 case RISCV::PseudoVSPILL2_M4:
4707 case RISCV::PseudoVRELOAD2_M4:
4708 return std::make_pair(2u, 4u);
4709 case RISCV::PseudoVSPILL3_M1:
4710 case RISCV::PseudoVRELOAD3_M1:
4711 return std::make_pair(3u, 1u);
4712 case RISCV::PseudoVSPILL3_M2:
4713 case RISCV::PseudoVRELOAD3_M2:
4714 return std::make_pair(3u, 2u);
4715 case RISCV::PseudoVSPILL4_M1:
4716 case RISCV::PseudoVRELOAD4_M1:
4717 return std::make_pair(4u, 1u);
4718 case RISCV::PseudoVSPILL4_M2:
4719 case RISCV::PseudoVRELOAD4_M2:
4720 return std::make_pair(4u, 2u);
4721 case RISCV::PseudoVSPILL5_M1:
4722 case RISCV::PseudoVRELOAD5_M1:
4723 return std::make_pair(5u, 1u);
4724 case RISCV::PseudoVSPILL6_M1:
4725 case RISCV::PseudoVRELOAD6_M1:
4726 return std::make_pair(6u, 1u);
4727 case RISCV::PseudoVSPILL7_M1:
4728 case RISCV::PseudoVRELOAD7_M1:
4729 return std::make_pair(7u, 1u);
4730 case RISCV::PseudoVSPILL8_M1:
4731 case RISCV::PseudoVRELOAD8_M1:
4732 return std::make_pair(8u, 1u);
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // Single-width shifts, scaling shifts, and rotates: only the low
  // log2(SEW) bits of the scalar operand are demanded.
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    return Log2SEW;

  // Narrowing shifts/clips and the widening shift operate across 2*SEW,
  // so one extra bit of the scalar operand is demanded.
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  case RISCV::VWSLL_VX:
    return Log2SEW + 1;

  // The remaining scalar operands are full SEW-bit values.
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  case RISCV::VMERGE_VXM:
  case RISCV::VMV_V_X:
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  case RISCV::VSMUL_VX:
  case RISCV::VMV_S_X:
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
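// Example: with SEW = 32 (Log2SEW = 5), VSLL_VX demands only the low 5 bits
// of the scalar shift amount, the narrowing VNSRL_WX demands 6 bits (it
// shifts across 2*SEW), and VADD_VX demands the full 1 << 5 = 32 bits.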
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}
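// Illustrative use (the pseudo name is one of many generated table entries):
// getRVVMCOpcode(RISCV::PseudoVADD_VV_M1) maps the LMUL=1 pseudo back to the
// MC-level RISCV::VADD_VV, while any non-RVV opcode yields 0.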
// From getDestLog2EEW:
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  // ...
  return std::nullopt;
/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // ...
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  // ... (LHSImm / RHSImm extraction)
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
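// Reading the checks above: two operands naming the same virtual register are
// trivially ordered, and an immediate VL of 0 is <= any other VL. The elided
// checks are expected to special-case RISCV::VLMaxSentinel (the encoding for
// "use VLMAX"), so raw immediates are only compared once both operands reduce
// to plain constants.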
// Members of RISCVPipelinerLoopInfo, the PipelinerLoopInfo implementation
// returned by analyzeLoopForPipelining below (class head elided):
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // ...
  }

  std::optional<bool>
  createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                  SmallVectorImpl<MachineOperand> &CondParam) override {
    // ...
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  // ...
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;
  // ...
  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");
  // ... (helper resolving a condition operand to its defining instruction)
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  // ...
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
// From isHighLatencyDef (function head elided):
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Scalar FP divide / square root, including the Zfinx/Zdinx variants.
  // ...
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer divide / remainder.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector FP divide / square root, including the reciprocal estimate.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
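// Every opcode listed above is a divide, remainder, or square-root style
// operation, the classic long-latency and typically unpipelined instructions,
// so the scheduler treats their results as high-latency defs. A minimal usage
// sketch (assuming TII is a RISCVInstrInfo reference):
//   if (TII.isHighLatencyDef(MI.getOpcode()))
//     /* keep consumers of MI's defs away from MI in the schedule */;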