39#define GEN_CHECK_COMPRESS_INSTR
40#include "RISCVGenCompressInstEmitter.inc"
42#define GET_INSTRINFO_CTOR_DTOR
43#define GET_INSTRINFO_NAMED_OPS
44#include "RISCVGenInstrInfo.inc"
48 cl::desc(
"Prefer whole register move for vector registers."));
51 "riscv-force-machine-combiner-strategy",
cl::Hidden,
52 cl::desc(
"Force machine combiner to use a specific strategy for machine "
53 "trace metrics evaluation."),
54 cl::init(MachineTraceStrategy::TS_NumStrategies),
57 clEnumValN(MachineTraceStrategy::TS_MinInstrCount,
"min-instr",
58 "MinInstrCount strategy.")));
64#define GET_RISCVVPseudosTable_IMPL
65#include "RISCVGenSearchableTables.inc"
71#define GET_RISCVMaskedPseudosTable_IMPL
72#include "RISCVGenSearchableTables.inc"
90 int &FrameIndex)
const {
97 unsigned &MemBytes)
const {
98 switch (
MI.getOpcode()) {
121 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
122 MI.getOperand(2).getImm() == 0) {
123 FrameIndex =
MI.getOperand(1).getIndex();
124 return MI.getOperand(0).getReg();
131 int &FrameIndex)
const {
138 unsigned &MemBytes)
const {
139 switch (
MI.getOpcode()) {
159 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
160 MI.getOperand(2).getImm() == 0) {
161 FrameIndex =
MI.getOperand(1).getIndex();
162 return MI.getOperand(0).getReg();
170 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
181 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
182 "Unexpected COPY instruction.");
186 bool FoundDef =
false;
187 bool FirstVSetVLI =
false;
188 unsigned FirstSEW = 0;
191 if (
MBBI->isMetaInstruction())
194 if (
MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
195 MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
196 MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
206 unsigned FirstVType =
MBBI->getOperand(2).getImm();
211 if (FirstLMul != LMul)
216 if (
MBBI->getOperand(0).getReg() != RISCV::X0)
218 if (
MBBI->getOperand(1).isImm())
220 if (
MBBI->getOperand(1).getReg() != RISCV::X0)
226 unsigned VType =
MBBI->getOperand(2).getImm();
244 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
246 }
else if (
MBBI->getNumDefs()) {
249 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
255 if (!MO.isReg() || !MO.isDef())
257 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
272 if (MO.getReg() != SrcReg)
313 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
314 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
316 assert(!Fractional &&
"It is impossible be fractional lmul here.");
317 unsigned NumRegs = NF * LMulVal;
323 SrcEncoding += NumRegs - 1;
324 DstEncoding += NumRegs - 1;
330 unsigned,
unsigned> {
338 uint16_t Diff = DstEncoding - SrcEncoding;
339 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
340 DstEncoding % 8 == 7)
342 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
343 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
344 DstEncoding % 4 == 3)
346 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
347 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
348 DstEncoding % 2 == 1)
350 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
353 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
358 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
360 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
361 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
363 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
364 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
366 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
369 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
374 if (&RegClass == &RISCV::VRRegClass)
376 return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
378 while (
I != NumRegs) {
383 auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
384 GetCopyInfo(SrcEncoding, DstEncoding);
388 if (LMul == LMulCopied &&
391 if (DefMBBI->getOpcode() == VIOpc)
397 MCRegister ActualSrcReg = FindRegWithEncoding(
398 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
399 MCRegister ActualDstReg = FindRegWithEncoding(
400 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
408 MIB = MIB.add(DefMBBI->getOperand(2));
421 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
422 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
433 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
440 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
443 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
444 .
addReg(
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even),
448 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
449 .
addReg(
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd),
456 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
457 RISCV::GPRRegClass.
contains(DstReg)) {
459 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
464 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
466 if (
STI.hasStdExtZfh()) {
467 Opc = RISCV::FSGNJ_H;
470 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
471 "Unexpected extensions");
473 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
474 &RISCV::FPR32RegClass);
475 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
476 &RISCV::FPR32RegClass);
477 Opc = RISCV::FSGNJ_S;
485 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
492 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
499 if (RISCV::FPR32RegClass.
contains(DstReg) &&
500 RISCV::GPRRegClass.
contains(SrcReg)) {
506 if (RISCV::GPRRegClass.
contains(DstReg) &&
507 RISCV::FPR32RegClass.
contains(SrcReg)) {
513 if (RISCV::FPR64RegClass.
contains(DstReg) &&
514 RISCV::GPRRegClass.
contains(SrcReg)) {
521 if (RISCV::GPRRegClass.
contains(DstReg) &&
522 RISCV::FPR64RegClass.
contains(SrcReg)) {
531 &RISCV::VRRegClass, &RISCV::VRM2RegClass, &RISCV::VRM4RegClass,
532 &RISCV::VRM8RegClass, &RISCV::VRN2M1RegClass, &RISCV::VRN2M2RegClass,
533 &RISCV::VRN2M4RegClass, &RISCV::VRN3M1RegClass, &RISCV::VRN3M2RegClass,
534 &RISCV::VRN4M1RegClass, &RISCV::VRN4M2RegClass, &RISCV::VRN5M1RegClass,
535 &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, &RISCV::VRN8M1RegClass};
536 for (
const auto &RegClass : RVVRegClasses) {
537 if (RegClass->contains(DstReg, SrcReg)) {
548 Register SrcReg,
bool IsKill,
int FI,
556 bool IsScalableVector =
true;
557 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
558 Opcode =
TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
559 RISCV::SW : RISCV::SD;
560 IsScalableVector =
false;
561 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
562 Opcode = RISCV::PseudoRV32ZdinxSD;
563 IsScalableVector =
false;
564 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
566 IsScalableVector =
false;
567 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
569 IsScalableVector =
false;
570 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
572 IsScalableVector =
false;
573 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
574 Opcode = RISCV::VS1R_V;
575 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
576 Opcode = RISCV::VS2R_V;
577 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
578 Opcode = RISCV::VS4R_V;
579 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
580 Opcode = RISCV::VS8R_V;
581 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
582 Opcode = RISCV::PseudoVSPILL2_M1;
583 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
584 Opcode = RISCV::PseudoVSPILL2_M2;
585 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
586 Opcode = RISCV::PseudoVSPILL2_M4;
587 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
588 Opcode = RISCV::PseudoVSPILL3_M1;
589 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
590 Opcode = RISCV::PseudoVSPILL3_M2;
591 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
592 Opcode = RISCV::PseudoVSPILL4_M1;
593 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
594 Opcode = RISCV::PseudoVSPILL4_M2;
595 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
596 Opcode = RISCV::PseudoVSPILL5_M1;
597 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
598 Opcode = RISCV::PseudoVSPILL6_M1;
599 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
600 Opcode = RISCV::PseudoVSPILL7_M1;
601 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
602 Opcode = RISCV::PseudoVSPILL8_M1;
606 if (IsScalableVector) {
639 bool IsScalableVector =
true;
640 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
641 Opcode =
TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
642 RISCV::LW : RISCV::LD;
643 IsScalableVector =
false;
644 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
645 Opcode = RISCV::PseudoRV32ZdinxLD;
646 IsScalableVector =
false;
647 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
649 IsScalableVector =
false;
650 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
652 IsScalableVector =
false;
653 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
655 IsScalableVector =
false;
656 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
657 Opcode = RISCV::VL1RE8_V;
658 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
659 Opcode = RISCV::VL2RE8_V;
660 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
661 Opcode = RISCV::VL4RE8_V;
662 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
663 Opcode = RISCV::VL8RE8_V;
664 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
665 Opcode = RISCV::PseudoVRELOAD2_M1;
666 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
667 Opcode = RISCV::PseudoVRELOAD2_M2;
668 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
669 Opcode = RISCV::PseudoVRELOAD2_M4;
670 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
671 Opcode = RISCV::PseudoVRELOAD3_M1;
672 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
673 Opcode = RISCV::PseudoVRELOAD3_M2;
674 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
675 Opcode = RISCV::PseudoVRELOAD4_M1;
676 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
677 Opcode = RISCV::PseudoVRELOAD4_M2;
678 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
679 Opcode = RISCV::PseudoVRELOAD5_M1;
680 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
681 Opcode = RISCV::PseudoVRELOAD6_M1;
682 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
683 Opcode = RISCV::PseudoVRELOAD7_M1;
684 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
685 Opcode = RISCV::PseudoVRELOAD8_M1;
689 if (IsScalableVector) {
723 if (Ops.
size() != 1 || Ops[0] != 1)
727 switch (
MI.getOpcode()) {
734 LoadOpc = RISCV::LWU;
738 LoadOpc = RISCV::LBU;
748 case RISCV::ZEXT_H_RV32:
749 case RISCV::ZEXT_H_RV64:
750 LoadOpc = RISCV::LHU;
760 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(LoadOpc),
771 bool DstIsDead)
const {
777 if (!isUInt<32>(Val))
781 Val = SignExtend64<32>(Val);
787 bool SrcRenamable =
false;
791 bool LastItem = ++Num == Seq.
size();
796 switch (Inst.getOpndKind()) {
806 .
addReg(SrcReg, SrcRegState)
813 .
addReg(SrcReg, SrcRegState)
814 .
addReg(SrcReg, SrcRegState)
820 .
addReg(SrcReg, SrcRegState)
828 SrcRenamable = DstRenamable;
836 case RISCV::CV_BEQIMM:
838 case RISCV::CV_BNEIMM:
862 "Unknown conditional branch");
875 return Imm ? RISCV::CV_BEQIMM : RISCV::BEQ;
877 return Imm ? RISCV::CV_BNEIMM : RISCV::BNE;
917 bool AllowModify)
const {
923 if (
I ==
MBB.
end() || !isUnpredicatedTerminator(*
I))
929 int NumTerminators = 0;
930 for (
auto J =
I.getReverse(); J !=
MBB.
rend() && isUnpredicatedTerminator(*J);
933 if (J->getDesc().isUnconditionalBranch() ||
934 J->getDesc().isIndirectBranch()) {
941 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.
end()) {
942 while (std::next(FirstUncondOrIndirectBr) !=
MBB.
end()) {
943 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
946 I = FirstUncondOrIndirectBr;
950 if (
I->getDesc().isIndirectBranch())
954 if (
I->isPreISelOpcode())
958 if (NumTerminators > 2)
962 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
968 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
974 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
975 I->getDesc().isUnconditionalBranch()) {
986 int *BytesRemoved)
const {
993 if (!
I->getDesc().isUnconditionalBranch() &&
994 !
I->getDesc().isConditionalBranch())
1000 I->eraseFromParent();
1007 if (!
I->getDesc().isConditionalBranch())
1013 I->eraseFromParent();
1026 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1028 "RISC-V branch conditions have two components!");
1063 assert(RS &&
"RegScavenger required for long branching");
1065 "new block should be inserted for expanding unconditional branch");
1068 "restore block should be inserted for restoring clobbered registers");
1075 if (!isInt<32>(BrOffset))
1077 "Branch offsets outside of the signed 32-bit range not supported");
1082 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1094 if (TmpGPR != RISCV::NoRegister)
1100 TmpGPR = RISCV::X27;
1103 if (FrameIndex == -1)
1108 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1111 MI.getOperand(1).setMBB(&RestoreBB);
1115 TRI->eliminateFrameIndex(RestoreBB.
back(),
1119 MRI.replaceRegWith(ScratchReg, TmpGPR);
1120 MRI.clearVirtRegs();
1125 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1165 auto isLoadImm = [](
const MachineInstr *
MI, int64_t &Imm) ->
bool {
1166 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1167 MI->getOperand(1).getReg() == RISCV::X0) {
1168 Imm =
MI->getOperand(2).getImm();
1178 return Reg.isVirtual() && isLoadImm(
MRI.getVRegDef(Reg), Imm);
1185 auto searchConst = [&](int64_t C1) ->
Register {
1187 auto DefC1 = std::find_if(++II, E, [&](
const MachineInstr &
I) ->
bool {
1189 return isLoadImm(&
I, Imm) && Imm == C1 &&
1190 I.getOperand(0).getReg().isVirtual();
1193 return DefC1->getOperand(0).getReg();
1198 bool Modify =
false;
1200 if (isFromLoadImm(
LHS, C0) &&
MRI.hasOneUse(
LHS.getReg())) {
1205 if (
Register RegZ = searchConst(C0 + 1)) {
1211 MRI.clearKillFlags(RegZ);
1214 }
else if (isFromLoadImm(
RHS, C0) &&
MRI.hasOneUse(
RHS.getReg())) {
1219 if (
Register RegZ = searchConst(C0 - 1)) {
1225 MRI.clearKillFlags(RegZ);
1239 MI.eraseFromParent();
1246 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1248 int NumOp =
MI.getNumExplicitOperands();
1249 return MI.getOperand(NumOp - 1).getMBB();
1253 int64_t BrOffset)
const {
1267 case RISCV::CV_BEQIMM:
1268 case RISCV::CV_BNEIMM:
1269 return isIntN(13, BrOffset);
1271 case RISCV::PseudoBR:
1272 return isIntN(21, BrOffset);
1273 case RISCV::PseudoJump:
1283 case RISCV::ADD:
return RISCV::PseudoCCADD;
break;
1284 case RISCV::SUB:
return RISCV::PseudoCCSUB;
break;
1285 case RISCV::SLL:
return RISCV::PseudoCCSLL;
break;
1286 case RISCV::SRL:
return RISCV::PseudoCCSRL;
break;
1287 case RISCV::SRA:
return RISCV::PseudoCCSRA;
break;
1288 case RISCV::AND:
return RISCV::PseudoCCAND;
break;
1289 case RISCV::OR:
return RISCV::PseudoCCOR;
break;
1290 case RISCV::XOR:
return RISCV::PseudoCCXOR;
break;
1292 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
break;
1293 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
break;
1294 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
break;
1295 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
break;
1296 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
break;
1297 case RISCV::ORI:
return RISCV::PseudoCCORI;
break;
1298 case RISCV::XORI:
return RISCV::PseudoCCXORI;
break;
1300 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
break;
1301 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
break;
1302 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
break;
1303 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
break;
1304 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
break;
1306 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
break;
1307 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
break;
1308 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
break;
1309 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
break;
1311 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
break;
1312 case RISCV::ORN:
return RISCV::PseudoCCORN;
break;
1313 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
break;
1316 return RISCV::INSTRUCTION_LIST_END;
1324 if (!Reg.isVirtual())
1326 if (!
MRI.hasOneNonDBGUse(Reg))
1335 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1336 MI->getOperand(1).getReg() == RISCV::X0)
1341 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1351 if (MO.getReg().isPhysical() && !
MRI.isConstantPhysReg(MO.getReg()))
1354 bool DontMoveAcrossStores =
true;
1355 if (!
MI->isSafeToMove(
nullptr, DontMoveAcrossStores))
1362 unsigned &TrueOp,
unsigned &FalseOp,
1363 bool &Optimizable)
const {
1364 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1365 "Unknown select instruction");
1375 Cond.push_back(
MI.getOperand(1));
1376 Cond.push_back(
MI.getOperand(2));
1377 Cond.push_back(
MI.getOperand(3));
1379 Optimizable =
STI.hasShortForwardBranchOpt();
1386 bool PreferFalse)
const {
1387 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1388 "Unknown select instruction");
1389 if (!
STI.hasShortForwardBranchOpt())
1395 bool Invert = !
DefMI;
1403 Register DestReg =
MI.getOperand(0).getReg();
1405 if (!
MRI.constrainRegClass(DestReg, PreviousClass))
1409 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1416 NewMI.
add(
MI.getOperand(1));
1417 NewMI.
add(
MI.getOperand(2));
1426 NewMI.
add(FalseReg);
1450 if (
MI.isMetaInstruction())
1453 unsigned Opcode =
MI.getOpcode();
1455 if (Opcode == TargetOpcode::INLINEASM ||
1456 Opcode == TargetOpcode::INLINEASM_BR) {
1459 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1460 *
TM.getMCAsmInfo());
1463 if (!
MI.memoperands_empty()) {
1468 if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
1469 if (isCompressibleInst(
MI,
STI))
1477 if (Opcode == TargetOpcode::BUNDLE)
1478 return getInstBundleLength(
MI);
1480 if (
MI.getParent() &&
MI.getParent()->getParent()) {
1481 if (isCompressibleInst(
MI,
STI))
1486 case TargetOpcode::STACKMAP:
1489 case TargetOpcode::PATCHPOINT:
1492 case TargetOpcode::STATEPOINT: {
1496 return std::max(NumBytes, 8U);
1499 return get(Opcode).getSize();
1503unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
1507 while (++
I != E &&
I->isInsideBundle()) {
1508 assert(!
I->isBundle() &&
"No nested bundle!");
1515 const unsigned Opcode =
MI.getOpcode();
1519 case RISCV::FSGNJ_D:
1520 case RISCV::FSGNJ_S:
1521 case RISCV::FSGNJ_H:
1522 case RISCV::FSGNJ_D_INX:
1523 case RISCV::FSGNJ_D_IN32X:
1524 case RISCV::FSGNJ_S_INX:
1525 case RISCV::FSGNJ_H_INX:
1527 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
1528 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
1532 return (
MI.getOperand(1).isReg() &&
1533 MI.getOperand(1).getReg() == RISCV::X0) ||
1534 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
1536 return MI.isAsCheapAsAMove();
1539std::optional<DestSourcePair>
1543 switch (
MI.getOpcode()) {
1548 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
1549 MI.getOperand(2).getImm() == 0)
1552 case RISCV::FSGNJ_D:
1553 case RISCV::FSGNJ_S:
1554 case RISCV::FSGNJ_H:
1555 case RISCV::FSGNJ_D_INX:
1556 case RISCV::FSGNJ_D_IN32X:
1557 case RISCV::FSGNJ_S_INX:
1558 case RISCV::FSGNJ_H_INX:
1560 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
1561 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
1565 return std::nullopt;
1573 const auto &SchedModel =
STI.getSchedModel();
1574 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1591 RISCV::OpName::frm) < 0;
1593 "New instructions require FRM whereas the old one does not have it");
1600 for (
auto *NewMI : InsInstrs) {
1603 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
1645bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
1646 bool Invert)
const {
// Expands to case labels covering every LMUL variant (M1, M2, M4, M8, MF2,
// MF4, MF8) of the unmasked RVV pseudo opcode OPC.  For use inside a switch
// over MachineInstr opcodes.
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8
// Masked counterpart of OPCODE_LMUL_CASE: expands to case labels covering
// every LMUL variant of the _MASK form of the RVV pseudo opcode OPC.
#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK
1668 Opcode = *InvOpcode;
1685#undef OPCODE_LMUL_MASK_CASE
1686#undef OPCODE_LMUL_CASE
1689bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
1702 auto checkImmOperand = [&](
unsigned OpIdx) {
1706 auto checkRegOperand = [&](
unsigned OpIdx) {
1714 if (!checkRegOperand(1))
1729 bool SeenMI2 =
false;
1739 if (It->modifiesRegister(RISCV::V0,
TRI)) {
1740 Register SrcReg = It->getOperand(1).getReg();
1758 if (MI1VReg != SrcReg)
1767 assert(SeenMI2 &&
"Prev is expected to appear before Root");
1806bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
1807 bool &Commuted)
const {
1811 "Expect the present of passthrough operand.");
1817 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
1818 areRVVInstsReassociable(Inst, *MI2);
1822 return areRVVInstsReassociable(Inst, *MI1) &&
1823 (isVectorAssociativeAndCommutative(*MI1) ||
1824 isVectorAssociativeAndCommutative(*MI1,
true)) &&
1831 if (!isVectorAssociativeAndCommutative(Inst) &&
1832 !isVectorAssociativeAndCommutative(Inst,
true))
1844 MI1 =
MRI.getUniqueVRegDef(Op1.
getReg());
1846 MI2 =
MRI.getUniqueVRegDef(Op2.
getReg());
1858 for (
unsigned I = 0;
I < 5; ++
I)
1864 bool &Commuted)
const {
1865 if (isVectorAssociativeAndCommutative(Inst) ||
1866 isVectorAssociativeAndCommutative(Inst,
true))
1867 return hasReassociableVectorSibling(Inst, Commuted);
1873 unsigned OperandIdx = Commuted ? 2 : 1;
1877 int16_t InstFrmOpIdx =
1879 int16_t SiblingFrmOpIdx =
1882 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
1887 bool Invert)
const {
1888 if (isVectorAssociativeAndCommutative(Inst, Invert))
1896 Opc = *InverseOpcode;
1941std::optional<unsigned>
// For each LMUL variant of opcode OPC, returns the matching LMUL variant of
// opcode INV.  Used in a switch to map an RVV pseudo to its inverse
// (see the surrounding inverse-opcode lookup, which pairs e.g. FADD/FSUB).
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8
// Masked counterpart of RVV_OPC_LMUL_CASE: maps each _MASK LMUL variant of
// opcode OPC to the matching _MASK variant of its inverse opcode INV.
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK
1977 return std::nullopt;
1979 return RISCV::FSUB_H;
1981 return RISCV::FSUB_S;
1983 return RISCV::FSUB_D;
1985 return RISCV::FADD_H;
1987 return RISCV::FADD_S;
1989 return RISCV::FADD_D;
2006#undef RVV_OPC_LMUL_MASK_CASE
2007#undef RVV_OPC_LMUL_CASE
2012 bool DoRegPressureReduce) {
2028 if (DoRegPressureReduce && !
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2039 bool DoRegPressureReduce) {
2041 bool IsFAdd =
isFADD(Opc);
2042 if (!IsFAdd && !
isFSUB(Opc))
2046 DoRegPressureReduce)) {
2052 DoRegPressureReduce)) {
2062 bool DoRegPressureReduce) {
2070 unsigned CombineOpc) {
2077 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2080 if (!
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2091 unsigned OuterShiftAmt) {
2097 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2159 bool DoRegPressureReduce)
const {
2168 DoRegPressureReduce);
2176 return RISCV::FMADD_H;
2178 return RISCV::FMADD_S;
2180 return RISCV::FMADD_D;
2225 bool Mul1IsKill = Mul1.
isKill();
2226 bool Mul2IsKill = Mul2.
isKill();
2227 bool AddendIsKill = Addend.
isKill();
2236 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2261 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2268 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2271 switch (InnerShiftAmt - OuterShiftAmt) {
2275 InnerOpc = RISCV::ADD;
2278 InnerOpc = RISCV::SH1ADD;
2281 InnerOpc = RISCV::SH2ADD;
2284 InnerOpc = RISCV::SH3ADD;
2292 Register NewVR =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
2302 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2319 DelInstrs, InstrIdxForVirtReg);
2347 unsigned OpType = Operand.OperandType;
2352 int64_t Imm = MO.
getImm();
2359#define CASE_OPERAND_UIMM(NUM) \
2360 case RISCVOp::OPERAND_UIMM##NUM: \
2361 Ok = isUInt<NUM>(Imm); \
2375 Ok = isShiftedUInt<1, 1>(Imm);
2378 Ok = isShiftedUInt<5, 2>(Imm);
2381 Ok = isShiftedUInt<6, 2>(Imm);
2384 Ok = isShiftedUInt<5, 3>(Imm);
2387 Ok = isUInt<8>(Imm) && Imm >= 32;
2390 Ok = isShiftedUInt<6, 3>(Imm);
2393 Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
2396 Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
2405 Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
2411 Ok = Imm != 0 && isInt<6>(Imm);
2414 Ok = isUInt<10>(Imm);
2417 Ok = isUInt<11>(Imm);
2420 Ok = isInt<12>(Imm);
2423 Ok = isShiftedInt<7, 5>(Imm);
2426 Ok =
STI.
is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2429 Ok =
STI.
is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2430 Ok = Ok && Imm != 0;
2433 Ok = (isUInt<5>(Imm) && Imm != 0) ||
2434 (Imm >= 0xfffe0 && Imm <= 0xfffff);
2437 Ok = Imm >= 0 && Imm <= 10;
2440 Ok = Imm >= 0 && Imm <= 7;
2443 Ok = Imm >= 1 && Imm <= 10;
2446 Ok = Imm >= 2 && Imm <= 14;
2449 Ok = (Imm & 0xf) == 0;
2453 ErrInfo =
"Invalid immediate";
2463 if (!
Op.isImm() && !
Op.isReg()) {
2464 ErrInfo =
"Invalid operand type for VL operand";
2467 if (
Op.isReg() &&
Op.getReg() != RISCV::NoRegister) {
2469 auto *RC =
MRI.getRegClass(
Op.getReg());
2470 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
2471 ErrInfo =
"Invalid register class for VL operand";
2476 ErrInfo =
"VL operand w/o SEW operand?";
2482 if (!
MI.getOperand(OpIdx).isImm()) {
2483 ErrInfo =
"SEW value expected to be an immediate";
2486 uint64_t Log2SEW =
MI.getOperand(OpIdx).getImm();
2488 ErrInfo =
"Unexpected SEW value";
2491 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2493 ErrInfo =
"Unexpected SEW value";
2499 if (!
MI.getOperand(OpIdx).isImm()) {
2500 ErrInfo =
"Policy operand expected to be an immediate";
2503 uint64_t Policy =
MI.getOperand(OpIdx).getImm();
2505 ErrInfo =
"Invalid Policy Value";
2509 ErrInfo =
"policy operand w/o VL operand?";
2517 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
2518 ErrInfo =
"policy operand w/o tied operand?";
2561 int64_t NewOffset = OldOffset + Disp;
2563 NewOffset = SignExtend64<32>(NewOffset);
2565 if (!isInt<12>(NewOffset))
2583 "Addressing mode not supported for folding");
2625 OffsetIsScalable =
false;
2641 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
2649 if (MO1->getAddrSpace() != MO2->getAddrSpace())
2652 auto Base1 = MO1->getValue();
2653 auto Base2 = MO2->getValue();
2654 if (!Base1 || !Base2)
2659 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
2662 return Base1 == Base2;
2668 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
2669 unsigned NumBytes)
const {
2672 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
2677 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
2683 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
2689 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
2739 int64_t OffsetA = 0, OffsetB = 0;
2744 int LowOffset = std::min(OffsetA, OffsetB);
2745 int HighOffset = std::max(OffsetA, OffsetB);
2746 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2748 LowOffset + (int)LowWidth.
getValue() <= HighOffset)
2755std::pair<unsigned, unsigned>
2758 return std::make_pair(TF & Mask, TF & ~Mask);
2763 using namespace RISCVII;
2764 static const std::pair<unsigned, const char *> TargetFlags[] = {
2765 {MO_CALL,
"riscv-call"},
2766 {MO_LO,
"riscv-lo"},
2767 {MO_HI,
"riscv-hi"},
2768 {MO_PCREL_LO,
"riscv-pcrel-lo"},
2769 {MO_PCREL_HI,
"riscv-pcrel-hi"},
2770 {MO_GOT_HI,
"riscv-got-hi"},
2771 {MO_TPREL_LO,
"riscv-tprel-lo"},
2772 {MO_TPREL_HI,
"riscv-tprel-hi"},
2773 {MO_TPREL_ADD,
"riscv-tprel-add"},
2774 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
2775 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
2776 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
2777 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
2778 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
2779 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
2787 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
2800 unsigned &Flags)
const {
2815std::optional<outliner::OutlinedFunction>
2817 std::vector<outliner::Candidate> &RepeatedSequenceLocs)
const {
2823 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *
TRI);
2829 if (RepeatedSequenceLocs.size() < 2)
2830 return std::nullopt;
2832 unsigned SequenceSize = 0;
2834 for (
auto &
MI : RepeatedSequenceLocs[0])
2838 unsigned CallOverhead = 8;
2839 for (
auto &
C : RepeatedSequenceLocs)
2843 unsigned FrameOverhead = 4;
2844 if (RepeatedSequenceLocs[0]
2846 ->getSubtarget<RISCVSubtarget>()
2856 unsigned Flags)
const {
2861 const auto &
F =
MI.getMF()->getFunction();
2864 if (
MI.isCFIInstruction())
2878 if (
MI.modifiesRegister(RISCV::X5,
TRI) ||
2879 MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
2883 for (
const auto &MO :
MI.operands()) {
2888 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
2901 bool Changed =
true;
2906 for (;
I != E; ++
I) {
2907 if (
I->isCFIInstruction()) {
2908 I->removeFromParent();
2931 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
2942 return std::nullopt;
2946 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
2947 MI.getOperand(2).isImm())
2948 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
2950 return std::nullopt;
2958 std::string GenericComment =
2960 if (!GenericComment.empty())
2961 return GenericComment;
2965 return std::string();
2967 std::string Comment;
2974 if ((
MI.getOpcode() == RISCV::VSETVLI ||
MI.getOpcode() == RISCV::VSETIVLI ||
2975 MI.getOpcode() == RISCV::PseudoVSETVLI ||
2976 MI.getOpcode() == RISCV::PseudoVSETIVLI ||
2977 MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
2979 unsigned Imm =
MI.getOperand(OpIdx).getImm();
2983 unsigned Log2SEW =
MI.getOperand(OpIdx).getImm();
2984 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2989 unsigned Policy =
MI.getOperand(OpIdx).getImm();
2991 "Invalid Policy Value");
// Helper macros that build case labels over RVV pseudo opcodes.
//
// *_UNMASK_LMUL / *_MASK_LMUL name the unmasked and masked pseudo for one
// (opcode, LMUL) pair.  The *_WIDEN variants cover LMULs MF8 through M4
// (omitting M8); the un-suffixed forms additionally include M8.
// CASE_RVV_OPCODE(OP) covers every LMUL in both masked and unmasked form.
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)
// Case-label helpers for RISCV::PseudoV<OP>_<TYPE>_<LMUL> opcodes.  The
// LMULS_* macros nest so each adds smaller LMULs: _M1 covers M1..M8, _MF2
// additionally covers MF2, _MF4 also MF4, and CASE_VMA_OPCODE_LMULS covers
// every LMUL down to MF8.
#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)                                     \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                        \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)                                    \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                       \
  case CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)                                    \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                       \
  case CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)
// Case-label helpers for RISCV::PseudoV<OP>_<TYPE>_<LMUL>_<SEW> opcodes
// (pseudos that additionally encode the element width, SEW).  The LMULS_*
// macros nest like the CASE_VMA ones above.  CASE_VFMA_OPCODE_VV covers the
// vector-vector form; CASE_VFMA_SPLATS covers the scalar-FPR (splat) forms,
// pairing each SEW with its matching FP register class
// (E16/VFPR16, E32/VFPR32, E64/VFPR64).
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3096 unsigned &SrcOpIdx1,
3097 unsigned &SrcOpIdx2)
const {
3099 if (!
Desc.isCommutable())
3102 switch (
MI.getOpcode()) {
3103 case RISCV::TH_MVEQZ:
3104 case RISCV::TH_MVNEZ:
3108 if (
MI.getOperand(2).getReg() == RISCV::X0)
3111 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3112 case RISCV::TH_MULA:
3113 case RISCV::TH_MULAW:
3114 case RISCV::TH_MULAH:
3115 case RISCV::TH_MULS:
3116 case RISCV::TH_MULSW:
3117 case RISCV::TH_MULSH:
3119 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3120 case RISCV::PseudoCCMOVGPRNoX0:
3121 case RISCV::PseudoCCMOVGPR:
3123 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3150 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3171 if ((
MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3176 unsigned CommutableOpIdx1 = 1;
3177 unsigned CommutableOpIdx2 = 3;
3178 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3191 if ((
MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3198 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3200 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3204 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3205 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3211 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3212 SrcOpIdx2 == CommuteAnyOperandIndex) {
3215 unsigned CommutableOpIdx1 = SrcOpIdx1;
3216 if (SrcOpIdx1 == SrcOpIdx2) {
3219 CommutableOpIdx1 = 1;
3220 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3222 CommutableOpIdx1 = SrcOpIdx2;
3227 unsigned CommutableOpIdx2;
3228 if (CommutableOpIdx1 != 1) {
3230 CommutableOpIdx2 = 1;
3232 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
3237 if (Op1Reg !=
MI.getOperand(2).getReg())
3238 CommutableOpIdx2 = 2;
3240 CommutableOpIdx2 = 3;
3245 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3258#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
3259 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
3260 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
3263#define CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
3264 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
3265 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
3266 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
3267 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
3269#define CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
3270 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
3271 CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
3273#define CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
3274 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
3275 CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
3277#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
3278 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
3279 CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
3281#define CASE_VMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3282 CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16) \
3283 CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32) \
3284 CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)
3287#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
3288 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
3289 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
3292#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
3293 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
3294 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
3295 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
3296 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
3298#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
3299 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
3300 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
3302#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
3303 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
3304 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
3305 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
3307#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
3308 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
3309 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
3311#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE, SEW) \
3312 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8, SEW) \
3313 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)
3315#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3316 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
3317 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
3318 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
3323 unsigned OpIdx2)
const {
3326 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
3330 switch (
MI.getOpcode()) {
3331 case RISCV::TH_MVEQZ:
3332 case RISCV::TH_MVNEZ: {
3333 auto &WorkingMI = cloneIfNew(
MI);
3334 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
3335 : RISCV::TH_MVEQZ));
3339 case RISCV::PseudoCCMOVGPRNoX0:
3340 case RISCV::PseudoCCMOVGPR: {
3344 auto &WorkingMI = cloneIfNew(
MI);
3345 WorkingMI.getOperand(3).setImm(
CC);
3369 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
3370 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
3372 switch (
MI.getOpcode()) {
3395 auto &WorkingMI = cloneIfNew(
MI);
3396 WorkingMI.setDesc(
get(Opc));
3406 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
3409 if (OpIdx1 == 3 || OpIdx2 == 3) {
3411 switch (
MI.getOpcode()) {
3422 auto &WorkingMI = cloneIfNew(
MI);
3423 WorkingMI.setDesc(
get(Opc));
3435#undef CASE_RVV_OPCODE_UNMASK_LMUL
3436#undef CASE_RVV_OPCODE_MASK_LMUL
3437#undef CASE_RVV_OPCODE_LMUL
3438#undef CASE_RVV_OPCODE_UNMASK_WIDEN
3439#undef CASE_RVV_OPCODE_UNMASK
3440#undef CASE_RVV_OPCODE_MASK_WIDEN
3441#undef CASE_RVV_OPCODE_MASK
3442#undef CASE_RVV_OPCODE_WIDEN
3443#undef CASE_RVV_OPCODE
3445#undef CASE_VMA_OPCODE_COMMON
3446#undef CASE_VMA_OPCODE_LMULS_M1
3447#undef CASE_VMA_OPCODE_LMULS_MF2
3448#undef CASE_VMA_OPCODE_LMULS_MF4
3449#undef CASE_VMA_OPCODE_LMULS
3450#undef CASE_VFMA_OPCODE_COMMON
3451#undef CASE_VFMA_OPCODE_LMULS_M1
3452#undef CASE_VFMA_OPCODE_LMULS_MF2
3453#undef CASE_VFMA_OPCODE_LMULS_MF4
3454#undef CASE_VFMA_OPCODE_VV
3455#undef CASE_VFMA_SPLATS
3458#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
3459 RISCV::PseudoV##OP##_##LMUL##_TIED
3461#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
3462 CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
3463 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
3464 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
3465 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
3466 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
3468#define CASE_WIDEOP_OPCODE_LMULS(OP) \
3469 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
3470 case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
3472#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
3473 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
3474 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
3477#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
3478 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
3479 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
3480 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
3481 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
3482 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
3484#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
3485 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
3486 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
3489#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
3490 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
3492#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP) \
3493 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
3494 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
3495 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
3496 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
3497 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
3498 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
3499 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
3500 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
3501 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
3503#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
3504 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
3505 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
3508#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
3509 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
3510 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
3511 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
3512 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
3513 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
3514 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
3515 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
3516 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
3517 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
3519#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
3520 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
3527 switch (
MI.getOpcode()) {
3533 MI.getNumExplicitOperands() == 7 &&
3534 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
3541 switch (
MI.getOpcode()) {
3551 .
add(
MI.getOperand(0))
3553 .
add(
MI.getOperand(1))
3554 .
add(
MI.getOperand(2))
3555 .
add(
MI.getOperand(3))
3556 .
add(
MI.getOperand(4))
3557 .
add(
MI.getOperand(5))
3558 .
add(
MI.getOperand(6));
3567 MI.getNumExplicitOperands() == 6);
3568 if ((
MI.getOperand(5).getImm() & 1) == 0)
3573 switch (
MI.getOpcode()) {
3585 .
add(
MI.getOperand(0))
3587 .
add(
MI.getOperand(1))
3588 .
add(
MI.getOperand(2))
3589 .
add(
MI.getOperand(3))
3590 .
add(
MI.getOperand(4))
3591 .
add(
MI.getOperand(5));
3598 unsigned NumOps =
MI.getNumOperands();
3599 for (
unsigned I = 1;
I < NumOps; ++
I) {
3601 if (
Op.isReg() &&
Op.isKill())
3609 if (
MI.getOperand(0).isEarlyClobber()) {
3615 if (S->
end ==
Idx.getRegSlot(
true))
3616 S->
end =
Idx.getRegSlot();
3623#undef CASE_WIDEOP_OPCODE_COMMON
3624#undef CASE_WIDEOP_OPCODE_LMULS_MF4
3625#undef CASE_WIDEOP_OPCODE_LMULS
3626#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
3627#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
3628#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
3629#undef CASE_FP_WIDEOP_OPCODE_COMMON
3630#undef CASE_FP_WIDEOP_OPCODE_LMULS_MF4
3631#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
3632#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4
3633#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
3640 if (llvm::has_single_bit<uint32_t>(Amount)) {
3642 if (ShiftAmount == 0)
3648 }
else if (
STI.hasStdExtZba() &&
3655 if (Amount % 9 == 0) {
3656 Opc = RISCV::SH3ADD;
3657 ShiftAmount =
Log2_64(Amount / 9);
3658 }
else if (Amount % 5 == 0) {
3659 Opc = RISCV::SH2ADD;
3660 ShiftAmount =
Log2_64(Amount / 5);
3661 }
else if (Amount % 3 == 0) {
3662 Opc = RISCV::SH1ADD;
3663 ShiftAmount =
Log2_64(Amount / 3);
3676 }
else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
3677 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3687 }
else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
3688 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3698 }
else if (
STI.hasStdExtM() ||
STI.hasStdExtZmmul()) {
3699 Register N =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3708 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
3709 if (Amount & (1U << ShiftAmount)) {
3713 .
addImm(ShiftAmount - PrevShiftAmount)
3715 if (Amount >> (ShiftAmount + 1)) {
3718 Acc =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3729 PrevShiftAmount = ShiftAmount;
3732 assert(Acc &&
"Expected valid accumulator");
3742 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
3750 return MI.getOpcode() == RISCV::ADDIW &&
MI.getOperand(1).isReg() &&
3751 MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0;
3756 return MI.getOpcode() == RISCV::ADD_UW &&
MI.getOperand(1).isReg() &&
3757 MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0;
3762 return MI.getOpcode() == RISCV::ANDI &&
MI.getOperand(1).isReg() &&
3763 MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 255;
3774 case RISCV::VL1RE8_V:
3775 case RISCV::VL2RE8_V:
3776 case RISCV::VL4RE8_V:
3777 case RISCV::VL8RE8_V:
3778 case RISCV::VL1RE16_V:
3779 case RISCV::VL2RE16_V:
3780 case RISCV::VL4RE16_V:
3781 case RISCV::VL8RE16_V:
3782 case RISCV::VL1RE32_V:
3783 case RISCV::VL2RE32_V:
3784 case RISCV::VL4RE32_V:
3785 case RISCV::VL8RE32_V:
3786 case RISCV::VL1RE64_V:
3787 case RISCV::VL2RE64_V:
3788 case RISCV::VL4RE64_V:
3789 case RISCV::VL8RE64_V:
3797 unsigned Opcode =
MI.getOpcode();
3798 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
3804std::optional<std::pair<unsigned, unsigned>>
3808 return std::nullopt;
3809 case RISCV::PseudoVSPILL2_M1:
3810 case RISCV::PseudoVRELOAD2_M1:
3811 return std::make_pair(2u, 1u);
3812 case RISCV::PseudoVSPILL2_M2:
3813 case RISCV::PseudoVRELOAD2_M2:
3814 return std::make_pair(2u, 2u);
3815 case RISCV::PseudoVSPILL2_M4:
3816 case RISCV::PseudoVRELOAD2_M4:
3817 return std::make_pair(2u, 4u);
3818 case RISCV::PseudoVSPILL3_M1:
3819 case RISCV::PseudoVRELOAD3_M1:
3820 return std::make_pair(3u, 1u);
3821 case RISCV::PseudoVSPILL3_M2:
3822 case RISCV::PseudoVRELOAD3_M2:
3823 return std::make_pair(3u, 2u);
3824 case RISCV::PseudoVSPILL4_M1:
3825 case RISCV::PseudoVRELOAD4_M1:
3826 return std::make_pair(4u, 1u);
3827 case RISCV::PseudoVSPILL4_M2:
3828 case RISCV::PseudoVRELOAD4_M2:
3829 return std::make_pair(4u, 2u);
3830 case RISCV::PseudoVSPILL5_M1:
3831 case RISCV::PseudoVRELOAD5_M1:
3832 return std::make_pair(5u, 1u);
3833 case RISCV::PseudoVSPILL6_M1:
3834 case RISCV::PseudoVRELOAD6_M1:
3835 return std::make_pair(6u, 1u);
3836 case RISCV::PseudoVSPILL7_M1:
3837 case RISCV::PseudoVRELOAD7_M1:
3838 return std::make_pair(7u, 1u);
3839 case RISCV::PseudoVSPILL8_M1:
3840 case RISCV::PseudoVRELOAD8_M1:
3841 return std::make_pair(8u, 1u);
3846 return MI.getNumExplicitDefs() == 2 &&
3847 MI.modifiesRegister(RISCV::VL,
nullptr) && !
MI.isInlineAsm();
3851 int16_t MI1FrmOpIdx =
3853 int16_t MI2FrmOpIdx =
3855 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
3862std::optional<unsigned>
3867 return std::nullopt;
3870 case RISCV::VSLL_VX:
3871 case RISCV::VSRL_VX:
3872 case RISCV::VSRA_VX:
3874 case RISCV::VSSRL_VX:
3875 case RISCV::VSSRA_VX:
3880 case RISCV::VNSRL_WX:
3881 case RISCV::VNSRA_WX:
3883 case RISCV::VNCLIPU_WX:
3884 case RISCV::VNCLIP_WX:
3889 case RISCV::VADD_VX:
3890 case RISCV::VSUB_VX:
3891 case RISCV::VRSUB_VX:
3893 case RISCV::VWADDU_VX:
3894 case RISCV::VWSUBU_VX:
3895 case RISCV::VWADD_VX:
3896 case RISCV::VWSUB_VX:
3897 case RISCV::VWADDU_WX:
3898 case RISCV::VWSUBU_WX:
3899 case RISCV::VWADD_WX:
3900 case RISCV::VWSUB_WX:
3902 case RISCV::VADC_VXM:
3903 case RISCV::VADC_VIM:
3904 case RISCV::VMADC_VXM:
3905 case RISCV::VMADC_VIM:
3906 case RISCV::VMADC_VX:
3907 case RISCV::VSBC_VXM:
3908 case RISCV::VMSBC_VXM:
3909 case RISCV::VMSBC_VX:
3911 case RISCV::VAND_VX:
3913 case RISCV::VXOR_VX:
3915 case RISCV::VMSEQ_VX:
3916 case RISCV::VMSNE_VX:
3917 case RISCV::VMSLTU_VX:
3918 case RISCV::VMSLT_VX:
3919 case RISCV::VMSLEU_VX:
3920 case RISCV::VMSLE_VX:
3921 case RISCV::VMSGTU_VX:
3922 case RISCV::VMSGT_VX:
3924 case RISCV::VMINU_VX:
3925 case RISCV::VMIN_VX:
3926 case RISCV::VMAXU_VX:
3927 case RISCV::VMAX_VX:
3929 case RISCV::VMUL_VX:
3930 case RISCV::VMULH_VX:
3931 case RISCV::VMULHU_VX:
3932 case RISCV::VMULHSU_VX:
3934 case RISCV::VDIVU_VX:
3935 case RISCV::VDIV_VX:
3936 case RISCV::VREMU_VX:
3937 case RISCV::VREM_VX:
3939 case RISCV::VWMUL_VX:
3940 case RISCV::VWMULU_VX:
3941 case RISCV::VWMULSU_VX:
3943 case RISCV::VMACC_VX:
3944 case RISCV::VNMSAC_VX:
3945 case RISCV::VMADD_VX:
3946 case RISCV::VNMSUB_VX:
3948 case RISCV::VWMACCU_VX:
3949 case RISCV::VWMACC_VX:
3950 case RISCV::VWMACCSU_VX:
3951 case RISCV::VWMACCUS_VX:
3953 case RISCV::VMERGE_VXM:
3955 case RISCV::VMV_V_X:
3957 case RISCV::VSADDU_VX:
3958 case RISCV::VSADD_VX:
3959 case RISCV::VSSUBU_VX:
3960 case RISCV::VSSUB_VX:
3962 case RISCV::VAADDU_VX:
3963 case RISCV::VAADD_VX:
3964 case RISCV::VASUBU_VX:
3965 case RISCV::VASUB_VX:
3967 case RISCV::VSMUL_VX:
3969 case RISCV::VMV_S_X:
3970 return 1U << Log2SEW;
3976 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc, unsigned ZeroReg=0, bool CheckZeroReg=false)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc)
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
INITIALIZE_PASS(RISCVInsertVSETVLI, DEBUG_TYPE, RISCV_INSERT_VSETVLI_NAME, false, false) char RISCVCoalesceVSETVLI const LiveIntervals * LIS
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
static bool isRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
#define RVV_OPC_LMUL_CASE(OPC, INV)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
#define OPCODE_LMUL_MASK_CASE(OPC)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isFMUL(unsigned Opc)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isFADD(unsigned Opc)
#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
This class represents an Operation in the Expression.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
TypeSize getValue() const
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Wrapper class representing physical registers. Should be passed by value.
unsigned pred_size() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
RISCVInstrInfo(RISCVSubtarget &STI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC, bool Imm=false) const
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
virtual outliner::InstrType getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
bool hasStdExtCOrZca() const
const RISCVRegisterInfo * getRegisterInfo() const override
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getOppositeBranchCondition(CondCode)
unsigned getBrCond(CondCode CC, bool Imm=false)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static RISCVII::VLMUL getLMul(uint64_t TSFlags)
static unsigned getNF(uint64_t TSFlags)
static bool isTailAgnostic(unsigned VType)
static RISCVII::VLMUL getVLMUL(unsigned VType)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static bool isValidSEW(unsigned SEW)
void printVType(unsigned VType, raw_ostream &OS)
static unsigned getSEW(unsigned VType)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
bool isSEXT_W(const MachineInstr &MI)
bool isFaultFirstLoad(const MachineInstr &MI)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isZEXT_B(const MachineInstr &MI)
bool isRVVSpill(const MachineInstr &MI)
bool isZEXT_W(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
static const MachineMemOperand::Flags MONontemporalBit0
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
unsigned getDeadRegState(bool B)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
unsigned getKillRegState(bool B)
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getRenamableRegState(bool B)
DWARFExpression::Operation Op
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Description of the encoding of one expression Op.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.