#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  // ...
}

unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex,
                                             unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ...
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...
}
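// Both overloads recognize a plain frame load: operand 1 must be a
// FrameIndex and operand 2 an immediate offset of zero; the return value is
// the loaded register. isStoreToStackSlot below mirrors this for stores.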
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  // ...
}

unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex,
                                            unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ...
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...
}
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
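// Example: copying the 4-register tuple v8..v11 to v10..v13 in forward order
// would overwrite v10 and v11 before they are read (DstReg - SrcReg == 2 < 4),
// so the caller must copy the sub-registers in reverse order instead.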
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  if (PreferWholeRegisterMove)
    return false;

  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  // Walk backwards from the COPY looking for the vsetvli and the
  // instruction that defines the source register.
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;

    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
      if (!FoundDef) {
        if (!FirstVSetVLI) {
          FirstVSetVLI = true;
          unsigned FirstVType = MBBI->getOperand(2).getImm();
          // ...
          // The first encountered vsetvli must have the same LMUL as the
          // register class of the COPY.
          if (FirstLMul != LMul)
            return false;
        }
        // Only permit the `vsetvli x0, x0, vtype` form, which preserves VL.
        if (MBBI->getOperand(0).getReg() != RISCV::X0)
          return false;
        if (MBBI->getOperand(1).isImm())
          return false;
        if (MBBI->getOperand(1).getReg() != RISCV::X0)
          return false;
        continue;
      }

      // MBBI is the first vsetvli before the producing instruction.
      unsigned VType = MBBI->getOperand(2).getImm();
      // ...
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Bail on any instruction that changes VL.
      if (MBBI->modifiesRegister(RISCV::VL))
        return false;
      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
          // ...
        }
        // ...
        if (MO.getReg() != SrcReg)
          return false;
        // ...
      }
    }
  }

  return false;
}
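// In short: a vector COPY can be lowered to vmv.v.v (which copies only body
// elements) instead of a whole-register vmv<N>r.v only when the VL/VTYPE in
// effect at the COPY provably match those used by the instruction that
// produced the source, with no intervening call, inline asm, or VL clobber.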
void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       const DebugLoc &DL, MCRegister DstReg,
                                       MCRegister SrcReg, bool KillSrc,
                                       unsigned Opc, unsigned NF) const {
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  unsigned SubRegIdx;
  unsigned VVOpc, VIOpc;
  switch (Opc) {
  // ...
    SubRegIdx = RISCV::sub_vrm1_0;
    VVOpc = RISCV::PseudoVMV_V_V_M1;
    VIOpc = RISCV::PseudoVMV_V_I_M1;
    break;
  // ...
    SubRegIdx = RISCV::sub_vrm2_0;
    VVOpc = RISCV::PseudoVMV_V_V_M2;
    VIOpc = RISCV::PseudoVMV_V_I_M2;
    break;
  // ...
    SubRegIdx = RISCV::sub_vrm4_0;
    VVOpc = RISCV::PseudoVMV_V_V_M4;
    VIOpc = RISCV::PseudoVMV_V_I_M4;
    break;
  // ...
    SubRegIdx = RISCV::sub_vrm1_0; // There is no sub_vrm8_0.
    VVOpc = RISCV::PseudoVMV_V_V_M8;
    VIOpc = RISCV::PseudoVMV_V_I_M8;
    break;
  }

  bool UseVMV_V_V = false;
  bool UseVMV_V_I = false;
  MachineBasicBlock::const_iterator DefMBBI;
  if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
    UseVMV_V_V = true;
    Opc = VVOpc;
    if (DefMBBI->getOpcode() == VIOpc) {
      UseVMV_V_I = true;
      Opc = VIOpc;
    }
  }

  if (NF == 1) {
    // ...
    if (UseVMV_V_I)
      MIB = MIB.add(DefMBBI->getOperand(2));
    // ...
    return;
  }

  int I = 0, End = NF, Incr = 1;
  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
  // ...
  assert(!Fractional && "It is impossible to be fractional lmul here.");
  if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
    I = NF - 1;
    End = -1;
    Incr = -1;
  }

  for (; I != End; I += Incr) {
    // ...
    if (UseVMV_V_I)
      MIB = MIB.add(DefMBBI->getOperand(2));
    else
      MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
                       getKillRegState(KillSrc));
    // ...
  }
}
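// For segment (NF > 1) copies, the sub-registers are copied one at a time;
// the I/End/Incr bounds above are flipped so that an overlapping forward
// copy never reads a sub-register that has already been overwritten.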
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  if (RISCV::GPRPF64RegClass.contains(DstReg, SrcReg)) {
    // Emit an ADDI for both halves of the register pair.
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_32))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32),
                getKillRegState(KillSrc))
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_32_hi))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32_hi),
                getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert((STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin don't have FSGNJ_H, replace FSGNJ_H with FSGNJ_S.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    // ... (FSGNJ_S DstReg, SrcReg, SrcReg)
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    // ... (FSGNJ_D DstReg, SrcReg, SrcReg)
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ... (FMV_W_X)
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    // ... (FMV_X_W)
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ... (FMV_D_X)
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    // ... (FMV_X_D)
    return;
  }

  if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V);
    return;
  }

  if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V);
    return;
  }

  if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV4R_V);
    return;
  }

  if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV8R_V);
    return;
  }

  if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/2);
    return;
  }

  if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
                      /*NF=*/2);
    return;
  }

  if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV4R_V,
                      /*NF=*/2);
    return;
  }

  if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/3);
    return;
  }

  if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
                      /*NF=*/3);
    return;
  }

  if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/4);
    return;
  }

  if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV2R_V,
                      /*NF=*/4);
    return;
  }

  if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/5);
    return;
  }

  if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/6);
    return;
  }

  if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/7);
    return;
  }

  if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
    copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RISCV::VMV1R_V,
                      /*NF=*/8);
    return;
  }

  llvm_unreachable("Impossible reg-to-reg copy");
}
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (IsScalableVector) {
    // ...
  }
  // ...
}
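// Scalar registers are spilled with a plain store to a fixed-size slot;
// vector registers go through a scalable stack object because the spill
// size is a multiple of VLEN and only known at run time.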
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (IsScalableVector) {
    // ...
  }
  // ...
}
MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // ...
  // Only the extended source operand (operand 1) may be folded.
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  default:
    // ...
    return nullptr;
  // ...
    LoadOpc = RISCV::LWU;
    break;
  // ...
    LoadOpc = RISCV::LBU;
    break;
  // ...
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    break;
  }
  // ...
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0);
}
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstRenamable,
                            bool DstIsDead) const {
  Register SrcReg = RISCV::X0;
  // ...
  bool SrcRenamable = false;
  unsigned Num = 0;

  for (const RISCVMatInt::Inst &Inst : Seq) {
    bool LastItem = ++Num == Seq.size();
    // ...
    switch (Inst.getOpndKind()) {
    // ...
          .addReg(SrcReg, SrcRegState)
    // ...
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
    // ...
          .addReg(SrcReg, SrcRegState)
    // ...
    }
    // Only the first instruction has x0 as its source.
    SrcReg = DstReg;
    SrcRenamable = DstRenamable;
  }
}
900 "Unknown conditional branch");
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle Generic branch opcodes from Global ISel.
  if (I->isPreISelOpcode())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    // ... (parse the condition into TBB/Cond)
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    // ... (parse the conditional branch, take FBB from the unconditional one)
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
  // ...
  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // A virtual register is used initially, as the register scavenger can't
  // scavenge from an empty block.
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  if (TmpGPR != RISCV::NoRegister)
    RS->setRegUsed(TmpGPR);
  else {
    // Spill a GPR and use it as the scratch register.
    TmpGPR = RISCV::X27;
    // ...
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");
    // ...
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
    MI.getOperand(1).setMBB(&RestoreBB);
    // ...
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(getOppositeBranchCondition(CC));
  return false;
}
bool RISCVInstrInfo::optimizeCondBranch(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  // ...

  // Returns true if MI is a load-immediate idiom (ADDI from x0).
  auto isLoadImm = [](const MachineInstr *MI, int64_t &Imm) -> bool {
    if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
        MI->getOperand(1).getReg() == RISCV::X0) {
      Imm = MI->getOperand(2).getImm();
      return true;
    }
    return false;
  };
  // Either a load-immediate instruction or x0.
  auto isFromLoadImm = [&](const MachineOperand &Op, int64_t &Imm) -> bool {
    if (!Op.isReg())
      return false;
    Register Reg = Op.getReg();
    if (Reg == RISCV::X0) {
      Imm = 0;
      return true;
    }
    if (!Reg.isVirtual())
      return false;
    return isLoadImm(MRI.getVRegDef(Op.getReg()), Imm);
  };

  // ...
  // Scan backwards for a register already holding the constant C1.
  auto searchConst = [&](int64_t C1) -> Register {
    MachineBasicBlock::reverse_iterator II(&MI), E = MBB->rend();
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1;
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();
    return Register();
  };

  bool Modify = false;
  int64_t C0;
  if (isFromLoadImm(LHS, C0) && MRI.hasOneUse(LHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 + 1)) {
      // ...
      // The constant's live range may be extended; clear its kill flags.
      MRI.clearKillFlags(RegZ);
      Modify = true;
    }
  } else if (isFromLoadImm(RHS, C0) && MRI.hasOneUse(RHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 - 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      Modify = true;
    }
  }

  if (!Modify)
    return false;

  // Build the new branch and remove the old one.
  // ...
  MI.eraseFromParent();
  return true;
}
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ: case RISCV::BNE: case RISCV::BLT: case RISCV::BGE:
  case RISCV::BLTU: case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  }
}
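// Reach of each branch form: conditional branches encode a signed 13-bit
// offset (+/-4 KiB), JAL/PseudoBR a signed 21-bit offset (+/-1 MiB), and
// PseudoJump expands to AUIPC+JALR, covering the full signed 32-bit range.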
unsigned getPredicatedOpcode(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;
  }

  return RISCV::INSTRUCTION_LIST_END;
}
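// Maps a plain ALU opcode to its short-forward-branch pseudo, which encodes
// "execute the ALU op only if the condition holds" and is later expanded to
// a branch over a single instruction. Returns INSTRUCTION_LIST_END when the
// opcode cannot be predicated.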
/// Identify instructions that can be folded into a CCMOV instruction, and
/// return the defining instruction.
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                           const MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  // ...
  // Don't predicate the load-immediate idiom.
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    return nullptr;
  // Check that MI's operands reference nothing unsafe to fold.
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(nullptr, DontMoveAcrossStores))
    return nullptr;
  return MI;
}
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // Operands: 0 = def, 1/2 = compare LHS/RHS, 3 = condition code,
  // 4 = false use, 5 = true use.
  TrueOp = 5;
  FalseOp = 4;
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}
MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI =
      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find the new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *PreviousClass =
      MRI.getRegClass(FalseReg.getReg());
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  // Create a new predicated version of DefMI.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);

  // Copy the condition portion.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ...
  // Copy the false register.
  NewMI.add(FalseReg);
  // ...
}
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    MachineMemOperand *MMO = *(MI.memoperands_begin());
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &ST = MF.getSubtarget<RISCVSubtarget>();
    if (ST.hasStdExtZihintntl() && MMO->isNonTemporal()) {
      if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
        if (isCompressibleInst(MI, STI))
          return 4; // c.ntl.all + c.load/c.store
        return 6;   // c.ntl.all + load/store
      }
      return 8; // ntl.all + load/store
    }
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }

  switch (Opcode) {
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its
    // shadow.
    return StackMapOpers(&MI).getNumPatchBytes();
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested.
    return PatchPointOpers(&MI).getNumPatchBytes();
  case TargetOpcode::STATEPOINT:
    // The size of the statepoint intrinsic is the number of bytes requested.
    return StatepointOpers(&MI).getNumPatchBytes();
  default:
    return get(Opcode).getSize();
  }
}
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
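// An fsgnj with identical sources is the canonical FP move (fmv.* aliases
// fsgnj.* rd, rs, rs), and an ADDI/ORI/XORI whose source is x0 or whose
// immediate is 0 is an integer li/mv, so all of these cost no more than a
// plain register copy.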
1571std::optional<DestSourcePair>
1575 switch (
MI.getOpcode()) {
1580 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
1581 MI.getOperand(2).getImm() == 0)
1584 case RISCV::FSGNJ_D:
1585 case RISCV::FSGNJ_S:
1586 case RISCV::FSGNJ_H:
1587 case RISCV::FSGNJ_D_INX:
1588 case RISCV::FSGNJ_D_IN32X:
1589 case RISCV::FSGNJ_S_INX:
1590 case RISCV::FSGNJ_H_INX:
1592 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
1593 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
1597 return std::nullopt;
MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
  if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
    // The option is unused; pick a strategy from the scheduling model.
    const auto &SchedModel = STI.getSchedModel();
    return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
               ? MachineTraceStrategy::TS_MinInstrCount
               : MachineTraceStrategy::TS_Local;
  }
  return ForceMachineCombinerStrategy;
}

void RISCVInstrInfo::finalizeInsInstrs(
    MachineInstr &Root, MachineCombinerPattern &P,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  int16_t FrmOpIdx =
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  if (FrmOpIdx < 0) {
    assert(all_of(InsInstrs,
                  [](MachineInstr *MI) {
                    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                                     RISCV::OpName::frm) < 0;
                  }) &&
           "New instructions require FRM whereas the old one does not have it");
    return;
  }
  // ...
  for (auto *NewMI : InsInstrs) {
    assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
               NewMI->getOpcode(), RISCV::OpName::frm)) ==
               NewMI->getNumOperands() &&
           "Instruction has unexpected number of operands");
    // ... (append the rounding-mode operand from Root)
  }
}
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
    return false;
  // ...
  unsigned OperandIdx = Commuted ? 2 : 1;
  // ...
  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         RISCV::hasEqualFRM(Inst, Sibling);
}

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  if (Invert) {
    auto InverseOpcode = getInverseOpcode(Inst.getOpcode());
    if (!InverseOpcode)
      return false;
    Opc = *InverseOpcode;
  }
  // ...
}
1748std::optional<unsigned>
1752 return std::nullopt;
1754 return RISCV::FSUB_H;
1756 return RISCV::FSUB_S;
1758 return RISCV::FSUB_D;
1760 return RISCV::FADD_H;
1762 return RISCV::FADD_S;
1764 return RISCV::FADD_D;
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  // ...
  // Do not fuse when the multiply result has more than one use, unless we
  // are not trying to reduce register pressure.
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool getFPFusedMultiplyPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) {
  unsigned Opc = Root.getOpcode();
  bool IsFAdd = isFADD(Opc);
  if (!IsFAdd && !isFSUB(Opc))
    return false;
  bool Added = false;
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    // ...
    Added = true;
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    // ...
    Added = true;
  }
  return Added;
}

static bool getFPPatterns(MachineInstr &Root,
                          SmallVectorImpl<MachineCombinerPattern> &Patterns,
                          bool DoRegPressureReduce) {
  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
    return true;
  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
                                                     DoRegPressureReduce);
}
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
                                         MachineCombinerPattern Pattern) {
  switch (RootOpc) {
  default:
    llvm_unreachable("Unexpected opcode");
  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  // ... (FSUB roots map to the corresponding FMSUB/FNMSUB opcodes)
  }
}
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
                                   MachineCombinerPattern Pattern,
                                   SmallVectorImpl<MachineInstr *> &InsInstrs,
                                   SmallVectorImpl<MachineInstr *> &DelInstrs) {
  // ...
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
  // ...
  MachineInstrBuilder MIB =
      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
          .addReg(Mul1.getReg(), getKillRegState(Mul1IsKill))
          .addReg(Mul2.getReg(), getKillRegState(Mul2IsKill))
          .addReg(Addend.getReg(), getKillRegState(AddendIsKill));
  // ...
}

void RISCVInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  switch (Pattern) {
  default:
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  // ... (FMA patterns dispatch to combineFPFusedMultiply)
  }
}
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  const MCInstrDesc &Desc = MI.getDesc();

  for (unsigned Index = 0; Index < Desc.getNumOperands(); ++Index) {
    const MCOperandInfo &Operand = Desc.operands()[Index];
    unsigned OpType = Operand.OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(Index);
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");

#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
        // ... (CASE_OPERAND_UIMM instantiations elided)
        case RISCVOp::OPERAND_UIMM2_LSB0:
          Ok = isShiftedUInt<1, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM7_LSB00:
          Ok = isShiftedUInt<5, 2>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_LSB00:
          Ok = isShiftedUInt<6, 2>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_LSB000:
          Ok = isShiftedUInt<5, 3>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_GE32:
          Ok = isUInt<8>(Imm) && Imm >= 32;
          break;
        case RISCVOp::OPERAND_UIMM9_LSB000:
          Ok = isShiftedUInt<6, 3>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
          Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
          break;
        case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO:
          Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
          break;
        // ...
        case RISCVOp::OPERAND_SIMM5_PLUS1:
          Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
          break;
        // ...
        case RISCVOp::OPERAND_SIMM6_NONZERO:
          Ok = Imm != 0 && isInt<6>(Imm);
          break;
        case RISCVOp::OPERAND_VTYPEI10:
          Ok = isUInt<10>(Imm);
          break;
        case RISCVOp::OPERAND_VTYPEI11:
          Ok = isUInt<11>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12_LSB00000:
          Ok = isShiftedInt<7, 5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          Ok = Ok && Imm != 0;
          break;
        case RISCVOp::OPERAND_CLUI_IMM:
          Ok = (isUInt<5>(Imm) && Imm != 0) ||
               (Imm >= 0xfffe0 && Imm <= 0xfffff);
          break;
        case RISCVOp::OPERAND_RVKRNUM:
          Ok = Imm >= 0 && Imm <= 10;
          break;
        case RISCVOp::OPERAND_RVKRNUM_0_7:
          Ok = Imm >= 0 && Imm <= 7;
          break;
        case RISCVOp::OPERAND_RVKRNUM_1_10:
          Ok = Imm >= 1 && Imm <= 10;
          break;
        case RISCVOp::OPERAND_RVKRNUM_2_14:
          Ok = Imm >= 2 && Imm <= 14;
          break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  const uint64_t TSFlags = Desc.TSFlags;
  if (RISCVII::hasVLOp(TSFlags)) {
    const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
      const MachineRegisterInfo &MRI =
          MI.getParent()->getParent()->getRegInfo();
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }
    if (!RISCVII::hasSEWOp(TSFlags)) {
      ErrInfo = "VL operand w/o SEW operand?";
      return false;
    }
  }
  if (RISCVII::hasSEWOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      return false;
    }
    uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
    if (Log2SEW > 31) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    if (!RISCVVType::isValidSEW(SEW)) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
  }
  if (RISCVII::hasVecPolicyOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      return false;
    }
    uint64_t Policy = MI.getOperand(OpIdx).getImm();
    if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
      ErrInfo = "Invalid Policy Value";
      return false;
    }
    if (!RISCVII::hasVLOp(TSFlags)) {
      ErrInfo = "policy operand w/o VL operand?";
      return false;
    }

    // A vector instruction with a policy operand must have a tied passthru.
    unsigned UseOpIdx;
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }
  }

  return true;
}
bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
                                         Register Reg,
                                         const MachineInstr &AddrI,
                                         ExtAddrMode &AM) const {
  // ...
  int64_t NewOffset = OldOffset + Disp;
  if (!STI.is64Bit())
    NewOffset = SignExtend64<32>(NewOffset);

  if (!isInt<12>(NewOffset))
    return false;
  // ...
}

MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
                                               const ExtAddrMode &AM) const {
  // ...
  assert(AM.ScaledReg == 0 &&
         "Addressing mode not supported for folding");
  // ...
}
bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;
  // ...
  OffsetIsScalable = false;
  // ...
  return true;
}
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}
bool RISCVInstrInfo::shouldClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1,
    ArrayRef<const MachineOperand *> BaseOps2, unsigned ClusterSize,
    unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, we cannot
  // cluster them.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  // A cap on the number of memory ops clustered together.
  return ClusterSize <= 4;
}
bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  // ...
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  // ...
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  // ...
  return false;
}
2350std::pair<unsigned, unsigned>
2353 return std::make_pair(TF & Mask, TF & ~Mask);
2358 using namespace RISCVII;
2359 static const std::pair<unsigned, const char *> TargetFlags[] = {
2360 {MO_CALL,
"riscv-call"},
2361 {MO_PLT,
"riscv-plt"},
2362 {MO_LO,
"riscv-lo"},
2363 {MO_HI,
"riscv-hi"},
2364 {MO_PCREL_LO,
"riscv-pcrel-lo"},
2365 {MO_PCREL_HI,
"riscv-pcrel-hi"},
2366 {MO_GOT_HI,
"riscv-got-hi"},
2367 {MO_TPREL_LO,
"riscv-tprel-lo"},
2368 {MO_TPREL_HI,
"riscv-tprel-hi"},
2369 {MO_TPREL_ADD,
"riscv-tprel-add"},
2370 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
2371 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"}};
bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // ...
}
2407std::optional<outliner::OutlinedFunction>
2409 std::vector<outliner::Candidate> &RepeatedSequenceLocs)
const {
2415 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *
TRI);
2421 if (RepeatedSequenceLocs.size() < 2)
2422 return std::nullopt;
2424 unsigned SequenceSize = 0;
2426 auto I = RepeatedSequenceLocs[0].front();
2427 auto E = std::next(RepeatedSequenceLocs[0].back());
2432 unsigned CallOverhead = 8;
2433 for (
auto &
C : RepeatedSequenceLocs)
2437 unsigned FrameOverhead = 4;
2438 if (RepeatedSequenceLocs[0]
2440 ->getSubtarget<RISCVSubtarget>()
outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
                                     unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  const auto &F = MI.getMF()->getFunction();

  // We can manually strip out CFI instructions later.
  if (MI.isCFIInstruction())
    // If the current function has exception handling code, we can't outline
    // and strip these CFI instructions since that may break the .eh_frame
    // section needed in unwinding.
    return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
                                     : outliner::InstrType::Invisible;

  // ...
  // Don't outline anything that sets or clobbers X5 (t0), which holds the
  // return address for the outlined call.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands()) {
    // ...
    // pcrel-hi and pcrel-lo can't be put in separate sections; filter that
    // out.
    if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection()))
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}
void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // Strip out any CFI instructions.
  bool Changed = true;
  while (Changed) {
    Changed = false;
    auto I = MBB.begin();
    auto E = MBB.end();
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }
  // ...
}

MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {
  // Add a call instruction to the outlined function at the given location.
  It = MBB.insert(It,
                  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                        RISCVII::MO_CALL));
  return It;
}
std::optional<RegImmPair>
RISCVInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
  // TODO: Handle cases where Reg is a super- or sub-register of the
  // destination register.
  const MachineOperand &Op0 = MI.getOperand(0);
  if (!Op0.isReg() || Reg != Op0.getReg())
    return std::nullopt;

  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;
}
std::string RISCVInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {
  // Print a generic comment for this operand if there is one.
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, we must have an immediate operand.
  if (!Op.isImm())
    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);

  // Print the full VType operand of vsetvli/vsetivli instructions, and the
  // SEW/policy operands of vector codegen pseudos.
  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLI ||
       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
      OpIdx == 2) {
    unsigned Imm = MI.getOperand(OpIdx).getImm();
    RISCVVType::printVType(Imm, OS);
  } else if (/* SEW operand */ OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
    unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
    OS << "e" << SEW;
  } else if (/* policy operand */ OpIdx ==
             RISCVII::getVecPolicyOpNum(MI.getDesc())) {
    unsigned Policy = MI.getOperand(OpIdx).getImm();
    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
           "Invalid Policy Value");
    OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
       << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
  }

  OS.flush();
  return Comment;
}
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64)
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // We can't commute operands if operand 2 (i.e. rs1) is x0, since the
    // resulting pseudo would no longer be well-formed.
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    // Operands 1 and 2 are commutable, if we switch the opcode.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    // Operands 2 and 3 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPR:
    // Operands 4 and 5 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  // ... (VFMACC/VFMSAC-style splat opcodes)
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;
    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  // ... (VFMADD/VFNMADD-style opcodes)
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;

    // For these instructions we have more freedom. We can commute with the
    // other multiplicand or with the addend/subtrahend/minuend.

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both ops are fixed, one must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    // Look for two different register operands assumed to be commutable.
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // At least one of the operands to be commuted is not specified, and
      // this method is free to choose appropriate commutable operands.
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both of the operands are not fixed. Set one of the commutable
        // operands to the tied source.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is not fixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now. Let's choose another commutable
      // operand and assign its index to CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers; otherwise
        // the commute transformation changes nothing and is useless.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      // Assign the found pair of commutable indices to SrcOpIdx1 and
      // SrcOpIdx2 to return those values.
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }
    return true;
  }

  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
    CC = RISCVCC::getOppositeBranchCondition(CC);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  // ... (VFMACC/VFMSAC-style splat opcodes)
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    // ... (CASE_VFMA_CHANGE_OPCODE_SPLATS instantiations elided)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  // ... (VFMADD/VFNMADD-style opcodes)
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend we need to change the opcode;
    // otherwise we're just swapping two of the multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
      // ...
      }
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
                                                     /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    // Let the default code handle it.
    break;
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
#undef CASE_VFMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_SPLATS
#undef CASE_VFMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                    LiveVariables *LV,
                                                    LiveIntervals *LIS) const {
  MachineInstrBuilder MIB;
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): {
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    // If the tail policy is undisturbed we can't convert.
    if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() &
         1) == 0)
      return nullptr;
    // clang-format off
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
    }
    // clang-format on

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));
    break;
  }
  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
    // If the tail policy is undisturbed we can't convert.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      return nullptr;

    // clang-format off
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
    }
    // clang-format on

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));
    break;
  }
  }
  MIB.copyImplicitOps(MI);

  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
    }
  }

  if (LIS) {
    SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);

    if (MI.getOperand(0).isEarlyClobber()) {
      // The use of operand 1 now starts at the early-clobber slot rather
      // than the register slot, so shrink its live segment accordingly.
      LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
      LiveRange::Segment *S = LI.getSegmentContaining(Idx);
      if (S->end == Idx.getRegSlot(true))
        S->end = Idx.getRegSlot();
    }
  }

  return MIB;
}
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_OPCODE_COMMON
3048 assert(Amount > 0 &&
"There is no need to get VLEN scaled value.");
3049 assert(Amount % 8 == 0 &&
3050 "Reserve the stack by the multiple of one vector size.");
3053 int64_t NumOfVReg = Amount / 8;
3056 assert(isInt<32>(NumOfVReg) &&
3057 "Expect the number of vector registers within 32-bits.");
3058 if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
3060 if (ShiftAmount == 0)
3066 }
else if (
STI.hasStdExtZba() &&
3073 if (NumOfVReg % 9 == 0) {
3074 Opc = RISCV::SH3ADD;
3075 ShiftAmount =
Log2_64(NumOfVReg / 9);
3076 }
else if (NumOfVReg % 5 == 0) {
3077 Opc = RISCV::SH2ADD;
3078 ShiftAmount =
Log2_64(NumOfVReg / 5);
3079 }
else if (NumOfVReg % 3 == 0) {
3080 Opc = RISCV::SH1ADD;
3081 ShiftAmount =
Log2_64(NumOfVReg / 3);
3094 }
else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
3095 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3105 }
else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
3106 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3117 Register N =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3119 if (!
STI.hasStdExtM() && !
STI.hasStdExtZmmul())
3122 "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
3133 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
bool RISCV::isSEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

bool RISCV::isZEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

bool RISCV::isZEXT_B(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case RISCV::VS1R_V:
  case RISCV::VS2R_V:
  case RISCV::VS4R_V:
  case RISCV::VS8R_V:
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    return true;
  }
}

bool RISCV::isRVVSpill(const MachineInstr &MI, bool CheckFIs) {
  // RVV lacks any support for immediate addressing for stack addresses, so
  // be conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  // ...
}
3195std::optional<std::pair<unsigned, unsigned>>
3199 return std::nullopt;
3200 case RISCV::PseudoVSPILL2_M1:
3201 case RISCV::PseudoVRELOAD2_M1:
3202 return std::make_pair(2u, 1u);
3203 case RISCV::PseudoVSPILL2_M2:
3204 case RISCV::PseudoVRELOAD2_M2:
3205 return std::make_pair(2u, 2u);
3206 case RISCV::PseudoVSPILL2_M4:
3207 case RISCV::PseudoVRELOAD2_M4:
3208 return std::make_pair(2u, 4u);
3209 case RISCV::PseudoVSPILL3_M1:
3210 case RISCV::PseudoVRELOAD3_M1:
3211 return std::make_pair(3u, 1u);
3212 case RISCV::PseudoVSPILL3_M2:
3213 case RISCV::PseudoVRELOAD3_M2:
3214 return std::make_pair(3u, 2u);
3215 case RISCV::PseudoVSPILL4_M1:
3216 case RISCV::PseudoVRELOAD4_M1:
3217 return std::make_pair(4u, 1u);
3218 case RISCV::PseudoVSPILL4_M2:
3219 case RISCV::PseudoVRELOAD4_M2:
3220 return std::make_pair(4u, 2u);
3221 case RISCV::PseudoVSPILL5_M1:
3222 case RISCV::PseudoVRELOAD5_M1:
3223 return std::make_pair(5u, 1u);
3224 case RISCV::PseudoVSPILL6_M1:
3225 case RISCV::PseudoVRELOAD6_M1:
3226 return std::make_pair(6u, 1u);
3227 case RISCV::PseudoVSPILL7_M1:
3228 case RISCV::PseudoVRELOAD7_M1:
3229 return std::make_pair(7u, 1u);
3230 case RISCV::PseudoVSPILL8_M1:
3231 case RISCV::PseudoVRELOAD8_M1:
3232 return std::make_pair(8u, 1u);
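// The returned pair is (NF, LMUL): e.g. PseudoVSPILL4_M2 spills a 4-field
// segment tuple where each field occupies an LMUL=2 register group.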
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
         !MI.isInlineAsm();
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
3253std::optional<unsigned>
3258 return std::nullopt;
3261 case RISCV::VSLL_VX:
3262 case RISCV::VSRL_VX:
3263 case RISCV::VSRA_VX:
3265 case RISCV::VSSRL_VX:
3266 case RISCV::VSSRA_VX:
3271 case RISCV::VNSRL_WX:
3272 case RISCV::VNSRA_WX:
3274 case RISCV::VNCLIPU_WX:
3275 case RISCV::VNCLIP_WX:
3280 case RISCV::VADD_VX:
3281 case RISCV::VSUB_VX:
3282 case RISCV::VRSUB_VX:
3284 case RISCV::VWADDU_VX:
3285 case RISCV::VWSUBU_VX:
3286 case RISCV::VWADD_VX:
3287 case RISCV::VWSUB_VX:
3288 case RISCV::VWADDU_WX:
3289 case RISCV::VWSUBU_WX:
3290 case RISCV::VWADD_WX:
3291 case RISCV::VWSUB_WX:
3293 case RISCV::VADC_VXM:
3294 case RISCV::VADC_VIM:
3295 case RISCV::VMADC_VXM:
3296 case RISCV::VMADC_VIM:
3297 case RISCV::VMADC_VX:
3298 case RISCV::VSBC_VXM:
3299 case RISCV::VMSBC_VXM:
3300 case RISCV::VMSBC_VX:
3302 case RISCV::VAND_VX:
3304 case RISCV::VXOR_VX:
3306 case RISCV::VMSEQ_VX:
3307 case RISCV::VMSNE_VX:
3308 case RISCV::VMSLTU_VX:
3309 case RISCV::VMSLT_VX:
3310 case RISCV::VMSLEU_VX:
3311 case RISCV::VMSLE_VX:
3312 case RISCV::VMSGTU_VX:
3313 case RISCV::VMSGT_VX:
3315 case RISCV::VMINU_VX:
3316 case RISCV::VMIN_VX:
3317 case RISCV::VMAXU_VX:
3318 case RISCV::VMAX_VX:
3320 case RISCV::VMUL_VX:
3321 case RISCV::VMULH_VX:
3322 case RISCV::VMULHU_VX:
3323 case RISCV::VMULHSU_VX:
3325 case RISCV::VDIVU_VX:
3326 case RISCV::VDIV_VX:
3327 case RISCV::VREMU_VX:
3328 case RISCV::VREM_VX:
3330 case RISCV::VWMUL_VX:
3331 case RISCV::VWMULU_VX:
3332 case RISCV::VWMULSU_VX:
3334 case RISCV::VMACC_VX:
3335 case RISCV::VNMSAC_VX:
3336 case RISCV::VMADD_VX:
3337 case RISCV::VNMSUB_VX:
3339 case RISCV::VWMACCU_VX:
3340 case RISCV::VWMACC_VX:
3341 case RISCV::VWMACCSU_VX:
3342 case RISCV::VWMACCUS_VX:
3344 case RISCV::VMERGE_VXM:
3346 case RISCV::VMV_V_X:
3348 case RISCV::VSADDU_VX:
3349 case RISCV::VSADD_VX:
3350 case RISCV::VSSUBU_VX:
3351 case RISCV::VSSUB_VX:
3353 case RISCV::VAADDU_VX:
3354 case RISCV::VAADD_VX:
3355 case RISCV::VASUBU_VX:
3356 case RISCV::VASUB_VX:
3358 case RISCV::VSMUL_VX:
3360 case RISCV::VMV_S_X:
3361 return 1U << Log2SEW;
3367 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc)
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
const char LLVMTargetMachineRef TM
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
static bool isRVVWholeLoadStore(unsigned Opcode)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, MachineCombinerPattern Pattern)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isFSUB(unsigned Opc)
MachineOutlinerConstructionID
static bool isFMUL(unsigned Opc)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
#define CASE_OPERAND_UIMM(NUM)
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
static bool isFADD(unsigned Opc)
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
static constexpr uint32_t Opcode
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
This class represents an Operation in the Expression.
Diagnostic information for unsupported feature in backend.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Wrapper class representing physical registers. Should be passed by value.
unsigned pred_size() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
RISCVInstrInfo(RISCVSubtarget &STI)
void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
const RISCVSubtarget & STI
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
virtual outliner::InstrType getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
void finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, const TargetRegisterInfo *TRI) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
void getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, ArrayRef< const MachineOperand * > BaseOps2, unsigned ClusterSize, unsigned NumBytes) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, unsigned Opc, unsigned NF=1) const
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-specific information for each MachineFunction.
int getBranchRelaxationScratchFrameIndex() const
bool hasStdExtCOrZca() const
const RISCVRegisterInfo * getRegisterInfo() const override
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the place before To.
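Taken together, these three RegScavenger entry points form the usual pattern for obtaining a scratch register late in codegen. A minimal sketch, assuming the RISC-V backend headers are in scope and that a GPR is wanted (findScratchGPR is a hypothetical helper name, not part of the API):

#include "llvm/CodeGen/RegisterScavenging.h"
using namespace llvm;

static Register findScratchGPR(RegScavenger &RS, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI) {
  // Track liveness backwards from the end of the block.
  RS.enterBasicBlockEnd(MBB);
  // Find (or spill to free up) a GPR unused between MI and the block end.
  Register Scratch = RS.scavengeRegisterBackwards(
      RISCV::GPRRegClass, MI, /*RestoreAfter=*/false, /*SPAdj=*/0);
  // Prevent the same register from being handed out twice.
  RS.setRegUsed(Scratch);
  return Scratch;
}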
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
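The insert/erase/push_back members above combine into the standard LLVM idiom for an order-preserving, deduplicated worklist; a self-contained sketch:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

template <typename T>
SmallVector<T *, 8> uniqueInOrder(ArrayRef<T *> In) {
  SmallPtrSet<T *, 8> Seen;
  SmallVector<T *, 8> Out;
  for (T *P : In)
    if (Seen.insert(P).second) // .second is true only on first insertion
      Out.push_back(P);
  return Out;
}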
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specific information in Flags.
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could replace the original code sequence.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has a reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getOppositeBranchCondition(CondCode)
unsigned getBrCond(CondCode CC)
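These two helpers are typically paired to invert a conditional branch: flip the condition code, then map it back to an opcode. A sketch using only the declarations listed here (CondCode names come from the RISCVCC enum):

unsigned invertedBranchOpcode(RISCVCC::CondCode CC) {
  RISCVCC::CondCode Inv = RISCVCC::getOppositeBranchCondition(CC);
  return RISCVCC::getBrCond(Inv); // e.g. COND_EQ -> COND_NE -> RISCV::BNE
}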
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
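The hasXOp/getXOpNum pairs are meant to be used together: the TSFlags predicate guards the operand lookup. A sketch for the SEW operand, under the assumption (worth checking against your LLVM revision) that the RVV pseudos store log2(SEW) in that operand:

static std::optional<unsigned> getLog2SEW(const MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!RISCVII::hasSEWOp(Desc.TSFlags))
    return std::nullopt;
  // The SEW operand is an immediate holding log2(SEW).
  return MI.getOperand(RISCVII::getSEWOpNum(Desc)).getImm();
}

hasVLOp/getVLOpNum follow the same pattern for the VL operand.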
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
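generateInstSeq is the constant-materialization oracle: it returns the LUI/ADDI(W)/SLLI-style sequence the backend would emit for Val. A common use is simply costing a constant; this sketch relies only on size(), since the Inst member accessors vary slightly across LLVM revisions:

unsigned materializationCost(int64_t Val, const MCSubtargetInfo &STI) {
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
  return Seq.size(); // number of scalar instructions needed for Val
}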
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static bool isTailAgnostic(unsigned VType)
static RISCVII::VLMUL getVLMUL(unsigned VType)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static bool isValidSEW(unsigned SEW)
void printVType(unsigned VType, raw_ostream &OS)
static unsigned getSEW(unsigned VType)
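These RISCVVType helpers unpack the vtype immediate carried by vsetvli. decodeVLMUL returns {multiplier, isFractional}, so LMUL=1/2 comes back as {2, true}. A sketch that reproduces roughly what printVType emits (the exact output format here is illustrative, not the canonical one):

void describeVType(unsigned VType, raw_ostream &OS) {
  unsigned SEW = RISCVVType::getSEW(VType);
  assert(RISCVVType::isValidSEW(SEW) && "malformed vtype");
  auto [LMul, Fractional] =
      RISCVVType::decodeVLMUL(RISCVVType::getVLMUL(VType));
  OS << 'e' << SEW << ",m" << (Fractional ? "f" : "") << LMul
     << (RISCVVType::isTailAgnostic(VType) ? ",ta" : ",tu");
}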
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
bool isSEXT_W(const MachineInstr &MI)
bool isFaultFirstLoad(const MachineInstr &MI)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isZEXT_B(const MachineInstr &MI)
bool isRVVSpill(const MachineInstr &MI)
bool isZEXT_W(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
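For example, summing a container while skipping its first element:

SmallVector<int, 4> Vals{10, 20, 30};
int Sum = 0;
for (int V : drop_begin(Vals)) // visits 20 and 30 only
  Sum += V;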
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
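Typical use against a MachineInstr:

bool AllRegs = all_of(MI.operands(), [](const MachineOperand &MO) {
  return MO.isReg(); // true iff every operand of MI is a register
});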
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
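BuildMI is the builder used throughout this file; each addReg/addImm call appends one operand to the new instruction. A sketch that loads a small immediate into a register (the helper name and its parameters are placeholders for illustration):

static void emitLoadSmallImm(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator InsertPt,
                             const DebugLoc &DL, const RISCVInstrInfo &TII,
                             Register Dst, int64_t Imm) {
  assert(isIntN(12, Imm) && "ADDI takes a 12-bit signed immediate");
  BuildMI(MBB, InsertPt, DL, TII.get(RISCV::ADDI), Dst)
      .addReg(RISCV::X0) // rs1 = x0, so the result is just Imm
      .addImm(Imm);
}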
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence and B, C, ... are the values from the original input ranges.
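Usage sketch; each element of the enumerated range exposes index() and value():

for (const auto &En : enumerate(MI.operands()))
  dbgs() << "operand " << En.index() << ": " << En.value() << '\n';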
static const MachineMemOperand::Flags MONontemporalBit0
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value, returning the original object being addressed.
unsigned getDeadRegState(bool B)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
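The guard-then-shift pairing is the usual reason these appear together, e.g. strength-reducing a multiply by a constant scale into a shift:

static std::optional<unsigned> shiftForScale(uint64_t Amount) {
  if (!isPowerOf2_64(Amount))
    return std::nullopt;  // Log2_64 alone would silently round down
  return Log2_64(Amount); // multiply by Amount == shift left by this
}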
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
unsigned getKillRegState(bool B)
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getRenamableRegState(bool B)
DWARFExpression::Operation Op
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
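For example, dropping every even value in place:

SmallVector<int, 8> Vals{1, 2, 3, 4, 5, 6};
erase_if(Vals, [](int V) { return V % 2 == 0; }); // Vals is now {1, 3, 5}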
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
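These two show up together whenever an immediate field is decoded and validated: SignExtend64 recovers the signed value from a raw bit field, and isIntN asks whether a value would fit back in. A sketch for a 12-bit field (helper names are illustrative):

static int64_t decodeSimm12(uint32_t InsnBits) {
  return SignExtend64<12>(InsnBits & 0xFFF); // SignExtend64<12>(0xFFF) == -1
}
static bool fitsSimm12(int64_t V) {
  return isIntN(12, V); // true iff V is representable in 12 signed bits
}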
Description of the encoding of one expression Op.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.