48#define DEBUG_TYPE "ppc-instr-info"
50#define GET_INSTRMAP_INFO
51#define GET_INSTRINFO_CTOR_DTOR
52#include "PPCGenInstrInfo.inc"
55 "Number of spillvsrrc spilled to stack as vec");
57 "Number of spillvsrrc spilled to stack as gpr");
58STATISTIC(NumGPRtoVSRSpill,
"Number of gpr spills to spillvsrrc");
60 "Number of ISELs that depend on comparison of constants converted");
62 "Number of compare-immediate instructions fed by constants");
64 "Number of record-form rotates converted to record-form andi");
68 cl::desc(
"Disable analysis for CTR loops"));
74cl::desc(
"Causes the backend to crash instead of generating a nop VSX copy"),
79 cl::desc(
"Use the old (incorrect) instruction latency calculation"));
83 cl::desc(
"register pressure factor for the transformations."));
87 cl::desc(
"enable register pressure reduce in machine combiner pass."));
90void PPCInstrInfo::anchor() {}
95 STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
96 Subtarget(STI), RI(STI.getTargetMachine()) {}
104 static_cast<const PPCSubtarget *
>(STI)->getCPUDirective();
108 static_cast<const PPCSubtarget *
>(STI)->getInstrItineraryData();
140 unsigned *PredCost)
const {
142 return PPCGenInstrInfo::getInstrLatency(ItinData,
MI, PredCost);
152 unsigned DefClass =
MI.getDesc().getSchedClass();
153 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
171 std::optional<unsigned>
Latency = PPCGenInstrInfo::getOperandLatency(
174 if (!
DefMI.getParent())
181 if (Reg.isVirtual()) {
184 IsRegCR =
MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
185 MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
187 IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
188 PPC::CRBITRCRegClass.contains(Reg);
191 if (
UseMI.isBranch() && IsRegCR) {
// Column indices into each row of the FMA opcode-info table below.
// Each row is: { FMA opcode, add opcode, mul opcode, addend operand index,
//                first mul-operand index, sub opcode }.
#define InfoArrayIdxFMAInst 0  // the FMA instruction opcode
#define InfoArrayIdxFAddInst 1 // the matching floating add opcode
#define InfoArrayIdxFMULInst 2 // the matching floating multiply opcode
#define InfoArrayIdxAddOpIdx 3 // operand index of the addend in the FMA
#define InfoArrayIdxMULOpIdx 4 // operand index of the first mul operand
#define InfoArrayIdxFSubInst 5 // the matching floating subtract opcode
287 {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
288 {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
289 {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
290 {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
291 {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
292 {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};
296int16_t PPCInstrInfo::getFMAOpIdxInfo(
unsigned Opcode)
const {
353 bool DoRegPressureReduce)
const {
358 auto IsAllOpsVirtualReg = [](
const MachineInstr &Instr) {
359 for (
const auto &MO : Instr.explicit_operands())
360 if (!(MO.isReg() && MO.getReg().isVirtual()))
365 auto IsReassociableAddOrSub = [&](
const MachineInstr &Instr,
367 if (Instr.getOpcode() !=
378 if (!IsAllOpsVirtualReg(Instr))
384 !
MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))
390 auto IsReassociableFMA = [&](
const MachineInstr &Instr, int16_t &AddOpIdx,
391 int16_t &MulOpIdx,
bool IsLeaf) {
392 int16_t
Idx = getFMAOpIdxInfo(Instr.getOpcode());
403 if (!IsAllOpsVirtualReg(Instr))
423 int16_t AddOpIdx = -1;
424 int16_t MulOpIdx = -1;
426 bool IsUsedOnceL =
false;
427 bool IsUsedOnceR =
false;
431 auto IsRPReductionCandidate = [&]() {
435 if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)
440 if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx,
true)) {
441 assert((MulOpIdx >= 0) &&
"mul operand index not right!");
442 Register MULRegL =
TRI->lookThruSingleUseCopyChain(
444 Register MULRegR =
TRI->lookThruSingleUseCopyChain(
446 if (!MULRegL && !MULRegR)
449 if (MULRegL && !MULRegR) {
453 }
else if (!MULRegL && MULRegR) {
465 MULInstrL =
MRI->getVRegDef(MULRegL);
466 MULInstrR =
MRI->getVRegDef(MULRegR);
473 if (DoRegPressureReduce && IsRPReductionCandidate()) {
474 assert((MULInstrL && MULInstrR) &&
"wrong register preduction candidate!");
495 if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx,
false))
498 assert((AddOpIdx >= 0) &&
"add operand index not right!");
505 if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx,
false))
508 assert((AddOpIdx >= 0) &&
"add operand index not right!");
513 if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx,
true)) {
529 assert(!InsInstrs.
empty() &&
"Instructions set to be inserted is empty!");
562 assert(isa<llvm::ConstantFP>(
C) &&
"not a valid constant!");
565 APFloat F1((dyn_cast<ConstantFP>(
C))->getValueAPF());
567 Constant *NegC = ConstantFP::get(dyn_cast<ConstantFP>(
C)->getContext(), F1);
575 for (
auto *Inst : InsInstrs) {
577 assert(Operand.isReg() &&
"Invalid instruction in InsInstrs!");
578 if (Operand.getReg() == PPC::ZERO8) {
579 Placeholder = &Operand;
585 assert(Placeholder &&
"Placeholder does not exist!");
590 generateLoadForNewConst(ConstPoolIdx, &Root,
C->getType(), InsInstrs);
593 Placeholder->setReg(LoadNewConst);
614 if (!(Subtarget.
isPPC64() && Subtarget.hasP9Vector() &&
622 auto GetMBBPressure =
632 if (
MI.isDebugValue() ||
MI.isDebugLabel())
638 RPTracker.
recede(RegOpers);
648 unsigned VSSRCLimit =
TRI->getRegPressureSetLimit(
652 return GetMBBPressure(
MBB)[PPC::RegisterPressureSets::VSSRC] >
658 if (!
I->hasOneMemOperand())
662 return Op->isLoad() &&
Op->getPseudoValue() &&
666Register PPCInstrInfo::generateLoadForNewConst(
674 "Target not supported!\n");
680 Register VReg1 =
MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
682 BuildMI(*MF,
MI->getDebugLoc(),
get(PPC::ADDIStocHA8), VReg1)
687 "Only float and double are supported!");
692 LoadOpcode = PPC::DFLOADf32;
694 LoadOpcode = PPC::DFLOADf64;
724 assert(
I->mayLoad() &&
"Should be a load instruction.\n");
725 for (
auto MO :
I->uses()) {
729 if (Reg == 0 || !Reg.isVirtual())
735 return (MCP->
getConstants())[MO2.getIndex()].Val.ConstVal;
755 bool DoRegPressureReduce)
const {
765 DoRegPressureReduce);
778 reassociateFMA(Root,
Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);
783 DelInstrs, InstrIdxForVirtReg);
788void PPCInstrInfo::reassociateFMA(
799 MRI.constrainRegClass(RegC, RC);
802 int16_t
Idx = getFMAOpIdxInfo(FmaOp);
803 assert(
Idx >= 0 &&
"Root must be a FMA instruction");
805 bool IsILPReassociate =
825 Leaf =
MRI.getVRegDef(MULReg);
831 Leaf =
MRI.getVRegDef(MULReg);
837 if (IsILPReassociate)
845 MRI.constrainRegClass(Reg, RC);
846 KillFlag = Operand.
isKill();
851 bool &MulOp1KillFlag,
bool &MulOp2KillFlag,
852 bool &AddOpKillFlag) {
853 GetOperandInfo(
Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
854 GetOperandInfo(
Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
855 GetOperandInfo(
Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);
858 Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,
860 bool KillX =
false, KillY =
false, KillM11 =
false, KillM12 =
false,
861 KillM21 =
false, KillM22 =
false, KillM31 =
false, KillM32 =
false,
862 KillA11 =
false, KillA21 =
false, KillB =
false;
864 GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);
866 if (IsILPReassociate)
867 GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);
870 GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
871 GetOperandInfo(Leaf->
getOperand(AddOpIdx), RegX, KillX);
873 GetOperandInfo(Leaf->
getOperand(1), RegX, KillX);
874 GetOperandInfo(Leaf->
getOperand(2), RegY, KillY);
877 GetOperandInfo(Leaf->
getOperand(1), RegX, KillX);
878 GetOperandInfo(Leaf->
getOperand(2), RegY, KillY);
888 InstrIdxForVirtReg.
insert(std::make_pair(NewVRA, 0));
891 if (IsILPReassociate) {
892 NewVRB =
MRI.createVirtualRegister(RC);
893 InstrIdxForVirtReg.
insert(std::make_pair(NewVRB, 1));
898 NewVRD =
MRI.createVirtualRegister(RC);
899 InstrIdxForVirtReg.
insert(std::make_pair(NewVRD, 2));
904 Register RegMul2,
bool KillRegMul2) {
905 MI->getOperand(AddOpIdx).setReg(RegAdd);
906 MI->getOperand(AddOpIdx).setIsKill(KillAdd);
907 MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
908 MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
909 MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
910 MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);
931 AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
932 AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);
953 assert(NewVRD &&
"new FMA register not created!");
972 AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
973 AdjustOperandOrder(MINewD, NewVRA,
true, RegM31, KillM31, RegM32,
999 bool KillVarReg =
false;
1002 KillVarReg = KillM31;
1005 KillVarReg = KillM32;
1029 if (!IsILPReassociate) {
1038 "Insertion instructions set should not be empty!");
1042 if (IsILPReassociate)
1050 unsigned &SubIdx)
const {
1051 switch (
MI.getOpcode()) {
1052 default:
return false;
1055 case PPC::EXTSW_32_64:
1056 SrcReg =
MI.getOperand(1).getReg();
1057 DstReg =
MI.getOperand(0).getReg();
1058 SubIdx = PPC::sub_32;
1064 int &FrameIndex)
const {
1068 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1069 MI.getOperand(2).isFI()) {
1070 FrameIndex =
MI.getOperand(2).getIndex();
1071 return MI.getOperand(0).getReg();
1081 switch (
MI.getOpcode()) {
1091 case PPC::ADDIStocHA:
1092 case PPC::ADDIStocHA8:
1094 case PPC::ADDItocL8:
1095 case PPC::LOAD_STACK_GUARD:
1096 case PPC::PPCLdFixedAddr:
1098 case PPC::XXLXORspz:
1099 case PPC::XXLXORdpz:
1100 case PPC::XXLEQVOnes:
1101 case PPC::XXSPLTI32DX:
1103 case PPC::XXSPLTIDP:
1107 case PPC::V_SETALLONESB:
1108 case PPC::V_SETALLONESH:
1109 case PPC::V_SETALLONES:
1112 case PPC::XXSETACCZ:
1113 case PPC::XXSETACCZW:
1120 int &FrameIndex)
const {
1122 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1123 MI.getOperand(2).isFI()) {
1124 FrameIndex =
MI.getOperand(2).getIndex();
1125 return MI.getOperand(0).getReg();
1133 unsigned OpIdx2)
const {
1137 if (
MI.getOpcode() != PPC::RLWIMI &&
MI.getOpcode() != PPC::RLWIMI_rec)
1145 if (
MI.getOperand(3).getImm() != 0)
1156 assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
1157 "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMI_rec.");
1161 unsigned SubReg1 =
MI.getOperand(1).getSubReg();
1162 unsigned SubReg2 =
MI.getOperand(2).getSubReg();
1163 bool Reg1IsKill =
MI.getOperand(1).isKill();
1164 bool Reg2IsKill =
MI.getOperand(2).isKill();
1165 bool ChangeReg0 =
false;
1171 "Expecting a two-address instruction!");
1172 assert(
MI.getOperand(0).getSubReg() == SubReg1 &&
"Tied subreg mismatch");
1178 unsigned MB =
MI.getOperand(4).getImm();
1179 unsigned ME =
MI.getOperand(5).getImm();
1183 if (MB == 0 && ME == 31)
1188 Register Reg0 = ChangeReg0 ? Reg2 :
MI.getOperand(0).getReg();
1189 bool Reg0IsDead =
MI.getOperand(0).isDead();
1190 return BuildMI(MF,
MI.getDebugLoc(),
MI.getDesc())
1199 MI.getOperand(0).setReg(Reg2);
1200 MI.getOperand(0).setSubReg(SubReg2);
1202 MI.getOperand(2).setReg(Reg1);
1203 MI.getOperand(1).setReg(Reg2);
1204 MI.getOperand(2).setSubReg(SubReg1);
1205 MI.getOperand(1).setSubReg(SubReg2);
1206 MI.getOperand(2).setIsKill(Reg1IsKill);
1207 MI.getOperand(1).setIsKill(Reg2IsKill);
1210 MI.getOperand(4).setImm((ME + 1) & 31);
1211 MI.getOperand(5).setImm((MB - 1) & 31);
1216 unsigned &SrcOpIdx1,
1217 unsigned &SrcOpIdx2)
const {
1228 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
1238 default: Opcode = PPC::NOP;
break;
1264 bool AllowModify)
const {
1265 bool isPPC64 = Subtarget.
isPPC64();
1272 if (!isUnpredicatedTerminator(*
I))
1278 if (
I->getOpcode() == PPC::B &&
1280 I->eraseFromParent();
1284 if (
I ==
MBB.
end() || !isUnpredicatedTerminator(*
I))
1293 if (
I ==
MBB.
begin() || !isUnpredicatedTerminator(*--
I)) {
1299 }
else if (LastInst.
getOpcode() == PPC::BCC) {
1307 }
else if (LastInst.
getOpcode() == PPC::BC) {
1315 }
else if (LastInst.
getOpcode() == PPC::BCn) {
1323 }
else if (LastInst.
getOpcode() == PPC::BDNZ8 ||
1334 }
else if (LastInst.
getOpcode() == PPC::BDZ8 ||
1355 if (
I !=
MBB.
begin() && isUnpredicatedTerminator(*--
I))
1359 if (SecondLastInst.
getOpcode() == PPC::BCC &&
1369 }
else if (SecondLastInst.
getOpcode() == PPC::BC &&
1379 }
else if (SecondLastInst.
getOpcode() == PPC::BCn &&
1389 }
else if ((SecondLastInst.
getOpcode() == PPC::BDNZ8 ||
1390 SecondLastInst.
getOpcode() == PPC::BDNZ) &&
1403 }
else if ((SecondLastInst.
getOpcode() == PPC::BDZ8 ||
1404 SecondLastInst.
getOpcode() == PPC::BDZ) &&
1427 I->eraseFromParent();
1436 int *BytesRemoved)
const {
1437 assert(!BytesRemoved &&
"code size not handled");
1443 if (
I->getOpcode() != PPC::B &&
I->getOpcode() != PPC::BCC &&
1444 I->getOpcode() != PPC::BC &&
I->getOpcode() != PPC::BCn &&
1445 I->getOpcode() != PPC::BDNZ8 &&
I->getOpcode() != PPC::BDNZ &&
1446 I->getOpcode() != PPC::BDZ8 &&
I->getOpcode() != PPC::BDZ)
1450 I->eraseFromParent();
1456 if (
I->getOpcode() != PPC::BCC &&
1457 I->getOpcode() != PPC::BC &&
I->getOpcode() != PPC::BCn &&
1458 I->getOpcode() != PPC::BDNZ8 &&
I->getOpcode() != PPC::BDNZ &&
1459 I->getOpcode() != PPC::BDZ8 &&
I->getOpcode() != PPC::BDZ)
1463 I->eraseFromParent();
1472 int *BytesAdded)
const {
1474 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1476 "PPC branch conditions have two components!");
1477 assert(!BytesAdded &&
"code size not handled");
1479 bool isPPC64 = Subtarget.
isPPC64();
1487 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
1488 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).
addMBB(
TBB);
1504 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
1505 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).
addMBB(
TBB);
1523 Register FalseReg,
int &CondCycles,
1524 int &TrueCycles,
int &FalseCycles)
const {
1525 if (!Subtarget.hasISEL())
1528 if (
Cond.size() != 2)
1544 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
1549 if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
1550 !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
1551 !PPC::G8RCRegClass.hasSubClassEq(RC) &&
1552 !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
1572 "PPC branch conditions have two components!");
1577 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
1578 assert(RC &&
"TrueReg and FalseReg must have overlapping register classes");
1580 bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
1581 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
1583 PPC::GPRCRegClass.hasSubClassEq(RC) ||
1584 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
1585 "isel is for regular integer GPRs only");
1587 unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
1590 unsigned SubIdx = 0;
1591 bool SwapOps =
false;
1592 switch (SelectPred) {
1596 SubIdx = PPC::sub_eq; SwapOps =
false;
break;
1600 SubIdx = PPC::sub_eq; SwapOps =
true;
break;
1604 SubIdx = PPC::sub_lt; SwapOps =
false;
break;
1608 SubIdx = PPC::sub_lt; SwapOps =
true;
break;
1612 SubIdx = PPC::sub_gt; SwapOps =
false;
break;
1616 SubIdx = PPC::sub_gt; SwapOps =
true;
break;
1620 SubIdx = PPC::sub_un; SwapOps =
false;
break;
1624 SubIdx = PPC::sub_un; SwapOps =
true;
break;
1629 Register FirstReg = SwapOps ? FalseReg : TrueReg,
1630 SecondReg = SwapOps ? TrueReg : FalseReg;
1635 if (
MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
1636 MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
1638 MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
1639 &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
1641 FirstReg =
MRI.createVirtualRegister(FirstRC);
1653 if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
1654 CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
1655 CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
1656 CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
1658 if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
1659 CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
1660 CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
1661 CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
1663 if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
1664 CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
1665 CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
1666 CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
1668 if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
1669 CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
1670 CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
1671 CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
1674 assert(Ret != 4 &&
"Invalid CR bit register");
1685 if (PPC::F8RCRegClass.
contains(DestReg) &&
1686 PPC::VSRCRegClass.
contains(SrcReg)) {
1688 TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
1694 }
else if (PPC::F8RCRegClass.
contains(SrcReg) &&
1695 PPC::VSRCRegClass.
contains(DestReg)) {
1697 TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
1706 if (PPC::CRBITRCRegClass.
contains(SrcReg) &&
1707 PPC::GPRCRegClass.
contains(DestReg)) {
1719 }
else if (PPC::CRRCRegClass.
contains(SrcReg) &&
1720 (PPC::G8RCRegClass.
contains(DestReg) ||
1721 PPC::GPRCRegClass.
contains(DestReg))) {
1722 bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
1723 unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
1724 unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
1725 unsigned CRNum =
TRI->getEncodingValue(SrcReg);
1737 }
else if (PPC::G8RCRegClass.
contains(SrcReg) &&
1738 PPC::VSFRCRegClass.
contains(DestReg)) {
1739 assert(Subtarget.hasDirectMove() &&
1740 "Subtarget doesn't support directmove, don't know how to copy.");
1745 }
else if (PPC::VSFRCRegClass.
contains(SrcReg) &&
1746 PPC::G8RCRegClass.
contains(DestReg)) {
1747 assert(Subtarget.hasDirectMove() &&
1748 "Subtarget doesn't support directmove, don't know how to copy.");
1752 }
else if (PPC::SPERCRegClass.
contains(SrcReg) &&
1753 PPC::GPRCRegClass.
contains(DestReg)) {
1757 }
else if (PPC::GPRCRegClass.
contains(SrcReg) &&
1758 PPC::SPERCRegClass.
contains(DestReg)) {
1765 if (PPC::GPRCRegClass.
contains(DestReg, SrcReg))
1767 else if (PPC::G8RCRegClass.
contains(DestReg, SrcReg))
1769 else if (PPC::F4RCRegClass.
contains(DestReg, SrcReg))
1771 else if (PPC::CRRCRegClass.
contains(DestReg, SrcReg))
1773 else if (PPC::VRRCRegClass.
contains(DestReg, SrcReg))
1775 else if (PPC::VSRCRegClass.
contains(DestReg, SrcReg))
1785 else if (PPC::VSFRCRegClass.
contains(DestReg, SrcReg) ||
1786 PPC::VSSRCRegClass.
contains(DestReg, SrcReg))
1787 Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
1788 else if (Subtarget.pairedVectorMemops() &&
1789 PPC::VSRpRCRegClass.contains(DestReg, SrcReg)) {
1790 if (SrcReg > PPC::VSRp15)
1791 SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;
1793 SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
1794 if (DestReg > PPC::VSRp15)
1795 DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;
1797 DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;
1804 else if (PPC::CRBITRCRegClass.
contains(DestReg, SrcReg))
1806 else if (PPC::SPERCRegClass.
contains(DestReg, SrcReg))
1808 else if ((PPC::ACCRCRegClass.
contains(DestReg) ||
1809 PPC::UACCRCRegClass.
contains(DestReg)) &&
1810 (PPC::ACCRCRegClass.
contains(SrcReg) ||
1811 PPC::UACCRCRegClass.
contains(SrcReg))) {
1817 bool DestPrimed = PPC::ACCRCRegClass.contains(DestReg);
1818 bool SrcPrimed = PPC::ACCRCRegClass.contains(SrcReg);
1820 PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
1822 PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
1831 if (SrcPrimed && !KillSrc)
1834 }
else if (PPC::G8pRCRegClass.
contains(DestReg) &&
1835 PPC::G8pRCRegClass.
contains(SrcReg)) {
1837 unsigned DestRegIdx = DestReg - PPC::G8p0;
1838 MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
1839 MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
1840 unsigned SrcRegIdx = SrcReg - PPC::G8p0;
1841 MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
1842 MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;
1864 if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
1865 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
1867 }
else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
1868 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
1870 }
else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
1872 }
else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
1874 }
else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
1876 }
else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
1878 }
else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
1880 }
else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
1882 }
else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
1884 }
else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
1886 }
else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
1888 }
else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
1890 }
else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
1891 assert(Subtarget.pairedVectorMemops() &&
1892 "Register unexpected when paired memops are disabled.");
1894 }
else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
1895 assert(Subtarget.pairedVectorMemops() &&
1896 "Register unexpected when paired memops are disabled.");
1898 }
else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
1899 assert(Subtarget.pairedVectorMemops() &&
1900 "Register unexpected when paired memops are disabled.");
1902 }
else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
1903 assert(Subtarget.pairedVectorMemops() &&
1904 "Register unexpected when paired memops are disabled.");
1906 }
else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {
1917 return OpcodesForSpill[getSpillIndex(RC)];
1923 return OpcodesForSpill[getSpillIndex(RC)];
1926void PPCInstrInfo::StoreRegToStackSlot(
1940 if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
1941 PPC::CRBITRCRegClass.hasSubClassEq(RC))
1955 StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);
1957 for (
unsigned i = 0, e = NewMIs.
size(); i != e; ++i)
1965 NewMIs.
back()->addMemOperand(MF, MMO);
1984 unsigned DestReg,
int FrameIdx,
2002 LoadRegFromStackSlot(MF,
DL, DestReg, FrameIdx, RC, NewMIs);
2004 for (
unsigned i = 0, e = NewMIs.
size(); i != e; ++i)
2012 NewMIs.
back()->addMemOperand(MF, MMO);
2035 assert(
Cond.size() == 2 &&
"Invalid PPC branch opcode!");
2037 Cond[0].setImm(
Cond[0].getImm() == 0 ? 1 : 0);
2050 unsigned DefOpc =
DefMI.getOpcode();
2051 if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
2053 if (!
DefMI.getOperand(1).isImm())
2055 if (
DefMI.getOperand(1).getImm() != 0)
2071 for (UseIdx = 0; UseIdx <
UseMI.getNumOperands(); ++UseIdx)
2072 if (
UseMI.getOperand(UseIdx).isReg() &&
2076 assert(UseIdx <
UseMI.getNumOperands() &&
"Cannot find Reg in UseMI");
2087 if (UseInfo->
RegClass != PPC::GPRC_NOR0RegClassID &&
2088 UseInfo->
RegClass != PPC::G8RC_NOX0RegClassID)
2100 bool isPPC64 = Subtarget.
isPPC64();
2101 ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
2103 ZeroReg = UseInfo->
RegClass == PPC::G8RC_NOX0RegClassID ?
2104 PPC::ZERO8 : PPC::ZERO;
2109 UseMI.getOperand(UseIdx).setReg(ZeroReg);
2121 if (
MRI->use_nodbg_empty(Reg))
2122 DefMI.eraseFromParent();
2128 if (
MI.definesRegister(PPC::CTR) ||
MI.definesRegister(PPC::CTR8))
2140 unsigned NumT,
unsigned ExtraT,
2142 unsigned NumF,
unsigned ExtraF,
2162 switch (
MI.getOpcode()) {
2178 unsigned OpC =
MI.getOpcode();
2179 if (OpC == PPC::BLR || OpC == PPC::BLR8) {
2180 if (Pred[1].
getReg() == PPC::CTR8 || Pred[1].
getReg() == PPC::CTR) {
2181 bool isPPC64 = Subtarget.
isPPC64();
2182 MI.setDesc(
get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
2183 : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
2189 MI.setDesc(
get(PPC::BCLR));
2192 MI.setDesc(
get(PPC::BCLRn));
2195 MI.setDesc(
get(PPC::BCCLR));
2197 .
addImm(Pred[0].getImm())
2202 }
else if (OpC == PPC::B) {
2203 if (Pred[1].
getReg() == PPC::CTR8 || Pred[1].
getReg() == PPC::CTR) {
2204 bool isPPC64 = Subtarget.
isPPC64();
2205 MI.setDesc(
get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
2206 : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
2213 MI.removeOperand(0);
2215 MI.setDesc(
get(PPC::BC));
2221 MI.removeOperand(0);
2223 MI.setDesc(
get(PPC::BCn));
2229 MI.removeOperand(0);
2231 MI.setDesc(
get(PPC::BCC));
2233 .
addImm(Pred[0].getImm())
2239 }
else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
2240 OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
2241 OpC == PPC::BCTRL8_RM) {
2242 if (Pred[1].
getReg() == PPC::CTR8 || Pred[1].
getReg() == PPC::CTR)
2245 bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
2246 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
2247 bool isPPC64 = Subtarget.
isPPC64();
2250 MI.setDesc(
get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
2251 : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
2254 MI.setDesc(
get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
2255 : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
2258 MI.setDesc(
get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
2259 : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
2261 .
addImm(Pred[0].getImm())
2270 if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)
2282 assert(Pred1.
size() == 2 &&
"Invalid PPC first predicate");
2283 assert(Pred2.
size() == 2 &&
"Invalid PPC second predicate");
2285 if (Pred1[1].
getReg() == PPC::CTR8 || Pred1[1].
getReg() == PPC::CTR)
2287 if (Pred2[1].
getReg() == PPC::CTR8 || Pred2[1].
getReg() == PPC::CTR)
2312 std::vector<MachineOperand> &Pred,
2313 bool SkipDead)
const {
2321 { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
2322 &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
2326 for (
unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
2329 if (MO.isDef() && RC->
contains(MO.getReg())) {
2333 }
else if (MO.isRegMask()) {
2335 if (MO.clobbersPhysReg(R)) {
2348 int64_t &
Value)
const {
2349 unsigned Opc =
MI.getOpcode();
2352 default:
return false;
2357 SrcReg =
MI.getOperand(1).getReg();
2359 Value =
MI.getOperand(2).getImm();
2368 SrcReg =
MI.getOperand(1).getReg();
2369 SrcReg2 =
MI.getOperand(2).getReg();
2388 if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
2400 bool isPPC64 = Subtarget.
isPPC64();
2401 bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
2402 bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
2403 bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
2412 if (!
MI)
return false;
2414 bool equalityOnly =
false;
2417 if (is32BitSignedCompare) {
2423 }
else if (is32BitUnsignedCompare) {
2428 equalityOnly =
true;
2432 equalityOnly = is64BitUnsignedCompare;
2434 equalityOnly = is32BitUnsignedCompare;
2440 I =
MRI->use_instr_begin(CRReg), IE =
MRI->use_instr_end();
2452 if (SubIdx != PPC::sub_eq)
2464 bool FoundUse =
false;
2466 J =
MRI->use_instr_begin(CRReg), JE =
MRI->use_instr_end();
2493 else if (
Value != 0) {
2502 if (equalityOnly || !
MRI->hasOneUse(CRReg))
2512 int16_t Immed = (int16_t)
Value;
2546 for (;
I != E && !noSub; --
I) {
2548 unsigned IOpC = Instr.getOpcode();
2550 if (&*
I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0,
TRI) ||
2551 Instr.readsRegister(PPC::CR0,
TRI)))
2560 if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
2561 OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
2562 (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
2563 ((Instr.getOperand(1).getReg() == SrcReg &&
2564 Instr.getOperand(2).getReg() == SrcReg2) ||
2565 (Instr.getOperand(1).getReg() == SrcReg2 &&
2566 Instr.getOperand(2).getReg() == SrcReg))) {
2584 int MIOpC =
MI->getOpcode();
2585 if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
2586 MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)
2589 NewOpC = PPC::getRecordFormOpcode(MIOpC);
2607 if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&
2617 bool ShouldSwap =
false;
2618 if (Sub &&
Value == 0) {
2624 ShouldSwap = !ShouldSwap;
2629 I =
MRI->use_instr_begin(CRReg), IE =
MRI->use_instr_end();
2637 "Invalid predicate for equality-only optimization");
2644 assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
2645 "Invalid CR bit for equality-only optimization");
2647 if (NewSubReg == PPC::sub_lt)
2648 NewSubReg = PPC::sub_gt;
2649 else if (NewSubReg == PPC::sub_gt)
2650 NewSubReg = PPC::sub_lt;
2658 "Non-zero immediate support and ShouldSwap"
2659 "may conflict in updating predicate");
2667 BuildMI(*
MI->getParent(), std::next(MII),
MI->getDebugLoc(),
2668 get(TargetOpcode::COPY), CRReg)
2673 MI->clearRegisterDeads(PPC::CR0);
2675 if (MIOpC != NewOpC) {
2685 if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
2686 Register GPRRes =
MI->getOperand(0).getReg();
2687 int64_t SH =
MI->getOperand(2).getImm();
2688 int64_t MB =
MI->getOperand(3).getImm();
2689 int64_t ME =
MI->getOperand(4).getImm();
2692 bool MBInLoHWord = MB >= 16;
2693 bool MEInLoHWord = ME >= 16;
2696 if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
2697 Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
2699 Mask >>= MBInLoHWord ? 0 : 16;
2700 NewOpC = MIOpC == PPC::RLWINM
2701 ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
2702 : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
2703 }
else if (
MRI->use_empty(GPRRes) && (ME == 31) &&
2704 (ME - MB + 1 == SH) && (MB >= 16)) {
2708 Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
2710 NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;
2713 if (Mask != ~0LLU) {
2714 MI->removeOperand(4);
2715 MI->removeOperand(3);
2716 MI->getOperand(2).setImm(Mask);
2717 NumRcRotatesConvertedToRcAnd++;
2719 }
else if (MIOpC == PPC::RLDICL &&
MI->getOperand(2).getImm() == 0) {
2720 int64_t MB =
MI->getOperand(3).getImm();
2722 uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
2723 NewOpC = PPC::ANDI8_rec;
2724 MI->removeOperand(3);
2725 MI->getOperand(2).setImm(Mask);
2726 NumRcRotatesConvertedToRcAnd++;
2731 MI->setDesc(NewDesc);
2734 if (!
MI->definesRegister(ImpDef)) {
2735 MI->addOperand(*
MI->getParent()->getParent(),
2740 if (!
MI->readsRegister(ImpUse)) {
2741 MI->addOperand(*
MI->getParent()->getParent(),
2746 assert(
MI->definesRegister(PPC::CR0) &&
2747 "Record-form instruction does not define cr0?");
2752 for (
unsigned i = 0, e = PredsToUpdate.
size(); i < e; i++)
2753 PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
2755 for (
unsigned i = 0, e = SubRegsToUpdate.
size(); i < e; i++)
2756 SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
2767 int64_t CmpMask, CmpValue;
2772 if (CmpValue || !CmpMask || SrcReg2)
2780 if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)
2787 if (Subtarget.
isPPC64() && Opc == PPC::CMPWI)
2794 bool SrcRegHasOtherUse =
false;
2801 if (CRReg != PPC::CR0)
2805 bool SeenUseOfCRReg =
false;
2806 bool IsCRRegKilled =
false;
2807 if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI,
false, IsCRRegKilled,
2813 int NewOpC = PPC::getRecordFormOpcode(SrcMIOpc);
2827 "Record-form instruction does not define cr0?");
2841 OffsetIsScalable =
false;
2876 case PPC::DFSTOREf64:
2877 return FirstOpc == SecondOpc;
2883 return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;
2890 int64_t OpOffset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
2891 unsigned NumBytes)
const {
2897 "Only base registers and frame indices are supported.");
2902 if (ClusterSize > 2)
2916 unsigned FirstOpc = FirstLdSt.
getOpcode();
2917 unsigned SecondOpc = SecondLdSt.
getOpcode();
2929 int64_t Offset1 = 0, Offset2 = 0;
2937 assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
2938 "getMemOperandWithOffsetWidth return incorrect base op");
2940 assert(Offset1 <= Offset2 &&
"Caller should have ordered offsets.");
2941 return Offset1 + (int64_t)Width1.
getValue() == Offset2;
2948 unsigned Opcode =
MI.getOpcode();
2950 if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR) {
2952 const char *AsmStr =
MI.getOperand(0).getSymbolName();
2954 }
else if (Opcode == TargetOpcode::STACKMAP) {
2957 }
else if (Opcode == TargetOpcode::PATCHPOINT) {
2961 return get(Opcode).getSize();
2965std::pair<unsigned, unsigned>
2968 return std::make_pair(TF, 0u);
2973 using namespace PPCII;
2974 static const std::pair<unsigned, const char *> TargetFlags[] = {
2975 {MO_PLT,
"ppc-plt"},
2976 {MO_PIC_FLAG,
"ppc-pic"},
2977 {MO_PCREL_FLAG,
"ppc-pcrel"},
2978 {MO_GOT_FLAG,
"ppc-got"},
2979 {MO_PCREL_OPT_FLAG,
"ppc-opt-pcrel"},
2980 {MO_TLSGD_FLAG,
"ppc-tlsgd"},
2981 {MO_TPREL_FLAG,
"ppc-tprel"},
2982 {MO_TLSLDM_FLAG,
"ppc-tlsldm"},
2983 {MO_TLSLD_FLAG,
"ppc-tlsld"},
2984 {MO_TLSGDM_FLAG,
"ppc-tlsgdm"},
2985 {MO_GOT_TLSGD_PCREL_FLAG,
"ppc-got-tlsgd-pcrel"},
2986 {MO_GOT_TLSLD_PCREL_FLAG,
"ppc-got-tlsld-pcrel"},
2987 {MO_GOT_TPREL_PCREL_FLAG,
"ppc-got-tprel-pcrel"},
2990 {MO_TPREL_LO,
"ppc-tprel-lo"},
2991 {MO_TPREL_HA,
"ppc-tprel-ha"},
2992 {MO_DTPREL_LO,
"ppc-dtprel-lo"},
2993 {MO_TLSLD_LO,
"ppc-tlsld-lo"},
2994 {MO_TOC_LO,
"ppc-toc-lo"},
2995 {MO_TLS,
"ppc-tls"},
2996 {MO_PIC_HA_FLAG,
"ppc-ha-pic"},
2997 {MO_PIC_LO_FLAG,
"ppc-lo-pic"},
2998 {MO_TPREL_PCREL_FLAG,
"ppc-tprel-pcrel"},
2999 {MO_TLS_PCREL_FLAG,
"ppc-tls-pcrel"},
3000 {MO_GOT_PCREL_FLAG,
"ppc-got-pcrel"},
3012 unsigned UpperOpcode, LowerOpcode;
3013 switch (
MI.getOpcode()) {
3014 case PPC::DFLOADf32:
3015 UpperOpcode = PPC::LXSSP;
3016 LowerOpcode = PPC::LFS;
3018 case PPC::DFLOADf64:
3019 UpperOpcode = PPC::LXSD;
3020 LowerOpcode = PPC::LFD;
3022 case PPC::DFSTOREf32:
3023 UpperOpcode = PPC::STXSSP;
3024 LowerOpcode = PPC::STFS;
3026 case PPC::DFSTOREf64:
3027 UpperOpcode = PPC::STXSD;
3028 LowerOpcode = PPC::STFD;
3030 case PPC::XFLOADf32:
3031 UpperOpcode = PPC::LXSSPX;
3032 LowerOpcode = PPC::LFSX;
3034 case PPC::XFLOADf64:
3035 UpperOpcode = PPC::LXSDX;
3036 LowerOpcode = PPC::LFDX;
3038 case PPC::XFSTOREf32:
3039 UpperOpcode = PPC::STXSSPX;
3040 LowerOpcode = PPC::STFSX;
3042 case PPC::XFSTOREf64:
3043 UpperOpcode = PPC::STXSDX;
3044 LowerOpcode = PPC::STFDX;
3047 UpperOpcode = PPC::LXSIWAX;
3048 LowerOpcode = PPC::LFIWAX;
3051 UpperOpcode = PPC::LXSIWZX;
3052 LowerOpcode = PPC::LFIWZX;
3055 UpperOpcode = PPC::STXSIWX;
3056 LowerOpcode = PPC::STFIWX;
3062 Register TargetReg =
MI.getOperand(0).getReg();
3064 if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
3065 (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
3066 Opcode = LowerOpcode;
3068 Opcode = UpperOpcode;
3069 MI.setDesc(
get(Opcode));
3078 auto &
MBB = *
MI.getParent();
3079 auto DL =
MI.getDebugLoc();
3081 switch (
MI.getOpcode()) {
3082 case PPC::BUILD_UACC: {
3085 if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
3086 MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
3087 MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;
3091 for (
int VecNo = 0; VecNo < 4; VecNo++)
3093 .addReg(SrcVSR + VecNo)
3101 case PPC::KILL_PAIR: {
3102 MI.setDesc(
get(PPC::UNENCODED_NOP));
3103 MI.removeOperand(1);
3104 MI.removeOperand(0);
3107 case TargetOpcode::LOAD_STACK_GUARD: {
3109 "Only Linux target is expected to contain LOAD_STACK_GUARD");
3110 const int64_t
Offset = Subtarget.
isPPC64() ? -0x7010 : -0x7008;
3111 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3112 MI.setDesc(
get(Subtarget.
isPPC64() ? PPC::LD : PPC::LWZ));
3118 case PPC::PPCLdFixedAddr: {
3120 "Only targets with Glibc expected to contain PPCLdFixedAddr");
3122 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3123 MI.setDesc(
get(PPC::LWZ));
3125#undef PPC_LNX_FEATURE
3127#define PPC_LNX_DEFINE_OFFSETS
3128#include "llvm/TargetParser/PPCTargetParser.def"
3130 bool Is64 = Subtarget.
isPPC64();
3131 if (FAType == PPC_FAWORD_HWCAP) {
3133 Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;
3135 Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
3136 }
else if (FAType == PPC_FAWORD_HWCAP2) {
3138 Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;
3140 Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
3141 }
else if (FAType == PPC_FAWORD_CPUID) {
3143 Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;
3145 Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;
3147 assert(
Offset &&
"Do not know the offset for this fixed addr load");
3148 MI.removeOperand(1);
3154#define PPC_TGT_PARSER_UNDEF_MACROS
3155#include "llvm/TargetParser/PPCTargetParser.def"
3156#undef PPC_TGT_PARSER_UNDEF_MACROS
3158 case PPC::DFLOADf32:
3159 case PPC::DFLOADf64:
3160 case PPC::DFSTOREf32:
3161 case PPC::DFSTOREf64: {
3162 assert(Subtarget.hasP9Vector() &&
3163 "Invalid D-Form Pseudo-ops on Pre-P9 target.");
3166 "D-form op must have register and immediate operands");
3169 case PPC::XFLOADf32:
3170 case PPC::XFSTOREf32:
3174 assert(Subtarget.hasP8Vector() &&
3175 "Invalid X-Form Pseudo-ops on Pre-P8 target.");
3176 assert(
MI.getOperand(2).isReg() &&
MI.getOperand(1).isReg() &&
3177 "X-form op must have register and register operands");
3180 case PPC::XFLOADf64:
3181 case PPC::XFSTOREf64: {
3182 assert(Subtarget.hasVSX() &&
3183 "Invalid X-Form Pseudo-ops on target that has no VSX.");
3184 assert(
MI.getOperand(2).isReg() &&
MI.getOperand(1).isReg() &&
3185 "X-form op must have register and register operands");
3188 case PPC::SPILLTOVSR_LD: {
3189 Register TargetReg =
MI.getOperand(0).getReg();
3190 if (PPC::VSFRCRegClass.
contains(TargetReg)) {
3191 MI.setDesc(
get(PPC::DFLOADf64));
3195 MI.setDesc(
get(PPC::LD));
3198 case PPC::SPILLTOVSR_ST: {
3200 if (PPC::VSFRCRegClass.
contains(SrcReg)) {
3201 NumStoreSPILLVSRRCAsVec++;
3202 MI.setDesc(
get(PPC::DFSTOREf64));
3205 NumStoreSPILLVSRRCAsGpr++;
3206 MI.setDesc(
get(PPC::STD));
3210 case PPC::SPILLTOVSR_LDX: {
3211 Register TargetReg =
MI.getOperand(0).getReg();
3212 if (PPC::VSFRCRegClass.
contains(TargetReg))
3213 MI.setDesc(
get(PPC::LXSDX));
3215 MI.setDesc(
get(PPC::LDX));
3218 case PPC::SPILLTOVSR_STX: {
3220 if (PPC::VSFRCRegClass.
contains(SrcReg)) {
3221 NumStoreSPILLVSRRCAsVec++;
3222 MI.setDesc(
get(PPC::STXSDX));
3224 NumStoreSPILLVSRRCAsGpr++;
3225 MI.setDesc(
get(PPC::STDX));
3232 case PPC::CFENCE8: {
3233 auto Val =
MI.getOperand(0).getReg();
3234 unsigned CmpOp = Subtarget.
isPPC64() ? PPC::CMPD : PPC::CMPW;
3240 MI.setDesc(
get(PPC::ISYNC));
3241 MI.removeOperand(0);
3252static unsigned selectReg(int64_t Imm1, int64_t Imm2,
unsigned CompareOpc,
3253 unsigned TrueReg,
unsigned FalseReg,
3254 unsigned CRSubReg) {
3256 if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
3260 return Imm1 < Imm2 ? TrueReg : FalseReg;
3262 return Imm1 > Imm2 ? TrueReg : FalseReg;
3264 return Imm1 == Imm2 ? TrueReg : FalseReg;
3268 else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
3276 return Imm1 == Imm2 ? TrueReg : FalseReg;
3279 return PPC::NoRegister;
3284 int64_t Imm)
const {
3285 assert(
MI.getOperand(OpNo).isReg() &&
"Operand must be a REG");
3287 Register InUseReg =
MI.getOperand(OpNo).getReg();
3288 MI.getOperand(OpNo).ChangeToImmediate(Imm);
3296 int UseOpIdx =
MI.findRegisterUseOperandIdx(InUseReg,
false,
TRI);
3297 if (UseOpIdx >= 0) {
3307 MI.removeOperand(UseOpIdx);
3316 int OperandToKeep = LII.
SetCR ? 1 : 0;
3317 for (
int i =
MI.getNumOperands() - 1; i > OperandToKeep; i--)
3318 MI.removeOperand(i);
3322 MI.setDesc(
get(LII.
Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3337 bool &SeenIntermediateUse)
const {
3338 assert(!
MI.getParent()->getParent()->getRegInfo().isSSA() &&
3339 "Should be called after register allocation.");
3343 SeenIntermediateUse =
false;
3344 for (; It != E; ++It) {
3345 if (It->modifiesRegister(Reg,
TRI))
3347 if (It->readsRegister(Reg,
TRI))
3348 SeenIntermediateUse =
true;
3356 int64_t Imm)
const {
3358 "Register should be in non-SSA form after RA");
3359 bool isPPC64 = Subtarget.
isPPC64();
3363 if (isInt<16>(Imm)) {
3365 }
else if (isInt<32>(Imm)) {
3373 assert(isPPC64 &&
"Materializing 64-bit immediate to single register is "
3374 "only supported in PPC64");
3376 if ((Imm >> 32) & 0xFFFF)
3379 .
addImm((Imm >> 32) & 0xFFFF);
3386 .
addImm((Imm >> 16) & 0xFFFF);
3396 unsigned &OpNoForForwarding,
3397 bool &SeenIntermediateUse)
const {
3398 OpNoForForwarding = ~0U;
3406 for (
int i = 1, e =
MI.getNumOperands(); i < e; i++) {
3407 if (!
MI.getOperand(i).isReg())
3410 if (!Reg.isVirtual())
3415 if (DefMIForTrueReg->
getOpcode() == PPC::LI ||
3416 DefMIForTrueReg->
getOpcode() == PPC::LI8 ||
3417 DefMIForTrueReg->
getOpcode() == PPC::ADDI ||
3418 DefMIForTrueReg->
getOpcode() == PPC::ADDI8) {
3419 OpNoForForwarding = i;
3420 DefMI = DefMIForTrueReg;
3435 unsigned Opc =
MI.getOpcode();
3436 bool ConvertibleImmForm =
3437 Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
3438 Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
3439 Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
3440 Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
3441 Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
3442 Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
3443 Opc == PPC::RLWINM8_rec;
3444 bool IsVFReg = (
MI.getNumOperands() &&
MI.getOperand(0).isReg())
3451 if ((Opc == PPC::OR || Opc == PPC::OR8) &&
3452 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
3454 for (
int i = 1, e =
MI.getNumOperands(); i <
e; i++) {
3456 SeenIntermediateUse =
false;
3470 case PPC::ADDItocL8:
3473 OpNoForForwarding = i;
3480 return OpNoForForwarding == ~0
U ? nullptr :
DefMI;
3483unsigned PPCInstrInfo::getSpillTarget()
const {
3486 bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();
3487 return Subtarget.isISAFuture() ? 3 : IsP10Variant ?
3488 2 : Subtarget.hasP9Vector() ?
3527 bool PostRA = !
MRI->isSSA();
3533 unsigned ToBeDeletedReg = 0;
3534 int64_t OffsetImm = 0;
3535 unsigned XFormOpcode = 0;
3543 bool OtherIntermediateUse =
false;
3547 if (OtherIntermediateUse || !ADDMI)
3554 unsigned ScaleRegIdx = 0;
3555 int64_t OffsetAddi = 0;
3569 assert(ADDIMI &&
"There should be ADDIMI for valid ToBeChangedReg.");
3574 for (
auto It = ++Start; It !=
End; It++)
3583 (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))
3588 if (NewDefFor(ToBeChangedReg, *ADDMI,
MI) || NewDefFor(ScaleReg, *ADDMI,
MI))
3604 MI.setDesc(
get(XFormOpcode));
3606 .ChangeToRegister(ScaleReg,
false,
false,
3610 .ChangeToRegister(ToBeChangedReg,
false,
false,
true);
3622 int64_t &Imm)
const {
3626 if (Opc != PPC::ADDI && Opc != PPC::ADDI8)
3642 return Opc == PPC::ADD4 || Opc == PPC::ADD8;
3646 unsigned &ToBeDeletedReg,
3647 unsigned &XFormOpcode,
3651 if (!
MI.mayLoadOrStore())
3654 unsigned Opc =
MI.getOpcode();
3659 if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
3673 if (!ImmOperand.
isImm())
3676 assert(RegOperand.
isReg() &&
"Instruction format is not right");
3679 if (!RegOperand.
isKill())
3682 ToBeDeletedReg = RegOperand.
getReg();
3683 OffsetImm = ImmOperand.
getImm();
3690 int64_t &OffsetAddi,
3691 int64_t OffsetImm)
const {
3698 bool OtherIntermediateUse =
false;
3719 if (OtherIntermediateUse || !ADDIMI)
3725 if (isInt<16>(OffsetAddi + OffsetImm))
3738 bool PostRA = !
MRI->isSSA();
3739 bool SeenIntermediateUse =
true;
3740 unsigned ForwardingOperand = ~0U;
3742 SeenIntermediateUse);
3745 assert(ForwardingOperand <
MI.getNumOperands() &&
3746 "The forwarding operand needs to be valid at this point");
3747 bool IsForwardingOperandKilled =
MI.getOperand(ForwardingOperand).isKill();
3748 bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
3749 if (KilledDef && KillFwdDefMI)
3764 PPC::INSTRUCTION_LIST_END &&
3765 transformToNewImmFormFedByAdd(
MI, *
DefMI, ForwardingOperand))
3769 bool IsVFReg =
MI.getOperand(0).isReg()
3777 transformToImmFormFedByAdd(
MI, III, ForwardingOperand, *
DefMI,
3784 transformToImmFormFedByLI(
MI, III, ForwardingOperand, *
DefMI))
3789 if (!HasImmForm && simplifyToLI(
MI, *
DefMI, ForwardingOperand, KilledDef))
3798 Register FoldingReg =
MI.getOperand(1).getReg();
3802 if (SrcMI->
getOpcode() != PPC::RLWINM &&
3803 SrcMI->
getOpcode() != PPC::RLWINM_rec &&
3807 assert((
MI.getOperand(2).isImm() &&
MI.getOperand(3).isImm() &&
3810 "Invalid PPC::RLWINM Instruction!");
3818 assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
3819 "Invalid PPC::RLWINM Instruction!");
3841 bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
3844 if ((MBMI > MEMI) && !SrcMaskFull)
3854 APInt RotatedSrcMask = MaskSrc.
rotl(SHMI);
3855 APInt FinalMask = RotatedSrcMask & MaskMI;
3857 bool Simplified =
false;
3860 if (FinalMask.
isZero()) {
3862 (
MI.getOpcode() == PPC::RLWINM8 ||
MI.getOpcode() == PPC::RLWINM8_rec);
3867 if (
MI.getOpcode() == PPC::RLWINM ||
MI.getOpcode() == PPC::RLWINM8) {
3869 MI.removeOperand(4);
3870 MI.removeOperand(3);
3871 MI.removeOperand(2);
3872 MI.getOperand(1).ChangeToImmediate(0);
3873 MI.setDesc(
get(Is64Bit ? PPC::LI8 : PPC::LI));
3876 MI.removeOperand(4);
3877 MI.removeOperand(3);
3878 MI.getOperand(2).setImm(0);
3879 MI.setDesc(
get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3882 MI.getOperand(1).setIsKill(
true);
3886 MI.getOperand(1).setIsKill(
false);
3902 uint16_t NewSH = (SHSrc + SHMI) % 32;
3903 MI.getOperand(2).setImm(NewSH);
3906 MI.getOperand(3).setImm(NewMB);
3907 MI.getOperand(4).setImm(NewME);
3911 MI.getOperand(1).setIsKill(
true);
3915 MI.getOperand(1).setIsKill(
false);
3920 if (Simplified &
MRI->use_nodbg_empty(FoldingReg) &&
3945 default:
return false;
3953 III.
ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
3962 III.
ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
3978 III.
ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
3986 III.
ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
3994 III.
ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
4014 case PPC::OR: III.
ImmOpcode = PPC::ORI;
break;
4015 case PPC::OR8: III.
ImmOpcode = PPC::ORI8;
break;
4016 case PPC::XOR: III.
ImmOpcode = PPC::XORI;
break;
4017 case PPC::XOR8: III.
ImmOpcode = PPC::XORI8;
break;
4022 case PPC::RLWNM_rec:
4023 case PPC::RLWNM8_rec:
4043 if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
4044 Opc == PPC::RLWNM8_rec)
4050 case PPC::RLWNM: III.
ImmOpcode = PPC::RLWINM;
break;
4051 case PPC::RLWNM8: III.
ImmOpcode = PPC::RLWINM8;
break;
4052 case PPC::RLWNM_rec:
4055 case PPC::RLWNM8_rec:
4058 case PPC::SLW: III.
ImmOpcode = PPC::RLWINM;
break;
4059 case PPC::SLW8: III.
ImmOpcode = PPC::RLWINM8;
break;
4066 case PPC::SRW: III.
ImmOpcode = PPC::RLWINM;
break;
4067 case PPC::SRW8: III.
ImmOpcode = PPC::RLWINM8;
break;
4087 case PPC::RLDCL_rec:
4089 case PPC::RLDCR_rec:
4105 if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
4106 Opc == PPC::RLDCR_rec)
4112 case PPC::RLDCL: III.
ImmOpcode = PPC::RLDICL;
break;
4113 case PPC::RLDCL_rec:
4116 case PPC::RLDCR: III.
ImmOpcode = PPC::RLDICR;
break;
4117 case PPC::RLDCR_rec:
4120 case PPC::SLD: III.
ImmOpcode = PPC::RLDICR;
break;
4124 case PPC::SRD: III.
ImmOpcode = PPC::RLDICL;
break;
4171 case PPC::LBZX: III.
ImmOpcode = PPC::LBZ;
break;
4172 case PPC::LBZX8: III.
ImmOpcode = PPC::LBZ8;
break;
4173 case PPC::LHZX: III.
ImmOpcode = PPC::LHZ;
break;
4174 case PPC::LHZX8: III.
ImmOpcode = PPC::LHZ8;
break;
4175 case PPC::LHAX: III.
ImmOpcode = PPC::LHA;
break;
4176 case PPC::LHAX8: III.
ImmOpcode = PPC::LHA8;
break;
4177 case PPC::LWZX: III.
ImmOpcode = PPC::LWZ;
break;
4178 case PPC::LWZX8: III.
ImmOpcode = PPC::LWZ8;
break;
4184 case PPC::LFSX: III.
ImmOpcode = PPC::LFS;
break;
4185 case PPC::LFDX: III.
ImmOpcode = PPC::LFD;
break;
4186 case PPC::STBX: III.
ImmOpcode = PPC::STB;
break;
4187 case PPC::STBX8: III.
ImmOpcode = PPC::STB8;
break;
4188 case PPC::STHX: III.
ImmOpcode = PPC::STH;
break;
4189 case PPC::STHX8: III.
ImmOpcode = PPC::STH8;
break;
4190 case PPC::STWX: III.
ImmOpcode = PPC::STW;
break;
4191 case PPC::STWX8: III.
ImmOpcode = PPC::STW8;
break;
4196 case PPC::STFSX: III.
ImmOpcode = PPC::STFS;
break;
4197 case PPC::STFDX: III.
ImmOpcode = PPC::STFD;
break;
4229 case PPC::LBZUX: III.
ImmOpcode = PPC::LBZU;
break;
4230 case PPC::LBZUX8: III.
ImmOpcode = PPC::LBZU8;
break;
4231 case PPC::LHZUX: III.
ImmOpcode = PPC::LHZU;
break;
4232 case PPC::LHZUX8: III.
ImmOpcode = PPC::LHZU8;
break;
4233 case PPC::LHAUX: III.
ImmOpcode = PPC::LHAU;
break;
4234 case PPC::LHAUX8: III.
ImmOpcode = PPC::LHAU8;
break;
4235 case PPC::LWZUX: III.
ImmOpcode = PPC::LWZU;
break;
4236 case PPC::LWZUX8: III.
ImmOpcode = PPC::LWZU8;
break;
4241 case PPC::LFSUX: III.
ImmOpcode = PPC::LFSU;
break;
4242 case PPC::LFDUX: III.
ImmOpcode = PPC::LFDU;
break;
4243 case PPC::STBUX: III.
ImmOpcode = PPC::STBU;
break;
4244 case PPC::STBUX8: III.
ImmOpcode = PPC::STBU8;
break;
4245 case PPC::STHUX: III.
ImmOpcode = PPC::STHU;
break;
4246 case PPC::STHUX8: III.
ImmOpcode = PPC::STHU8;
break;
4247 case PPC::STWUX: III.
ImmOpcode = PPC::STWU;
break;
4248 case PPC::STWUX8: III.
ImmOpcode = PPC::STWU8;
break;
4253 case PPC::STFSUX: III.
ImmOpcode = PPC::STFSU;
break;
4254 case PPC::STFDUX: III.
ImmOpcode = PPC::STFDU;
break;
4267 case PPC::XFLOADf32:
4268 case PPC::XFLOADf64:
4269 case PPC::XFSTOREf32:
4270 case PPC::XFSTOREf64:
4271 if (!Subtarget.hasP9Vector())
4298 case PPC::XFLOADf32:
4312 case PPC::XFLOADf64:
4330 case PPC::XFSTOREf32:
4344 case PPC::XFSTOREf64:
4355 assert(Op1 != Op2 &&
"Cannot swap operand with itself.");
4357 unsigned MaxOp = std::max(Op1, Op2);
4358 unsigned MinOp = std::min(Op1, Op2);
4361 MI.removeOperand(std::max(Op1, Op2));
4362 MI.removeOperand(std::min(Op1, Op2));
4366 if (MaxOp - MinOp == 1 &&
MI.getNumOperands() == MinOp) {
4367 MI.addOperand(MOp2);
4368 MI.addOperand(MOp1);
4373 unsigned TotalOps =
MI.getNumOperands() + 2;
4374 for (
unsigned i =
MI.getNumOperands() - 1; i >= MinOp; i--) {
4376 MI.removeOperand(i);
4379 MI.addOperand(MOp2);
4381 for (
unsigned i =
MI.getNumOperands(); i < TotalOps; i++) {
4383 MI.addOperand(MOp1);
4385 MI.addOperand(MOps.
back());
4396 unsigned OpNoForForwarding
4436 unsigned Opc =
DefMI.getOpcode();
4437 if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
4441 "Add inst must have at least three operands");
4442 RegMO = &
DefMI.getOperand(1);
4443 ImmMO = &
DefMI.getOperand(2);
4446 if (!RegMO->
isReg())
4455bool PPCInstrInfo::isRegElgibleForForwarding(
4458 bool &IsFwdFeederRegKilled,
bool &SeenIntermediateUse)
const {
4475 for (; It != E; ++It) {
4479 IsFwdFeederRegKilled =
true;
4481 SeenIntermediateUse =
true;
4483 if ((&*It) == &
DefMI)
4496bool PPCInstrInfo::isImmElgibleForForwarding(
const MachineOperand &ImmMO,
4500 int64_t BaseImm)
const {
4502 if (
DefMI.getOpcode() == PPC::ADDItocL8) {
4523 if (ImmMO.
isImm()) {
4528 APInt ActualValue(64, ImmMO.
getImm() + BaseImm,
true);
4533 Imm = SignExtend64<16>(ImmMO.
getImm() + BaseImm);
4549 unsigned OpNoForForwarding,
4551 if ((
DefMI.getOpcode() != PPC::LI &&
DefMI.getOpcode() != PPC::LI8) ||
4552 !
DefMI.getOperand(1).isImm())
4559 int64_t Immediate =
DefMI.getOperand(1).getImm();
4561 int64_t SExtImm = SignExtend64<16>(Immediate);
4563 bool ReplaceWithLI =
false;
4564 bool Is64BitLI =
false;
4567 unsigned Opc =
MI.getOpcode();
4588 bool Changed =
false;
4590 int64_t Comparand =
MI.getOperand(2).getImm();
4591 int64_t SExtComparand = ((
uint64_t)Comparand & ~0x7FFFuLL) != 0
4592 ? (Comparand | 0xFFFFFFFFFFFF0000)
4595 for (
auto &CompareUseMI :
MRI->use_instructions(DefReg)) {
4596 unsigned UseOpc = CompareUseMI.getOpcode();
4597 if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
4599 unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
4600 Register TrueReg = CompareUseMI.getOperand(1).getReg();
4601 Register FalseReg = CompareUseMI.getOperand(2).getReg();
4602 unsigned RegToCopy =
4603 selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg);
4604 if (RegToCopy == PPC::NoRegister)
4607 if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
4608 CompareUseMI.setDesc(
get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
4610 CompareUseMI.removeOperand(3);
4611 CompareUseMI.removeOperand(2);
4615 dbgs() <<
"Found LI -> CMPI -> ISEL, replacing with a copy.\n");
4619 CompareUseMI.setDesc(
get(PPC::COPY));
4620 CompareUseMI.removeOperand(3);
4621 CompareUseMI.removeOperand(RegToCopy == TrueReg ? 2 : 1);
4622 CmpIselsConverted++;
4631 MissedConvertibleImmediateInstrs++;
4639 int64_t Addend =
MI.getOperand(2).getImm();
4640 if (isInt<16>(Addend + SExtImm)) {
4641 ReplaceWithLI =
true;
4642 Is64BitLI = Opc == PPC::ADDI8;
4643 NewImm = Addend + SExtImm;
4649 case PPC::SUBFIC8: {
4651 if (
MI.getNumOperands() > 3 && !
MI.getOperand(3).isDead())
4653 int64_t Minuend =
MI.getOperand(2).getImm();
4654 if (isInt<16>(Minuend - SExtImm)) {
4655 ReplaceWithLI =
true;
4656 Is64BitLI = Opc == PPC::SUBFIC8;
4657 NewImm = Minuend - SExtImm;
4663 case PPC::RLDICL_rec:
4664 case PPC::RLDICL_32:
4665 case PPC::RLDICL_32_64: {
4667 int64_t SH =
MI.getOperand(2).getImm();
4668 int64_t MB =
MI.getOperand(3).getImm();
4669 APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
4671 InVal = InVal.rotl(SH);
4677 if (isUInt<15>(InVal.getSExtValue()) ||
4678 (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
4679 ReplaceWithLI =
true;
4680 Is64BitLI = Opc != PPC::RLDICL_32;
4681 NewImm = InVal.getSExtValue();
4682 SetCR = Opc == PPC::RLDICL_rec;
4689 case PPC::RLWINM_rec:
4690 case PPC::RLWINM8_rec: {
4691 int64_t SH =
MI.getOperand(2).getImm();
4692 int64_t MB =
MI.getOperand(3).getImm();
4693 int64_t ME =
MI.getOperand(4).getImm();
4694 APInt InVal(32, SExtImm,
true);
4695 InVal = InVal.rotl(SH);
4701 bool ValueFits = isUInt<15>(InVal.getSExtValue());
4702 ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
4703 isUInt<16>(InVal.getSExtValue()));
4705 ReplaceWithLI =
true;
4706 Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
4707 NewImm = InVal.getSExtValue();
4708 SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;
4717 int64_t LogicalImm =
MI.getOperand(2).getImm();
4719 if (Opc == PPC::ORI || Opc == PPC::ORI8)
4720 Result = LogicalImm | SExtImm;
4722 Result = LogicalImm ^ SExtImm;
4723 if (isInt<16>(Result)) {
4724 ReplaceWithLI =
true;
4725 Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;
4733 if (ReplaceWithLI) {
4738 bool ImmChanged = (SExtImm & NewImm) != NewImm;
4739 if (PostRA && ImmChanged)
4746 DefMI.getOperand(1).setImm(NewImm);
4750 else if (
MRI->use_empty(
MI.getOperand(0).getReg())) {
4752 assert(Immediate &&
"Transformation converted zero to non-zero?");
4755 }
else if (ImmChanged)
4770 if (KilledDef && SetCR)
4771 *KilledDef =
nullptr;
4784bool PPCInstrInfo::transformToNewImmFormFedByAdd(
4794 if (!
MI.mayLoadOrStore())
4799 assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
4800 "MI must have x-form opcode");
4804 bool IsVFReg =
MI.getOperand(0).isReg()
4818 if (!ImmOperandMI.
isImm())
4824 if (!isDefMIElgibleForForwarding(
DefMI, III, ImmMO, RegMO))
4826 assert(ImmMO && RegMO &&
"Imm and Reg operand must have been set");
4831 int64_t ImmBase = ImmOperandMI.
getImm();
4833 if (!isImmElgibleForForwarding(*ImmMO,
DefMI, III, Imm, ImmBase))
4837 LLVM_DEBUG(
dbgs() <<
"Replacing existing reg+imm instruction:\n");
4854bool PPCInstrInfo::transformToImmFormFedByAdd(
4864 if (!isUseMIElgibleForForwarding(
MI, III, OpNoForForwarding))
4871 if (!isDefMIElgibleForForwarding(
DefMI, III, ImmMO, RegMO))
4873 assert(ImmMO && RegMO &&
"Imm and Reg operand must have been set");
4878 if (!isImmElgibleForForwarding(*ImmMO,
DefMI, III, Imm))
4881 bool IsFwdFeederRegKilled =
false;
4882 bool SeenIntermediateUse =
false;
4884 if (!isRegElgibleForForwarding(*RegMO,
DefMI,
MI, KillDefMI,
4885 IsFwdFeederRegKilled, SeenIntermediateUse))
4905 if (ImmMO->
isImm()) {
4916 if (
DefMI.getOpcode() == PPC::ADDItocL8)
4926 MI.removeOperand(i);
4932 MI.addOperand(*ImmMO);
4934 for (
auto &MO : MOps)
4951 unsigned ConstantOpNo,
4954 if ((
DefMI.getOpcode() != PPC::LI &&
DefMI.getOpcode() != PPC::LI8) ||
4955 !
DefMI.getOperand(1).isImm())
4959 int64_t
Imm = SignExtend64<16>(
DefMI.getOperand(1).getImm());
4971 APInt ActualValue(64, Imm,
true);
4972 if (!ActualValue.isSignedIntN(III.
ImmWidth))
4986 Register OrigZeroReg =
MI.getOperand(PosForOrigZero).getReg();
4990 if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
4993 if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
4994 ConstantOpNo != PosForOrigZero)
4998 unsigned Opc =
MI.getOpcode();
4999 bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
5000 Opc == PPC::SRW || Opc == PPC::SRW_rec ||
5001 Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
5002 Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
5003 bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
5004 Opc == PPC::SRD || Opc == PPC::SRD_rec;
5005 bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
5006 Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
5007 bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
5008 Opc == PPC::SRD_rec;
5022 if (SpecialShift32 || SpecialShift64) {
5027 uint64_t ShAmt =
Imm & (SpecialShift32 ? 0x1F : 0x3F);
5028 if (Imm & (SpecialShift32 ? 0x20 : 0x40))
5033 else if (!SetCR && ShAmt == 0 && !PostRA) {
5034 MI.removeOperand(2);
5035 MI.setDesc(
get(PPC::COPY));
5038 if (SpecialShift32) {
5084 MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
5085 &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
5086 MRI.setRegClass(RegToModify, NewRC);
5102 if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
5103 return &PPC::VSRCRegClass;
5108 return PPC::getRecordFormOpcode(Opcode);
5112 return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
5113 Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU ||
5114 Opcode == PPC::LHZUX || Opcode == PPC::LHZU8 ||
5115 Opcode == PPC::LHZUX8);
5128 int Opcode =
MI->getOpcode();
5131 if (
TII->isSExt32To64(Opcode))
5140 if (Opcode == PPC::RLDICL &&
MI->getOperand(3).getImm() >= 33)
5146 if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5147 Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
5148 MI->getOperand(3).getImm() > 0 &&
5149 MI->getOperand(3).getImm() <=
MI->getOperand(4).getImm())
5154 if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
5156 if ((Imm & 0x8000) == 0)
5175 int Opcode =
MI->getOpcode();
5178 if (
TII->isZExt32To64(Opcode))
5183 Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8) &&
5184 MI->getOperand(0).getReg() == Reg)
5189 if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
5190 Opcode == PPC::LIS || Opcode == PPC::LIS8) {
5191 int64_t Imm =
MI->getOperand(1).getImm();
5192 if (((
uint64_t)Imm & ~0x7FFFuLL) == 0)
5198 if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
5199 Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
5200 Opcode == PPC::RLDICL_32_64) &&
5201 MI->getOperand(3).getImm() >= 32)
5204 if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
5205 MI->getOperand(3).getImm() >= 32 &&
5206 MI->getOperand(3).getImm() <= 63 -
MI->getOperand(2).getImm())
5209 if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5210 Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
5211 Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
5212 MI->getOperand(3).getImm() <=
MI->getOperand(4).getImm())
5221 if (!
MI.getOperand(1).isImm() || !
MI.getOperand(2).isReg())
5225 Register StackReg =
MI.getOperand(2).getReg();
5227 if (StackReg == SPReg &&
StackOffset == TOCSaveOffset)
5241std::pair<bool, bool>
5243 const unsigned BinOpDepth,
5246 return std::pair<bool, bool>(
false,
false);
5250 return std::pair<bool, bool>(
false,
false);
5257 if (IsSExt && IsZExt)
5258 return std::pair<bool, bool>(IsSExt, IsZExt);
5260 switch (
MI->getOpcode()) {
5262 Register SrcReg =
MI->getOperand(1).getReg();
5271 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5272 SrcExt.second || IsZExt);
5278 if (
MI->getParent()->getBasicBlock() ==
5284 return std::pair<bool, bool>(IsSExt, IsZExt);
5288 if (SrcReg != PPC::X3) {
5291 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5292 SrcExt.second || IsZExt);
5302 std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
5305 if (II ==
MBB->
instr_begin() || (--II)->getOpcode() != PPC::ADJCALLSTACKUP)
5306 return IsExtendPair;
5310 return IsExtendPair;
5315 return IsExtendPair;
5319 IsSExt |= Attrs.hasAttribute(Attribute::SExt);
5320 IsZExt |= Attrs.hasAttribute(Attribute::ZExt);
5321 return std::pair<bool, bool>(IsSExt, IsZExt);
5324 return IsExtendPair;
5333 Register SrcReg =
MI->getOperand(1).getReg();
5335 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5336 SrcExt.second || IsZExt);
5347 Register SrcReg =
MI->getOperand(1).getReg();
5351 return std::pair<bool, bool>(
false, SrcExt.second || IsZExt);
5353 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5354 SrcExt.second || IsZExt);
5364 return std::pair<bool, bool>(
false,
false);
5368 unsigned OperandEnd = 3, OperandStride = 1;
5369 if (
MI->getOpcode() == PPC::PHI) {
5370 OperandEnd =
MI->getNumOperands();
5376 for (
unsigned I = 1;
I != OperandEnd;
I += OperandStride) {
5377 if (!
MI->getOperand(
I).isReg())
5378 return std::pair<bool, bool>(
false,
false);
5382 IsSExt &= SrcExt.first;
5383 IsZExt &= SrcExt.second;
5385 return std::pair<bool, bool>(IsSExt, IsZExt);
5394 return std::pair<bool, bool>(
false,
false);
5396 Register SrcReg1 =
MI->getOperand(1).getReg();
5397 Register SrcReg2 =
MI->getOperand(2).getReg();
5400 return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
5401 Src1Ext.second || Src2Ext.second);
5407 return std::pair<bool, bool>(IsSExt, IsZExt);
5411 return (Opcode == (Subtarget.
isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
5424 :
Loop(
Loop), EndLoop(EndLoop), LoopCount(LoopCount),
5426 TII(MF->getSubtarget().getInstrInfo()) {
5435 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5437 return MI == EndLoop;
5440 std::optional<bool> createTripCountGreaterCondition(
5443 if (TripCount == -1) {
5453 return TripCount > TC;
5461 void adjustTripCount(
int TripCountAdjust)
override {
5464 if (LoopCount->
getOpcode() == PPC::LI8 ||
5475 void disposed()
override {
5476 Loop->eraseFromParent();
5483std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5488 if (Preheader == LoopBB)
5489 Preheader = *std::next(LoopBB->
pred_begin());
5492 if (
I != LoopBB->
end() &&
isBDNZ(
I->getOpcode())) {
5495 Register LoopCountReg = LoopInst->getOperand(0).getReg();
5498 return std::make_unique<PPCPipelinerLoopInfo>(LoopInst, &*
I, LoopCount);
5508 unsigned LOOPi = (Subtarget.
isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
5511 for (
auto &
I : PreHeader.
instrs())
5512 if (
I.getOpcode() == LOOPi)
5558 int64_t OffsetA = 0, OffsetB = 0;
5563 int LowOffset = std::min(OffsetA, OffsetB);
5564 int HighOffset = std::max(OffsetA, OffsetB);
5565 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
5567 LowOffset + (int)LowWidth.
getValue() <= HighOffset)
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static const Function * getParent(const Value *V)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isOpZeroOfSubwordPreincLoad(int Opcode)
static bool MBBDefinesCTR(MachineBasicBlock &MBB)
static bool definedByZeroExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI)
static cl::opt< float > FMARPFactor("ppc-fma-rp-factor", cl::Hidden, cl::init(1.5), cl::desc("register pressure factor for the transformations."))
#define InfoArrayIdxMULOpIdx
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc, unsigned TrueReg, unsigned FalseReg, unsigned CRSubReg)
static unsigned getCRBitValue(unsigned CRBit)
static bool isAnImmediateOperand(const MachineOperand &MO)
static const uint16_t FMAOpIdxInfo[][6]
static cl::opt< bool > DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden, cl::desc("Disable analysis for CTR loops"))
#define InfoArrayIdxAddOpIdx
static cl::opt< bool > UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden, cl::desc("Use the old (incorrect) instruction latency calculation"))
#define InfoArrayIdxFMAInst
static bool isClusterableLdStOpcPair(unsigned FirstOpc, unsigned SecondOpc, const PPCSubtarget &Subtarget)
static cl::opt< bool > EnableFMARegPressureReduction("ppc-fma-rp-reduction", cl::Hidden, cl::init(true), cl::desc("enable register pressure reduce in machine combiner pass."))
static bool isLdStSafeToCluster(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
const unsigned MAX_BINOP_DEPTH
static cl::opt< bool > DisableCmpOpt("disable-ppc-cmp-opt", cl::desc("Disable compare instruction optimization"), cl::Hidden)
#define InfoArrayIdxFSubInst
#define InfoArrayIdxFAddInst
#define InfoArrayIdxFMULInst
static bool definedBySignExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI)
static cl::opt< bool > VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy", cl::desc("Causes the backend to crash instead of generating a nop VSX copy"), cl::Hidden)
static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, unsigned hiBit)
Wrap version of getBitsSet.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
const BasicBlock & getEntryBlock() const
AttributeList getAttributes() const
Return the attribute list for this Function.
Type * getReturnType() const
Returns the type of the ret val.
A possibly irreducible generalization of a Loop.
Module * getParent()
Get the module that this global value is contained inside of...
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
Class to represent integer types.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
TypeSize getValue() const
Represents a single loop in the control flow graph.
Instances of this class represent a single low-level machine instruction.
void setOpcode(unsigned Op)
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
bool isPseudo() const
Return true if this is a pseudo instruction that doesn't correspond to a real machine instruction.
This holds information about one operand of a machine instruction, indicating the register class for ...
uint16_t Constraints
Operand constraints (see OperandConstraint enum).
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Wrapper class representing physical registers. Should be passed by value.
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
pred_iterator pred_begin()
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isCall(QueryType Type=AnyInBundle) const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool hasImplicitDef() const
Returns true if the instruction has implicit definition.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
iterator_range< mop_iterator > operands()
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImm(int64_t immVal)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Register getReg() const
getReg - Returns the register number.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isLiveIn(Register Reg) const
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
PPCDispatchGroupSBHazardRecognizer - This class implements a scoreboard-based hazard recognizer for P...
uint64_t getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
bool isLiveInSExt(Register VReg) const
This function returns true if the specified vreg is a live-in register and sign-extended.
bool isLiveInZExt(Register VReg) const
This function returns true if the specified vreg is a live-in register and zero-extended.
PPCHazardRecognizer970 - This class defines a finite state automata that models the dispatch logic on...
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
PPCInstrInfo(PPCSubtarget &STI)
bool getFMAPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for a fma chain ending in Root.
bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase=nullptr) const
bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override
const TargetRegisterClass * updatedRC(const TargetRegisterClass *RC) const
bool isPredicated(const MachineInstr &MI) const override
bool expandVSXMemPseudo(MachineInstr &MI) const
bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg) const
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
Fixup the placeholders we put in genAlternativeCodeSequence() for MachineCombiner.
MCInst getNop() const override
Return the noop instruction to use for a noop.
static int getRecordFormOpcode(unsigned Opcode)
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool isXFormMemOp(unsigned Opcode) const
const PPCRegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
CombinerObjective getCombinerObjective(unsigned Pattern) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const
unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const
bool isTOCSaveMI(const MachineInstr &MI) const
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer to use for this target when ...
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
bool isBDNZ(unsigned Opcode) const
Check Opcode is BDNZ (Decrement CTR and branch if it is still nonzero).
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
bool isZeroExtended(const unsigned Reg, const MachineRegisterInfo *MRI) const
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
std::pair< bool, bool > isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth, const MachineRegisterInfo *MRI) const
bool expandPostRAPseudo(MachineInstr &MI) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index, MachineInstr *&ADDIMI, int64_t &OffsetAddi, int64_t OffsetImm) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
void materializeImmPostRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, int64_t Imm) const
bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Return true if two MIs access different memory addresses and false otherwise.
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
CreateTargetHazardRecognizer - Return the hazard recognizer to use for this target when scheduling th...
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
Get the base operand and byte offset of an instruction that reads/writes memory.
void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const
bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const
void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
bool foldFrameOffset(MachineInstr &MI) const
bool isLoadFromConstantPool(MachineInstr *I) const
MachineInstr * findLoopInstr(MachineBasicBlock &PreHeader, SmallPtrSet< MachineBasicBlock *, 8 > &Visited) const
Find the hardware loop instruction used to set-up the specified loop.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
bool convertToImmediateForm(MachineInstr &MI, SmallSet< Register, 4 > &RegsToUpdate, MachineInstr **KilledDef=nullptr) const
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
Return true if get the base operand, byte offset of an instruction and the memory width.
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const override
On PowerPC, we leverage machine combiner pass to reduce register pressure when the register pressure ...
bool isSignExtended(const unsigned Reg, const MachineRegisterInfo *MRI) const
void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo, int64_t Imm) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Returns true if the two given memory operations should be scheduled adjacent.
void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const
bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg, unsigned &XFormOpcode, int64_t &OffsetOfImmInstr, ImmInstrInfo &III) const
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
Return true when there is potentially a faster code sequence for an instruction chain ending in <Root...
bool optimizeCmpPostRA(MachineInstr &MI) const
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
const Constant * getConstantFromConstantPool(MachineInstr *I) const
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III, bool PostRA) const
MachineInstr * getDefMIPostRA(unsigned Reg, MachineInstr &MI, bool &SeenIntermediateUse) const
unsigned getMappedIdxOpcForImmOpc(unsigned ImmOpcode) const
getMappedIdxOpcForImmOpc - Return the mapped index form load/store opcode for a given imm form load/s...
static void emitAccCopyInfo(MachineBasicBlock &MBB, MCRegister DestReg, MCRegister SrcReg)
const PPCFrameLowering * getFrameLowering() const override
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
unsigned getCPUDirective() const
getCPUDirective - Returns the -m directive specified for the cpu.
bool isLittleEndian() const
bool isTargetLinux() const
const PPCTargetMachine & getTargetMachine() const
const Triple & getTargetTriple() const
void setGlibcHWCAPAccess(bool Val=true) const
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
Track the current register pressure at some position in the instruction stream, and remember the high...
void closeRegion()
Finalize the region boundaries and recorded live ins and live outs.
void recede(SmallVectorImpl< RegisterMaskPair > *LiveUses=nullptr)
Recede across the previous instruction.
RegisterPressure & getPressure()
Get the resulting register pressure over the traversed region.
void recedeSkipDebugValues()
Recede until we find an instruction which is not a DebugValue.
void init(const MachineFunction *mf, const RegisterClassInfo *rci, const LiveIntervals *lis, const MachineBasicBlock *mbb, MachineBasicBlock::const_iterator pos, bool TrackLaneMasks, bool TrackUntiedDefs)
Setup the RegPressureTracker.
MachineBasicBlock::const_iterator getPos() const
Get the MI position corresponding to this register pressure.
List of registers defined and used by a machine instruction.
void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOpe...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
const TargetInstrInfo * TII
Target instruction information.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
StackOffset holds a fixed and a scalable offset in bytes.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
CodeModel::Model getCodeModel() const
Returns the code model.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
bool isOSGlibc() const
Tests whether the OS uses glibc.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
LLVM Value Representation.
Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Predicate getSwappedPredicate(Predicate Opcode)
Assume the condition register is set by MI(a,b), return the predicate if we modify the instructions s...
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
int getAltVSXFMAOpcode(uint16_t Opcode)
int getNonRecordFormOpcode(uint16_t)
unsigned getPredicateCondition(Predicate Opcode)
Return the condition without hint bits.
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
Predicate InvertPredicate(Predicate Opcode)
Invert the specified predicate. != -> ==, < -> >=.
unsigned getPredicateHint(Predicate Opcode)
Return the hint bits of the predicate.
static bool isVFRegister(unsigned Reg)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getDeadRegState(bool B)
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
static unsigned getCRFromCRBit(unsigned SrcReg)
auto reverse(ContainerTy &&C)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
@ MustReduceRegisterPressure
void recomputeLivenessFlags(MachineBasicBlock &MBB)
Recomputes dead and kill flags in MBB.
unsigned getKillRegState(bool B)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
static bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME)
Returns true iff Val consists of one contiguous run of 1s with any number of 0s on either side.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t IsSummingOperands
uint64_t OpNoForForwarding
uint64_t ImmMustBeMultipleOf
uint64_t ZeroIsSpecialNew
uint64_t ZeroIsSpecialOrig
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
std::vector< unsigned > MaxSetPressure
Map of max reg pressure indexed by pressure set ID, not class ID.