#define DEBUG_TYPE "ppc-instr-info"

#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"

STATISTIC(NumStoreSPILLVSRRCAsVec,
          "Number of spillvsrrc spilled to stack as vec");
STATISTIC(NumStoreSPILLVSRRCAsGpr,
          "Number of spillvsrrc spilled to stack as gpr");
STATISTIC(NumGPRtoVSRSpill, "Number of gpr spills to spillvsrrc");
STATISTIC(CmpIselsConverted,
          "Number of ISELs that depend on comparison of constants converted");
STATISTIC(MissedConvertibleImmediateInstrs,
          "Number of compare-immediate instructions fed by constants");
STATISTIC(NumRcRotatesConvertedToRcAnd,
          "Number of record-form rotates converted to record-form andi");

    cl::desc("Disable analysis for CTR loops"));

    cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),

    cl::desc("Use the old (incorrect) instruction latency calculation"));

    cl::desc("register pressure factor for the transformations."));

    cl::desc("enable register pressure reduce in machine combiner pass."));
void PPCInstrInfo::anchor() {}

                      STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}

      static_cast<const PPCSubtarget *>(STI)->getCPUDirective();

      static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();

                                         unsigned *PredCost) const {
    return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
  unsigned DefClass = MI.getDesc().getSchedClass();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {

  std::optional<unsigned> Latency = PPCGenInstrInfo::getOperandLatency(
  if (!DefMI.getParent())

  if (Reg.isVirtual()) {
        &DefMI.getParent()->getParent()->getRegInfo();
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);

  if (UseMI.isBranch() && IsRegCR) {
    unsigned Directive = Subtarget.getCPUDirective();
#define InfoArrayIdxFMAInst 0
#define InfoArrayIdxFAddInst 1
#define InfoArrayIdxFMULInst 2
#define InfoArrayIdxAddOpIdx 3
#define InfoArrayIdxMULOpIdx 4
#define InfoArrayIdxFSubInst 5
    {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
    {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
    {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
    {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
    {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
    {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};
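// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): how a table shaped like
// the one above can be indexed through the InfoArrayIdx* macros. The FakeOpc
// enumerators and the lookup helper are assumptions made so the example is
// self-contained; only the row layout mirrors the real table.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the PPC opcodes; the real table stores
// PPC::XSMADDADP and friends.
enum FakeOpc : uint16_t { XSMADDADP = 1, XSADDDP, XSMULDP, XSSUBDP,
                          FMADD, FADD, FMUL, FSUB };

static const uint16_t FakeFMAOpIdxInfo[][6] = {
    // FMA,      FAdd,    FMul,    AddOpIdx, MulOpIdx, FSub
    {XSMADDADP, XSADDDP, XSMULDP, 1,        2,        XSSUBDP},
    {FMADD,     FADD,    FMUL,    3,        1,        FSUB},
};

// Mirrors the shape of getFMAOpIdxInfo: return the row index for an FMA
// opcode, or -1 if the opcode is not in the table.
static int16_t lookupFMAOpIdxInfo(uint16_t Opcode) {
  for (unsigned I = 0;
       I < sizeof(FakeFMAOpIdxInfo) / sizeof(FakeFMAOpIdxInfo[0]); ++I)
    if (FakeFMAOpIdxInfo[I][InfoArrayIdxFMAInst] == Opcode)
      return static_cast<int16_t>(I);
  return -1;
}

int main() {
  int16_t Idx = lookupFMAOpIdxInfo(FMADD);
  if (Idx >= 0)
    std::printf("addend operand index = %u\n",
                FakeFMAOpIdxInfo[Idx][InfoArrayIdxAddOpIdx]); // prints 3
}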
int16_t PPCInstrInfo::getFMAOpIdxInfo(unsigned Opcode) const {

                                   bool DoRegPressureReduce) const {

  auto IsAllOpsVirtualReg = [](const MachineInstr &Instr) {
    for (const auto &MO : Instr.explicit_operands())
      if (!(MO.isReg() && MO.getReg().isVirtual()))

  auto IsReassociableAddOrSub = [&](const MachineInstr &Instr,
    if (Instr.getOpcode() !=
    if (!IsAllOpsVirtualReg(Instr))
        !MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))

  auto IsReassociableFMA = [&](const MachineInstr &Instr, int16_t &AddOpIdx,
                               int16_t &MulOpIdx, bool IsLeaf) {
    int16_t Idx = getFMAOpIdxInfo(Instr.getOpcode());
    if (!IsAllOpsVirtualReg(Instr))
    return MRI->hasOneNonDBGUse(OpAdd.getReg());

  int16_t AddOpIdx = -1;
  int16_t MulOpIdx = -1;
  bool IsUsedOnceL = false;
  bool IsUsedOnceR = false;

  auto IsRPReductionCandidate = [&]() {
    if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)
    if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx, true)) {
      assert((MulOpIdx >= 0) && "mul operand index not right!");
      Register MULRegL = TRI->lookThruSingleUseCopyChain(
      Register MULRegR = TRI->lookThruSingleUseCopyChain(
      if (!MULRegL && !MULRegR)
      if (MULRegL && !MULRegR) {
      } else if (!MULRegL && MULRegR) {
      MULInstrL = MRI->getVRegDef(MULRegL);
      MULInstrR = MRI->getVRegDef(MULRegR);
  if (DoRegPressureReduce && IsRPReductionCandidate()) {
    assert((MULInstrL && MULInstrR) &&
           "wrong register pressure reduction candidate!");

  if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx, false))
  assert((AddOpIdx >= 0) && "add operand index not right!");
  if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx, false))
  assert((AddOpIdx >= 0) && "add operand index not right!");
  if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx, true)) {

  assert(!InsInstrs.empty() && "Instructions set to be inserted is empty!");
  int16_t Idx = getFMAOpIdxInfo(Root.getOpcode());
  for (auto *Inst : InsInstrs) {
      assert(Operand.isReg() && "Invalid instruction in InsInstrs!");
      if (Operand.getReg() == PPC::ZERO8) {
        Placeholder = &Operand;
  assert(Placeholder && "Placeholder does not exist!");
      generateLoadForNewConst(ConstPoolIdx, &Root, C->getType(), InsInstrs);
  Placeholder->setReg(LoadNewConst);
  if (!(Subtarget.isPPC64() && Subtarget.hasP9Vector() &&

  auto GetMBBPressure =
    RPTracker.init(MBB->getParent(), RegClassInfo, nullptr, MBB, MBB->end(),
      if (MI.isDebugValue() || MI.isDebugLabel())
      RPTracker.recede(RegOpers);

  unsigned VSSRCLimit =
  return GetMBBPressure(MBB)[PPC::RegisterPressureSets::VSSRC] >

  if (!I->hasOneMemOperand())
  return Op->isLoad() && Op->getPseudoValue() &&

Register PPCInstrInfo::generateLoadForNewConst(
  assert((Subtarget.isPPC64() && Subtarget.hasP9Vector() &&
         "Target not supported!\n");
  Register VReg1 = MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
      BuildMI(*MF, MI->getDebugLoc(), get(PPC::ADDIStocHA8), VReg1)
  assert((Ty->isFloatTy() || Ty->isDoubleTy()) &&
         "Only float and double are supported!");
    LoadOpcode = PPC::DFLOADf32;
    LoadOpcode = PPC::DFLOADf64;

  assert(I->mayLoad() && "Should be a load instruction.\n");
  for (auto MO : I->uses()) {
    if (Reg == 0 || !Reg.isVirtual())
    for (auto MO2 : DefMI->uses())
        return (MCP->getConstants())[MO2.getIndex()].Val.ConstVal;

                                      bool DoRegPressureReduce) const {
                        DoRegPressureReduce);
    reassociateFMA(Root, Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);
                                        DelInstrs, InstrIdxForVirtReg);
void PPCInstrInfo::reassociateFMA(
  MRI.constrainRegClass(RegC, RC);
  int16_t Idx = getFMAOpIdxInfo(FmaOp);
  assert(Idx >= 0 && "Root must be an FMA instruction");

  bool IsILPReassociate =
    Leaf = MRI.getVRegDef(MULReg);
    Leaf = MRI.getVRegDef(MULReg);

  uint32_t IntersectedFlags = 0;
  if (IsILPReassociate)

  auto GetOperandInfo = [&](const MachineOperand &Operand, Register &Reg,
    MRI.constrainRegClass(Reg, RC);
    KillFlag = Operand.isKill();

  auto GetFMAInstrInfo = [&](const MachineInstr &Instr, Register &MulOp1,
                             bool &MulOp1KillFlag, bool &MulOp2KillFlag,
                             bool &AddOpKillFlag) {
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
    GetOperandInfo(Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);
  Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,
  bool KillX = false, KillY = false, KillM11 = false, KillM12 = false,
       KillM21 = false, KillM22 = false, KillM31 = false, KillM32 = false,
       KillA11 = false, KillA21 = false, KillB = false;

  GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);
  if (IsILPReassociate)
    GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);
    GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
    GetOperandInfo(Leaf->getOperand(AddOpIdx), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);
    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);

  InstrIdxForVirtReg.insert(std::make_pair(NewVRA, 0));
  if (IsILPReassociate) {
    NewVRB = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRB, 1));
    NewVRD = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRD, 2));
  auto AdjustOperandOrder = [&](MachineInstr *MI, Register RegAdd, bool KillAdd,
                                Register RegMul2, bool KillRegMul2) {
    MI->getOperand(AddOpIdx).setReg(RegAdd);
    MI->getOperand(AddOpIdx).setIsKill(KillAdd);
    MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
    MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
    MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
    MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);

  MachineInstrBuilder NewARegPressure, NewCRegPressure;
    MachineInstrBuilder MINewB =
    MachineInstrBuilder MINewA =
      AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
      AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);
    MachineInstrBuilder MINewC =
    assert(NewVRD && "new FMA register not created!");
    MachineInstrBuilder MINewA =
    MachineInstrBuilder MINewB =
    MachineInstrBuilder MINewD =
      AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
      AdjustOperandOrder(MINewD, NewVRA, true, RegM31, KillM31, RegM32,
    MachineInstrBuilder MINewC =

  bool KillVarReg = false;
    KillVarReg = KillM31;
    KillVarReg = KillM32;
  if (!IsILPReassociate) {
         "Insertion instructions set should not be empty!");
  if (IsILPReassociate)

                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case PPC::EXTSW_32_64:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = PPC::sub_32;
                                           int &FrameIndex) const {
    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();

  switch (MI.getOpcode()) {
  case PPC::ADDIStocHA:
  case PPC::ADDIStocHA8:
  case PPC::ADDItocL8:
  case PPC::LOAD_STACK_GUARD:
  case PPC::PPCLdFixedAddr:
  case PPC::XXLXORspz:
  case PPC::XXLXORdpz:
  case PPC::XXLEQVOnes:
  case PPC::XXSPLTI32DX:
  case PPC::XXSPLTIDP:
  case PPC::V_SETALLONESB:
  case PPC::V_SETALLONESH:
  case PPC::V_SETALLONES:
  case PPC::XXSETACCZ:
  case PPC::DMXXSETACCZ:

                                          int &FrameIndex) const {
    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();
                                                   unsigned OpIdx2) const {
  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec)
  if (MI.getOperand(3).getImm() != 0)

  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMI_rec.");

  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;
         "Expecting a two-address instruction!");
  assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");

  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();
  if (MB == 0 && ME == 31)

    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())

    MI.getOperand(0).setReg(Reg2);
    MI.getOperand(0).setSubReg(SubReg2);
  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);

  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
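// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): why swapping the two
// RLWIMI source operands when SH == 0 only needs the mask complemented, i.e.
// MB' = (ME + 1) & 31 and ME' = (MB - 1) & 31 as done above. The helper below
// assumes IBM bit numbering (bit 0 = MSB); the full mask (which the code
// refuses to commute) is the one case the identity cannot cover.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

// Build the 32-bit mask selected by a (MB, ME) pair; MB > ME means wrap-around.
static uint32_t maskMBME(unsigned MB, unsigned ME) {
  uint64_t Hi = (MB == 0) ? 0xFFFFFFFFu : ((1ull << (32 - MB)) - 1);
  uint64_t Lo = ~((1ull << (31 - ME)) - 1) & 0xFFFFFFFFu;
  return static_cast<uint32_t>(MB <= ME ? (Hi & Lo) : (Hi | Lo));
}

int main() {
  for (unsigned MB = 0; MB < 32; ++MB)
    for (unsigned ME = 0; ME < 32; ++ME) {
      uint32_t M = maskMBME(MB, ME);
      if (M == 0xFFFFFFFFu)
        continue; // full mask: cannot be complemented by swapping MB/ME
      // The complement of mask(MB, ME) is exactly mask((ME+1)&31, (MB-1)&31).
      assert(maskMBME((ME + 1) & 31, (MB - 1) & 31) == ~M);
    }
}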
                                         unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {
  return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);

  unsigned Directive = Subtarget.getCPUDirective();
  default: Opcode = PPC::NOP; break;

                                 bool AllowModify) const {
  bool isPPC64 = Subtarget.isPPC64();
  if (!isUnpredicatedTerminator(*I))

  if (I->getOpcode() == PPC::B &&
      MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
    I->eraseFromParent();

    I = MBB.getLastNonDebugInstr();
    if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
  } else if (LastInst.getOpcode() == PPC::BCC) {
  } else if (LastInst.getOpcode() == PPC::BC) {
  } else if (LastInst.getOpcode() == PPC::BCn) {
  } else if (LastInst.getOpcode() == PPC::BDNZ8 ||
  } else if (LastInst.getOpcode() == PPC::BDZ8 ||

  if (I != MBB.begin() && isUnpredicatedTerminator(*--I))

  if (SecondLastInst.getOpcode() == PPC::BCC &&
  } else if (SecondLastInst.getOpcode() == PPC::BC &&
  } else if (SecondLastInst.getOpcode() == PPC::BCn &&
  } else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 ||
              SecondLastInst.getOpcode() == PPC::BDNZ) &&
  } else if ((SecondLastInst.getOpcode() == PPC::BDZ8 ||
              SecondLastInst.getOpcode() == PPC::BDZ) &&
    I->eraseFromParent();
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
  I->eraseFromParent();

  if (I == MBB.begin())
    return 1;
  if (I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
  I->eraseFromParent();

                                    int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "PPC branch conditions have two components!");
  assert(!BytesAdded && "code size not handled");

  bool isPPC64 = Subtarget.isPPC64();
                        (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                        (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
                    (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                    (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {
  if (!Subtarget.hasISEL())
  if (Cond.size() != 2)

      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
      !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
      !PPC::G8RCRegClass.hasSubClassEq(RC) &&
      !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))

         "PPC branch conditions have two components!");
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  assert(RC && "TrueReg and FalseReg must have overlapping register classes");

  bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
                 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
          PPC::GPRCRegClass.hasSubClassEq(RC) ||
          PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
         "isel is for regular integer GPRs only");

  unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
  unsigned SubIdx = 0;
  bool SwapOps = false;
  switch (SelectPred) {
    SubIdx = PPC::sub_eq; SwapOps = false; break;
    SubIdx = PPC::sub_eq; SwapOps = true; break;
    SubIdx = PPC::sub_lt; SwapOps = false; break;
    SubIdx = PPC::sub_lt; SwapOps = true; break;
    SubIdx = PPC::sub_gt; SwapOps = false; break;
    SubIdx = PPC::sub_gt; SwapOps = true; break;
    SubIdx = PPC::sub_un; SwapOps = false; break;
    SubIdx = PPC::sub_un; SwapOps = true; break;
  Register FirstReg = SwapOps ? FalseReg : TrueReg,
           SecondReg = SwapOps ? TrueReg : FalseReg;

  if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
      MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
        MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
          &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
    FirstReg = MRI.createVirtualRegister(FirstRC);

  if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
      CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
      CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
      CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
  if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
      CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
      CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
      CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
  if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
      CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
      CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
      CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
  if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
      CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
      CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
      CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
  assert(Ret != 4 && "Invalid CR bit register");
                               bool RenamableDest, bool RenamableSrc) const {
  if (PPC::F8RCRegClass.contains(DestReg) &&
      PPC::VSRCRegClass.contains(SrcReg)) {
        TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
        TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);

  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
      PPC::GPRCRegClass.contains(DestReg)) {
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             (PPC::G8RCRegClass.contains(DestReg) ||
              PPC::GPRCRegClass.contains(DestReg))) {
    bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
    unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
    unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
    unsigned CRNum = TRI->getEncodingValue(SrcReg);
  } else if (PPC::G8RCRegClass.contains(SrcReg) &&
             PPC::VSFRCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
  } else if (PPC::VSFRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
  } else if (PPC::SPERCRegClass.contains(SrcReg) &&
             PPC::GPRCRegClass.contains(DestReg)) {
  } else if (PPC::GPRCRegClass.contains(SrcReg) &&
             PPC::SPERCRegClass.contains(DestReg)) {
  } else if ((PPC::G8RCRegClass.contains(DestReg) ||
              PPC::GPRCRegClass.contains(DestReg)) &&
             SrcReg == PPC::CARRY) {
    bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
  } else if ((PPC::G8RCRegClass.contains(SrcReg) ||
              PPC::GPRCRegClass.contains(SrcReg)) &&
             DestReg == PPC::CARRY) {
    bool Is64Bit = PPC::G8RCRegClass.contains(SrcReg);
  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
           PPC::VSSRCRegClass.contains(DestReg, SrcReg))
    Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
  else if (Subtarget.pairedVectorMemops() &&
           PPC::VSRpRCRegClass.contains(DestReg, SrcReg)) {
    if (SrcReg > PPC::VSRp15)
      SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;
      SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
    if (DestReg > PPC::VSRp15)
      DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;
      DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;
  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::SPERCRegClass.contains(DestReg, SrcReg))
  else if ((PPC::ACCRCRegClass.contains(DestReg) ||
            PPC::UACCRCRegClass.contains(DestReg)) &&
           (PPC::ACCRCRegClass.contains(SrcReg) ||
            PPC::UACCRCRegClass.contains(SrcReg))) {
    bool DestPrimed = PPC::ACCRCRegClass.contains(DestReg);
    bool SrcPrimed = PPC::ACCRCRegClass.contains(SrcReg);
        PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
        PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
    for (unsigned Idx = 0; Idx < 4; Idx++)
    if (SrcPrimed && !KillSrc)
  } else if (PPC::G8pRCRegClass.contains(DestReg) &&
             PPC::G8pRCRegClass.contains(SrcReg)) {
    unsigned DestRegIdx = DestReg - PPC::G8p0;
    MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
    MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
    unsigned SrcRegIdx = SrcReg - PPC::G8p0;
    MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
    MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;
  } else if ((PPC::WACCRCRegClass.contains(DestReg) ||
              PPC::WACC_HIRCRegClass.contains(DestReg)) &&
             (PPC::WACCRCRegClass.contains(SrcReg) ||
              PPC::WACC_HIRCRegClass.contains(SrcReg))) {
    Opc = PPC::WACCRCRegClass.contains(SrcReg) ? PPC::DMXXEXTFDMR512
                                               : PPC::DMXXEXTFDMR512_HI;
    RS.enterBasicBlockEnd(MBB);
    RS.backward(std::next(I));
    Register TmpReg1 = RS.scavengeRegisterBackwards(PPC::VSRpRCRegClass, I,
    RS.setRegUsed(TmpReg1);
    Register TmpReg2 = RS.scavengeRegisterBackwards(PPC::VSRpRCRegClass, I,
    Opc = PPC::WACCRCRegClass.contains(DestReg) ? PPC::DMXXINSTDMR512
                                                : PPC::DMXXINSTDMR512_HI;
  } else if (PPC::DMRRCRegClass.contains(DestReg) &&
             PPC::DMRRCRegClass.contains(SrcReg)) {

  if (MCID.getNumOperands() == 3)
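// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): copyPhysReg above
// repeatedly converts a wide register (VSRp pair, ACC/UACC quad, G8p pair)
// into its first underlying narrow register with simple index arithmetic.
// The absolute base values below are made up; only the relative arithmetic
// mirrors the code above.
// ---------------------------------------------------------------------------
#include <cassert>

int main() {
  // Illustrative bases: reg = Base + index, standing in for PPC::VSL0,
  // PPC::V0, PPC::ACC0, PPC::X0, PPC::VSRp0/15/16 and PPC::G8p0.
  const unsigned VSL0 = 100, V0 = 200, ACC0 = 300, X0 = 400;
  const unsigned VSRp0 = 500, VSRp15 = 515, VSRp16 = 516, G8p0 = 600;

  // A VSRp pair covers two consecutive VSX registers: VSRp0..15 start at
  // VSL0/2/4/..., VSRp16..31 start at V0/2/4/... (the upper VSX half).
  unsigned VSRpReg = VSRp0 + 3; // VSRp3
  unsigned FirstVSX = (VSRpReg > VSRp15) ? V0 + (VSRpReg - VSRp16) * 2
                                         : VSL0 + (VSRpReg - VSRp0) * 2;
  assert(FirstVSX == VSL0 + 6); // VSL6; the pair is {VSL6, VSL7}

  // An ACC/UACC register covers four consecutive VSX registers, copied by a
  // four-iteration loop as above.
  unsigned AccReg = ACC0 + 2; // ACC2
  unsigned FirstOfQuad = VSL0 + (AccReg - ACC0) * 4;
  assert(FirstOfQuad == VSL0 + 8); // VSL8..VSL11

  // A G8p pair covers two consecutive 64-bit GPRs.
  unsigned G8pReg = G8p0 + 5; // G8p5
  unsigned Sub0 = X0 + 2 * (G8pReg - G8p0);
  unsigned Sub1 = Sub0 + 1;
  assert(Sub0 == X0 + 10 && Sub1 == X0 + 11); // X10 and X11
}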
  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::DMRROWRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::DMRROWpRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::DMRpRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::DMRRCRegClass.hasSubClassEq(RC)) {

  return OpcodesForSpill[getSpillIndex(RC)];

  return OpcodesForSpill[getSpillIndex(RC)];
void PPCInstrInfo::StoreRegToStackSlot(
  if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
      PPC::CRBITRCRegClass.hasSubClassEq(RC))

  StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);
    MBB.insert(MI, NewMI);
  NewMIs.back()->addMemOperand(MF, MMO);

                                        unsigned DestReg, int FrameIdx,
  if (MI != MBB.end())
    DL = MI->getDebugLoc();
  LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
    MBB.insert(MI, NewMI);
  NewMIs.back()->addMemOperand(MF, MMO);

  assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
  if (!DefMI.getOperand(1).isImm())
  if (DefMI.getOperand(1).getImm() != 0)

  for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
    if (UseMI.getOperand(UseIdx).isReg() &&
        UseMI.getOperand(UseIdx).getReg() == Reg)
  assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");

  int16_t RegClass = getOpRegClassID(UseInfo);
  if (UseInfo.RegClass != PPC::GPRC_NOR0RegClassID &&
      UseInfo.RegClass != PPC::G8RC_NOX0RegClassID)
  if (UseInfo.Constraints != 0)
      RegClass == PPC::G8RC_NOX0RegClassID ? PPC::ZERO8 : PPC::ZERO;
  UseMI.getOperand(UseIdx).setReg(ZeroReg);

  if (MRI->use_nodbg_empty(Reg))
    DefMI.eraseFromParent();

  if (MI.definesRegister(PPC::CTR, nullptr) ||
      MI.definesRegister(PPC::CTR8, nullptr))
                                 unsigned NumT, unsigned ExtraT,
                                 unsigned NumF, unsigned ExtraF,

  switch (MI.getOpcode()) {

  unsigned OpC = MI.getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
                                      : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
      MI.setDesc(get(PPC::BCLR));
      MI.setDesc(get(PPC::BCLRn));
      MI.setDesc(get(PPC::BCCLR));
  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
                                      : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
      MI.removeOperand(0);
      MI.setDesc(get(PPC::BC));
      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCn));
      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCC));
  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
             OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
             OpC == PPC::BCTRL8_RM) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
                 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
    bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
                             : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
                             : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
                             : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
    if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)
  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");

  if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
  if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)

                                std::vector<MachineOperand> &Pred,
                                bool SkipDead) const {
      { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
        &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };

  for (unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
      if (MO.isDef() && RC->contains(MO.getReg())) {
      } else if (MO.isRegMask()) {
        if (MO.clobbersPhysReg(R)) {

                                  int64_t &Value) const {
  unsigned Opc = MI.getOpcode();
  default:
    return false;
    SrcReg = MI.getOperand(1).getReg();
    Value = MI.getOperand(2).getImm();
    SrcReg = MI.getOperand(1).getReg();
    SrcReg2 = MI.getOperand(2).getReg();

  if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
  bool isPPC64 = Subtarget.isPPC64();
  bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
  bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
  bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;

  if (!MI)
    return false;
  bool equalityOnly = false;
  if (is32BitSignedCompare) {
  } else if (is32BitUnsignedCompare) {
    equalityOnly = true;
    equalityOnly = is64BitUnsignedCompare;
    equalityOnly = is32BitUnsignedCompare;
       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
    if (UseMI->getOpcode() == PPC::BCC) {
    } else if (UseMI->getOpcode() == PPC::ISEL ||
               UseMI->getOpcode() == PPC::ISEL8) {
      unsigned SubIdx = UseMI->getOperand(3).getSubReg();
      if (SubIdx != PPC::sub_eq)

    bool FoundUse = false;
         J = MRI->use_instr_begin(CRReg), JE = MRI->use_instr_end();
  else if (Value != 0) {

    if (equalityOnly || !MRI->hasOneUse(CRReg))
    if (UseMI->getOpcode() != PPC::BCC)
    int16_t Immed = (int16_t)Value;
    UseMI->getOperand(0).setImm(Pred);

  for (; I != E && !noSub; --I) {
    unsigned IOpC = Instr.getOpcode();
    if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
                             Instr.readsRegister(PPC::CR0, TRI)))
    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {
  int MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
      MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)
    NewOpC = PPC::getRecordFormOpcode(MIOpC);

  if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&

  bool ShouldSwap = false;
    ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
                 Sub->getOperand(2).getReg() == SrcReg;
    ShouldSwap = !ShouldSwap;

       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
    if (UseMI->getOpcode() == PPC::BCC) {
             "Invalid predicate for equality-only optimization");
    } else if (UseMI->getOpcode() == PPC::ISEL ||
               UseMI->getOpcode() == PPC::ISEL8) {
      unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
      assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
             "Invalid CR bit for equality-only optimization");
      if (NewSubReg == PPC::sub_lt)
        NewSubReg = PPC::sub_gt;
      else if (NewSubReg == PPC::sub_gt)
        NewSubReg = PPC::sub_lt;
      SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
         "Non-zero immediate support and ShouldSwap "
         "may conflict in updating predicate");

  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)
    MI->clearRegisterDeads(PPC::CR0);
  if (MIOpC != NewOpC) {
    if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
      Register GPRRes = MI->getOperand(0).getReg();
      int64_t SH = MI->getOperand(2).getImm();
      int64_t MB = MI->getOperand(3).getImm();
      int64_t ME = MI->getOperand(4).getImm();
      bool MBInLoHWord = MB >= 16;
      bool MEInLoHWord = ME >= 16;
      if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
        Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
        Mask >>= MBInLoHWord ? 0 : 16;
        NewOpC = MIOpC == PPC::RLWINM
                     ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
                     : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
      } else if (MRI->use_empty(GPRRes) && (ME == 31) &&
                 (ME - MB + 1 == SH) && (MB >= 16)) {
        Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
        NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;
      if (Mask != ~0LLU) {
        MI->removeOperand(4);
        MI->removeOperand(3);
        MI->getOperand(2).setImm(Mask);
        NumRcRotatesConvertedToRcAnd++;
    } else if (MIOpC == PPC::RLDICL && MI->getOperand(2).getImm() == 0) {
      int64_t MB = MI->getOperand(3).getImm();
      uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
      NewOpC = PPC::ANDI8_rec;
      MI->removeOperand(3);
      MI->getOperand(2).setImm(Mask);
      NumRcRotatesConvertedToRcAnd++;
    MI->setDesc(NewDesc);

    if (!MI->definesRegister(ImpDef, nullptr)) {
      MI->addOperand(*MI->getParent()->getParent(),
    if (!MI->readsRegister(ImpUse, nullptr)) {
      MI->addOperand(*MI->getParent()->getParent(),
  assert(MI->definesRegister(PPC::CR0, nullptr) &&
         "Record-form instruction does not define cr0?");

  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
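// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): the record-form
// conversion above turns rlwinm. with SH == 0 and a mask confined to one
// halfword into andi./andis. using
//   Mask = ((1 << (32 - MB)) - 1) & ~((1 << (31 - ME)) - 1).
// Below is a standalone check of that arithmetic; the helper name is an
// assumption for illustration.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

// Same formula as above: the 32-bit mask selected by MB..ME (IBM numbering,
// bit 0 = MSB), assuming MB <= ME so the mask does not wrap.
static uint64_t rlwinmMask(unsigned MB, unsigned ME) {
  return ((1llu << (32 - MB)) - 1) & ~((1llu << (31 - ME)) - 1);
}

int main() {
  // MB=16, ME=31: low halfword mask -> fits andi. directly.
  assert(rlwinmMask(16, 31) == 0x0000FFFF);
  // MB=0, ME=15: high halfword mask -> shifted right by 16 and emitted as andis.
  assert((rlwinmMask(0, 15) >> 16) == 0xFFFF);
  // MB=24, ME=27: a 4-bit field in the low halfword, still a valid andi. mask.
  assert(rlwinmMask(24, 27) == 0x000000F0);
}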
  int64_t CmpMask, CmpValue;
  if (CmpValue || !CmpMask || SrcReg2)
  if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)
  if (Subtarget.isPPC64() && Opc == PPC::CMPWI)
  bool SrcRegHasOtherUse = false;
  if (CRReg != PPC::CR0)
  bool SeenUseOfCRReg = false;
  bool IsCRRegKilled = false;
  if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI, false, IsCRRegKilled,
  int NewOpC = PPC::getRecordFormOpcode(SrcMIOpc);
         "Record-form instruction does not define cr0?");

  OffsetIsScalable = false;

  case PPC::DFSTOREf64:
    return FirstOpc == SecondOpc;
    return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;

                                   int64_t OpOffset2, bool OffsetIsScalable2,
                                   unsigned ClusterSize,
                                   unsigned NumBytes) const {
         "Only base registers and frame indices are supported.");
  if (ClusterSize > 2)
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  int64_t Offset1 = 0, Offset2 = 0;
  assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
         "getMemOperandWithOffsetWidth return incorrect base op");
  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + (int64_t)Width1.getValue() == Offset2;
  unsigned Opcode = MI.getOpcode();
  case PPC::INLINEASM:
  case PPC::INLINEASM_BR: {
    const char *AsmStr = MI.getOperand(0).getSymbolName();
  case TargetOpcode::STACKMAP: {
  case TargetOpcode::PATCHPOINT: {
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER: {
    (void)F.getFnAttribute("patchable-function-entry")
        .getAsInteger(10, Num);
  return get(Opcode).getSize();

std::pair<unsigned, unsigned>
  return std::make_pair(TF, 0u);

  using namespace PPCII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PLT, "ppc-plt"},
      {MO_PIC_FLAG, "ppc-pic"},
      {MO_PCREL_FLAG, "ppc-pcrel"},
      {MO_GOT_FLAG, "ppc-got"},
      {MO_PCREL_OPT_FLAG, "ppc-opt-pcrel"},
      {MO_TLSGD_FLAG, "ppc-tlsgd"},
      {MO_TPREL_FLAG, "ppc-tprel"},
      {MO_TLSLDM_FLAG, "ppc-tlsldm"},
      {MO_TLSLD_FLAG, "ppc-tlsld"},
      {MO_TLSGDM_FLAG, "ppc-tlsgdm"},
      {MO_GOT_TLSGD_PCREL_FLAG, "ppc-got-tlsgd-pcrel"},
      {MO_GOT_TLSLD_PCREL_FLAG, "ppc-got-tlsld-pcrel"},
      {MO_GOT_TPREL_PCREL_FLAG, "ppc-got-tprel-pcrel"},
      {MO_TPREL_LO, "ppc-tprel-lo"},
      {MO_TPREL_HA, "ppc-tprel-ha"},
      {MO_DTPREL_LO, "ppc-dtprel-lo"},
      {MO_TLSLD_LO, "ppc-tlsld-lo"},
      {MO_TOC_LO, "ppc-toc-lo"},
      {MO_TLS, "ppc-tls"},
      {MO_PIC_HA_FLAG, "ppc-ha-pic"},
      {MO_PIC_LO_FLAG, "ppc-lo-pic"},
      {MO_TPREL_PCREL_FLAG, "ppc-tprel-pcrel"},
      {MO_TLS_PCREL_FLAG, "ppc-tls-pcrel"},
      {MO_GOT_PCREL_FLAG, "ppc-got-pcrel"},
  unsigned UpperOpcode, LowerOpcode;
  switch (MI.getOpcode()) {
  case PPC::DFLOADf32:
    UpperOpcode = PPC::LXSSP;
    LowerOpcode = PPC::LFS;
  case PPC::DFLOADf64:
    UpperOpcode = PPC::LXSD;
    LowerOpcode = PPC::LFD;
  case PPC::DFSTOREf32:
    UpperOpcode = PPC::STXSSP;
    LowerOpcode = PPC::STFS;
  case PPC::DFSTOREf64:
    UpperOpcode = PPC::STXSD;
    LowerOpcode = PPC::STFD;
  case PPC::XFLOADf32:
    UpperOpcode = PPC::LXSSPX;
    LowerOpcode = PPC::LFSX;
  case PPC::XFLOADf64:
    UpperOpcode = PPC::LXSDX;
    LowerOpcode = PPC::LFDX;
  case PPC::XFSTOREf32:
    UpperOpcode = PPC::STXSSPX;
    LowerOpcode = PPC::STFSX;
  case PPC::XFSTOREf64:
    UpperOpcode = PPC::STXSDX;
    LowerOpcode = PPC::STFDX;
    UpperOpcode = PPC::LXSIWAX;
    LowerOpcode = PPC::LFIWAX;
    UpperOpcode = PPC::LXSIWZX;
    LowerOpcode = PPC::LFIWZX;
    UpperOpcode = PPC::STXSIWX;
    LowerOpcode = PPC::STFIWX;

  Register TargetReg = MI.getOperand(0).getReg();
  if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
      (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
    Opcode = LowerOpcode;
    Opcode = UpperOpcode;
  MI.setDesc(get(Opcode));
  auto &MBB = *MI.getParent();
  auto DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case PPC::BUILD_UACC: {
    if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
      MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
      MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;
      for (int VecNo = 0; VecNo < 4; VecNo++)
  case PPC::KILL_PAIR: {
    MI.setDesc(get(PPC::UNENCODED_NOP));
    MI.removeOperand(1);
    MI.removeOperand(0);
  case TargetOpcode::LOAD_STACK_GUARD: {
    auto M = MBB.getParent()->getFunction().getParent();
        (Subtarget.isTargetLinux() || M->getStackProtectorGuard() == "tls") &&
        "Only Linux target or tls mode are expected to contain "
        "LOAD_STACK_GUARD");
    if (M->getStackProtectorGuard() == "tls")
      Offset = M->getStackProtectorGuardOffset();
      Offset = Subtarget.isPPC64() ? -0x7010 : -0x7008;
    const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
    MI.setDesc(get(Subtarget.isPPC64() ? PPC::LD : PPC::LWZ));
  case PPC::PPCLdFixedAddr: {
    assert((Subtarget.getTargetTriple().isOSGlibc() ||
            Subtarget.getTargetTriple().isMusl()) &&
           "Only targets with Glibc expected to contain PPCLdFixedAddr");
    const unsigned Reg = Subtarget.isPPC64() ? PPC::X13 : PPC::R2;
    MI.setDesc(get(PPC::LWZ));
#undef PPC_LNX_FEATURE
#define PPC_LNX_DEFINE_OFFSETS
#include "llvm/TargetParser/PPCTargetParser.def"
    bool IsLE = Subtarget.isLittleEndian();
    bool Is64 = Subtarget.isPPC64();
    if (FAType == PPC_FAWORD_HWCAP) {
        Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;
        Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
    } else if (FAType == PPC_FAWORD_HWCAP2) {
        Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;
        Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
    } else if (FAType == PPC_FAWORD_CPUID) {
        Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;
        Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;
    assert(Offset && "Do not know the offset for this fixed addr load");
    MI.removeOperand(1);
    Subtarget.getTargetMachine().setGlibcHWCAPAccess();
#define PPC_TGT_PARSER_UNDEF_MACROS
#include "llvm/TargetParser/PPCTargetParser.def"
#undef PPC_TGT_PARSER_UNDEF_MACROS
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64: {
    assert(Subtarget.hasP9Vector() &&
           "Invalid D-Form Pseudo-ops on Pre-P9 target.");
           "D-form op must have register and immediate operands");
  case PPC::XFLOADf32:
  case PPC::XFSTOREf32:
    assert(Subtarget.hasP8Vector() &&
           "Invalid X-Form Pseudo-ops on Pre-P8 target.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
  case PPC::XFLOADf64:
  case PPC::XFSTOREf64: {
    assert(Subtarget.hasVSX() &&
           "Invalid X-Form Pseudo-ops on target that has no VSX.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
  case PPC::SPILLTOVSR_LD: {
    Register TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg)) {
      MI.setDesc(get(PPC::DFLOADf64));
      MI.setDesc(get(PPC::LD));
  case PPC::SPILLTOVSR_ST: {
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::DFSTOREf64));
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STD));
  case PPC::SPILLTOVSR_LDX: {
    Register TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg))
      MI.setDesc(get(PPC::LXSDX));
      MI.setDesc(get(PPC::LDX));
  case PPC::SPILLTOVSR_STX: {
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::STXSDX));
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STDX));
  case PPC::CFENCE8: {
    auto Val = MI.getOperand(0).getReg();
    unsigned CmpOp = Subtarget.isPPC64() ? PPC::CMPD : PPC::CMPW;
    MI.setDesc(get(PPC::ISYNC));
    MI.removeOperand(0);
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc,
                          unsigned TrueReg, unsigned FalseReg,
                          unsigned CRSubReg) {
  if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
      return Imm1 < Imm2 ? TrueReg : FalseReg;
      return Imm1 > Imm2 ? TrueReg : FalseReg;
      return Imm1 == Imm2 ? TrueReg : FalseReg;
  } else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
      return Imm1 == Imm2 ? TrueReg : FalseReg;
  return PPC::NoRegister;
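// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): selectReg above folds an
// ISEL whose CR input comes from a compare of two known constants by
// evaluating the comparison at compile time and returning whichever of
// TrueReg/FalseReg the ISEL would pick, or NoRegister when it cannot decide.
// The enums, subfield encoding, and helper name below are assumptions made so
// the example is self-contained.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

enum CmpKind { SignedCmp, UnsignedCmp };        // stand-in for CMPWI/CMPDI vs CMPLWI/CMPLDI
enum CRSub { SubLT = 0, SubGT = 1, SubEQ = 2 }; // illustrative subfield encoding
static const unsigned kNoRegister = 0;

// Evaluate "isel Dst, TrueReg, FalseReg, crN[sub]" when both compare operands
// are known immediates.
static unsigned selectRegConst(int64_t Imm1, int64_t Imm2, CmpKind Kind,
                               unsigned TrueReg, unsigned FalseReg, CRSub Sub) {
  switch (Sub) {
  case SubLT:
    if (Kind == SignedCmp)
      return Imm1 < Imm2 ? TrueReg : FalseReg;
    return (uint64_t)Imm1 < (uint64_t)Imm2 ? TrueReg : FalseReg;
  case SubGT:
    if (Kind == SignedCmp)
      return Imm1 > Imm2 ? TrueReg : FalseReg;
    return (uint64_t)Imm1 > (uint64_t)Imm2 ? TrueReg : FalseReg;
  case SubEQ:
    return Imm1 == Imm2 ? TrueReg : FalseReg;
  }
  return kNoRegister; // unknown subfield: give up and keep the ISEL
}

int main() {
  // -1 < 2 when compared signed, but 0xFFFF...F > 2 when compared unsigned.
  assert(selectRegConst(-1, 2, SignedCmp, /*TrueReg=*/5, /*FalseReg=*/6, SubLT) == 5);
  assert(selectRegConst(-1, 2, UnsignedCmp, 5, 6, SubLT) == 6);
}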
                                             int64_t Imm) const {
  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");
  Register InUseReg = MI.getOperand(OpNo).getReg();
  MI.getOperand(OpNo).ChangeToImmediate(Imm);

  int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, TRI, false);
  if (UseOpIdx >= 0) {
    MI.removeOperand(UseOpIdx);

  int OperandToKeep = LII.SetCR ? 1 : 0;
  for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
    MI.removeOperand(i);
    MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));

                                       bool &SeenIntermediateUse) const {
  assert(!MI.getParent()->getParent()->getRegInfo().isSSA() &&
         "Should be called after register allocation.");
  SeenIntermediateUse = false;
  for (; It != E; ++It) {
    if (It->modifiesRegister(Reg, TRI))
    if (It->readsRegister(Reg, TRI))
      SeenIntermediateUse = true;
                                        int64_t Imm) const {
  assert(!MBB.getParent()->getRegInfo().isSSA() &&
         "Register should be in non-SSA form after RA");
  bool isPPC64 = Subtarget.isPPC64();

  assert(isPPC64 && "Materializing 64-bit immediate to single register is "
                    "only supported in PPC64");
  if ((Imm >> 32) & 0xFFFF)
        .addImm((Imm >> 32) & 0xFFFF);
        .addImm((Imm >> 16) & 0xFFFF);
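// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): the immediate
// materialization above builds a 64-bit constant in 16-bit chunks, e.g.
// (Imm >> 32) & 0xFFFF and (Imm >> 16) & 0xFFFF. The helpers below only show
// that the decomposition is lossless; the struct and names are assumptions.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

struct ImmChunks {
  uint16_t Hi48, Hi32, Hi16, Lo;
};

// Split a 64-bit immediate into the four 16-bit chunks a lis/ori-style
// instruction sequence would materialize one at a time.
static ImmChunks split(uint64_t Imm) {
  return {static_cast<uint16_t>(Imm >> 48),
          static_cast<uint16_t>((Imm >> 32) & 0xFFFF),
          static_cast<uint16_t>((Imm >> 16) & 0xFFFF),
          static_cast<uint16_t>(Imm & 0xFFFF)};
}

static uint64_t join(ImmChunks C) {
  return (uint64_t(C.Hi48) << 48) | (uint64_t(C.Hi32) << 32) |
         (uint64_t(C.Hi16) << 16) | C.Lo;
}

int main() {
  uint64_t Imm = 0x123456789ABCDEF0ull;
  ImmChunks C = split(Imm);
  assert(C.Hi32 == 0x5678 && C.Hi16 == 0x9ABC); // the two chunks visible above
  assert(join(C) == Imm);                       // lossless decomposition
}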
                                          unsigned &OpNoForForwarding,
                                          bool &SeenIntermediateUse) const {
  OpNoForForwarding = ~0U;
    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      if (!MI.getOperand(i).isReg())
      if (!Reg.isVirtual())
      if (DefMIForTrueReg->getOpcode() == PPC::LI ||
          DefMIForTrueReg->getOpcode() == PPC::LI8 ||
          DefMIForTrueReg->getOpcode() == PPC::ADDI ||
          DefMIForTrueReg->getOpcode() == PPC::ADDI8) {
        OpNoForForwarding = i;
        DefMI = DefMIForTrueReg;
        if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8)

    unsigned Opc = MI.getOpcode();
    bool ConvertibleImmForm =
        Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
        Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
        Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
        Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
        Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
        Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
        Opc == PPC::RLWINM8_rec;
    bool IsVFReg = (MI.getNumOperands() && MI.getOperand(0).isReg())
    if ((Opc == PPC::OR || Opc == PPC::OR8) &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
    for (int i = 1, e = MI.getNumOperands(); i < e; i++) {
      MachineOperand &MO = MI.getOperand(i);
      SeenIntermediateUse = false;
      case PPC::ADDItocL8:
        OpNoForForwarding = i;
  return OpNoForForwarding == ~0U ? nullptr : DefMI;

unsigned PPCInstrInfo::getSpillTarget() const {
  bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();
  return Subtarget.isISAFuture() ? 3 : IsP10Variant ?
         2 : Subtarget.hasP9Vector() ?
  bool PostRA = !MRI->isSSA();

  unsigned ToBeDeletedReg = 0;
  int64_t OffsetImm = 0;
  unsigned XFormOpcode = 0;
  bool OtherIntermediateUse = false;
  if (OtherIntermediateUse || !ADDMI)
  unsigned ScaleRegIdx = 0;
  int64_t OffsetAddi = 0;
  assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");
  for (auto It = ++Start; It != End; It++)
      (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))
  if (NewDefFor(ToBeChangedReg, *ADDMI, MI) || NewDefFor(ScaleReg, *ADDMI, MI))

  MI.setDesc(get(XFormOpcode));
      .ChangeToRegister(ScaleReg, false, false,
      .ChangeToRegister(ToBeChangedReg, false, false, true);
                                                   int64_t &Imm) const {
  if (Opc != PPC::ADDI && Opc != PPC::ADDI8)

  return Opc == PPC::ADD4 || Opc == PPC::ADD8;

                                                  unsigned &ToBeDeletedReg,
                                                  unsigned &XFormOpcode,
  if (!MI.mayLoadOrStore())
  unsigned Opc = MI.getOpcode();
  XFormOpcode = RI.getMappedIdxOpcForImmOpc(Opc);
  if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
  if (!ImmOperand.isImm())
  assert(RegOperand.isReg() && "Instruction format is not right");
  if (!RegOperand.isKill())
  ToBeDeletedReg = RegOperand.getReg();
  OffsetImm = ImmOperand.getImm();

                                           int64_t &OffsetAddi,
                                           int64_t OffsetImm) const {
  assert((Index == 1 || Index == 2) && "Invalid operand index for add.");
  bool OtherIntermediateUse = false;
  if (OtherIntermediateUse || !ADDIMI)

  bool PostRA = !MRI->isSSA();
  bool SeenIntermediateUse = true;
  unsigned ForwardingOperand = ~0U;
                                      SeenIntermediateUse);
  assert(ForwardingOperand < MI.getNumOperands() &&
         "The forwarding operand needs to be valid at this point");
  bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill();
  bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
  if (KilledDef && KillFwdDefMI)

  if (RI.getMappedIdxOpcForImmOpc(MI.getOpcode()) !=
          PPC::INSTRUCTION_LIST_END &&
      transformToNewImmFormFedByAdd(MI, *DefMI, ForwardingOperand))
  bool IsVFReg = MI.getOperand(0).isReg() &&
                 MI.getOperand(0).getReg().isPhysical() &&
      transformToImmFormFedByAdd(MI, III, ForwardingOperand, *DefMI,
      transformToImmFormFedByLI(MI, III, ForwardingOperand, *DefMI))
      simplifyToLI(MI, *DefMI, ForwardingOperand, KilledDef, &RegsToUpdate))
  Register FoldingReg = MI.getOperand(1).getReg();
  if (SrcMI->getOpcode() != PPC::RLWINM &&
      SrcMI->getOpcode() != PPC::RLWINM_rec &&
  assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&
         "Invalid PPC::RLWINM Instruction!");
  assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
         "Invalid PPC::RLWINM Instruction!");

  bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
  if ((MBMI > MEMI) && !SrcMaskFull)

  APInt RotatedSrcMask = MaskSrc.rotl(SHMI);
  APInt FinalMask = RotatedSrcMask & MaskMI;
  bool Simplified = false;

  if (FinalMask.isZero()) {
        (MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec);
    if (MI.getOpcode() == PPC::RLWINM || MI.getOpcode() == PPC::RLWINM8) {
      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.removeOperand(2);
      MI.getOperand(1).ChangeToImmediate(0);
      MI.setDesc(get(Is64Bit ? PPC::LI8 : PPC::LI));
      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
      MI.getOperand(1).setIsKill(true);
      MI.getOperand(1).setIsKill(false);

    uint16_t NewSH = (SHSrc + SHMI) % 32;
    MI.getOperand(2).setImm(NewSH);
    MI.getOperand(3).setImm(NewMB);
    MI.getOperand(4).setImm(NewME);
      MI.getOperand(1).setIsKill(true);
      MI.getOperand(1).setIsKill(false);

  if (Simplified & MRI->use_nodbg_empty(FoldingReg) &&
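// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PPCInstrInfo.cpp): the folding above
// rewrites rlwinm(rlwinm(x, SHSrc, MaskSrc), SHMI, MaskMI) as a single rlwinm
// with shift (SHSrc + SHMI) % 32 and mask rotl(MaskSrc, SHMI) & MaskMI, and
// degenerates to li 0 when that combined mask is zero. Below is a standalone
// check of that identity on 32-bit values; helper names are assumptions.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t V, unsigned N) {
  N &= 31;
  return N ? (V << N) | (V >> (32 - N)) : V;
}

// rlwinm: rotate left, then AND with the mask.
static uint32_t rlwinm(uint32_t V, unsigned SH, uint32_t Mask) {
  return rotl32(V, SH) & Mask;
}

int main() {
  uint32_t X = 0xDEADBEEF;
  unsigned SHSrc = 7, SHMI = 11;
  uint32_t MaskSrc = 0x00FFFF00, MaskMI = 0x0FF00FF0;

  uint32_t TwoStep = rlwinm(rlwinm(X, SHSrc, MaskSrc), SHMI, MaskMI);
  // Folded form: one rotate by (SHSrc + SHMI) % 32 and one combined mask.
  uint32_t Folded =
      rlwinm(X, (SHSrc + SHMI) % 32, rotl32(MaskSrc, SHMI) & MaskMI);
  assert(TwoStep == Folded);
}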
  default:
    return false;
    III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
    III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
    III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
    III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
    III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
  case PPC::OR: III.ImmOpcode = PPC::ORI; break;
  case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
  case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
  case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;
  case PPC::RLWNM_rec:
  case PPC::RLWNM8_rec:
    if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
        Opc == PPC::RLWNM8_rec)
    case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::RLWNM_rec:
    case PPC::RLWNM8_rec:
    case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;
  case PPC::RLDCL_rec:
  case PPC::RLDCR_rec:
    if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
        Opc == PPC::RLDCR_rec)
    case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::RLDCL_rec:
    case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::RLDCR_rec:
    case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
  case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
  case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
  case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
  case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
  case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
  case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
  case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
  case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;
  case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
  case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
  case PPC::STBX: III.ImmOpcode = PPC::STB; break;
  case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
  case PPC::STHX: III.ImmOpcode = PPC::STH; break;
  case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
  case PPC::STWX: III.ImmOpcode = PPC::STW; break;
  case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;
  case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
  case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;
  case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
  case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
  case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
  case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
  case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
  case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
  case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
  case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;
  case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
  case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
  case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
  case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
  case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
  case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
  case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
  case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;
  case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
  case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
  case PPC::XFLOADf32:
  case PPC::XFLOADf64:
  case PPC::XFSTOREf32:
  case PPC::XFSTOREf64:
    if (!Subtarget.hasP9Vector())
    case PPC::XFLOADf32:
    case PPC::XFLOADf64:
    case PPC::XFSTOREf32:
    case PPC::XFSTOREf64:
  assert(Op1 != Op2 && "Cannot swap operand with itself.");

  unsigned MaxOp = std::max(Op1, Op2);
  unsigned MinOp = std::min(Op1, Op2);
  MI.removeOperand(std::max(Op1, Op2));
  MI.removeOperand(std::min(Op1, Op2));

  if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) {
    MI.addOperand(MOp2);
    MI.addOperand(MOp1);
    unsigned TotalOps = MI.getNumOperands() + 2;
    for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--) {
      MI.removeOperand(i);
    MI.addOperand(MOp2);
    for (unsigned i = MI.getNumOperands(); i < TotalOps; i++) {
      MI.addOperand(MOp1);
      MI.addOperand(MOps.back());
                                                  unsigned OpNoForForwarding
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
  if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())
         "Add inst must have at least three operands");
  RegMO = &DefMI.getOperand(1);
  ImmMO = &DefMI.getOperand(2);
  if (!RegMO->isReg())

bool PPCInstrInfo::isRegElgibleForForwarding(
    bool &IsFwdFeederRegKilled, bool &SeenIntermediateUse) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (; It != E; ++It) {
      IsFwdFeederRegKilled = true;
      SeenIntermediateUse = true;
    if ((&*It) == &DefMI)

bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                             int64_t BaseImm) const {
  if (DefMI.getOpcode() == PPC::ADDItocL8) {
  if (ImmMO.isImm()) {
    APInt ActualValue(64, ImmMO.getImm() + BaseImm, true);
                                     unsigned OpNoForForwarding,
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())

  MachineFunction *MF = MI.getParent()->getParent();
  int64_t Immediate = DefMI.getOperand(1).getImm();
  bool ReplaceWithLI = false;
  bool Is64BitLI = false;
  unsigned Opc = MI.getOpcode();

    int64_t Comparand = MI.getOperand(2).getImm();
    int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0
                                ? (Comparand | 0xFFFFFFFFFFFF0000)
    for (auto &CompareUseMI : MRI->use_instructions(DefReg)) {
      unsigned UseOpc = CompareUseMI.getOpcode();
      if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
      unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
      Register TrueReg = CompareUseMI.getOperand(1).getReg();
      Register FalseReg = CompareUseMI.getOperand(2).getReg();
      unsigned RegToCopy =
          selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg);
      if (RegToCopy == PPC::NoRegister)
      if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
        CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
        CompareUseMI.removeOperand(3);
        CompareUseMI.removeOperand(2);
      dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
      for (const MachineOperand &MO : CompareUseMI.operands())
      CompareUseMI.setDesc(get(PPC::COPY));
      CompareUseMI.removeOperand(3);
      CompareUseMI.removeOperand(RegToCopy == TrueReg ? 2 : 1);
      CmpIselsConverted++;
    MissedConvertibleImmediateInstrs++;

    int64_t Addend = MI.getOperand(2).getImm();
    ReplaceWithLI = true;
    Is64BitLI = Opc == PPC::ADDI8;
    NewImm = Addend + SExtImm;
  case PPC::SUBFIC8: {
    if (MI.getNumOperands() > 3 && !MI.getOperand(3).isDead())
    int64_t Minuend = MI.getOperand(2).getImm();
    ReplaceWithLI = true;
    Is64BitLI = Opc == PPC::SUBFIC8;
    NewImm = Minuend - SExtImm;
  case PPC::RLDICL_rec:
  case PPC::RLDICL_32:
  case PPC::RLDICL_32_64: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
    InVal = InVal.rotl(SH);
    uint64_t Mask = MB == 0 ? -1LLU : (1LLU << (63 - MB + 1)) - 1;
        (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
      ReplaceWithLI = true;
      Is64BitLI = Opc != PPC::RLDICL_32;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLDICL_rec;
  case PPC::RLWINM_rec:
  case PPC::RLWINM8_rec: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    int64_t ME = MI.getOperand(4).getImm();
    APInt InVal(32, SExtImm, true);
    InVal = InVal.rotl(SH);
    bool ValueFits = isUInt<15>(InVal.getSExtValue());
    ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;
    int64_t LogicalImm = MI.getOperand(2).getImm();
    if (Opc == PPC::ORI || Opc == PPC::ORI8)
      Result = LogicalImm | SExtImm;
      Result = LogicalImm ^ SExtImm;
    ReplaceWithLI = true;
    Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;
4827 if (ReplaceWithLI) {
4832 bool ImmChanged = (SExtImm & NewImm) != NewImm;
4833 if (PostRA && ImmChanged)
4840 DefMI.getOperand(1).setImm(NewImm);
4844 else if (
MRI->use_empty(
MI.getOperand(0).getReg())) {
4846 assert(Immediate &&
"Transformation converted zero to non-zero?");
4849 }
else if (ImmChanged)
4858 LoadImmediateInfo LII;
4864 if (KilledDef && SetCR)
4865 *KilledDef =
nullptr;
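// Illustrative sketch (hypothetical helper, not part of this file): the
// ADDI/SUBFIC cases above fold down to plain constant arithmetic once the
// feeding LI/LI8 value (SExtImm) is known, e.g. "li r3, 100; addi r4, r3, 5"
// becomes "li r4, 105".
static int64_t foldAddImmFedByLI(int64_t SExtImm, int64_t Addend) {
  // The folded value simply becomes the operand of the replacement LI/LI8.
  return Addend + SExtImm;
}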
bool PPCInstrInfo::transformToNewImmFormFedByAdd(
    // ...
  MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
  // ...
  if (!MI.mayLoadOrStore())
    // ...
  unsigned XFormOpcode = RI.getMappedIdxOpcForImmOpc(MI.getOpcode());
  // ...
  assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
         "MI must have x-form opcode");
  // ...
  bool IsVFReg = MI.getOperand(0).isReg() &&
                 MI.getOperand(0).getReg().isPhysical() &&
                 // ...
  MachineOperand ImmOperandMI = MI.getOperand(III.ImmOpNo);
  if (!ImmOperandMI.isImm())
    // ...
  MachineOperand *ImmMO = nullptr;
  MachineOperand *RegMO = nullptr;
  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
    // ...
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");
  // ...
  int64_t ImmBase = ImmOperandMI.getImm();
  // ...
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm, ImmBase))
    // ...
  LLVM_DEBUG(dbgs() << "Replacing existing reg+imm instruction:\n");
  // ...
bool PPCInstrInfo::transformToImmFormFedByAdd(
    // ...
  if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))
    // ...
  MachineOperand *ImmMO = nullptr;
  MachineOperand *RegMO = nullptr;
  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
    // ...
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");
  // ...
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm))
    // ...
  bool IsFwdFeederRegKilled = false;
  bool SeenIntermediateUse = false;
  // ...
  if (!isRegElgibleForForwarding(*RegMO, DefMI, MI, KillDefMI,
                                 IsFwdFeederRegKilled, SeenIntermediateUse))
    // ...
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  // ...
  if (ImmMO->isImm()) {
    // ...
    if (DefMI.getOpcode() == PPC::ADDItocL8)
      // ...
    MI.removeOperand(i);
    // ...
    MI.addOperand(*ImmMO);
    // ...
  for (auto &MO : MOps)
    // ...
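// Note (illustrative, not from the original listing): the goal of this
// forwarding is to fold the constant carried by an add-immediate feeder
// directly into the fed instruction's immediate operand (for memory accesses,
// into the displacement), so the separate add can later be removed if it has
// no other users and the eligibility checks above are satisfied.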
    // ...
                                unsigned ConstantOpNo,
    // ...
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())
    // ...
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  // ...
  APInt ActualValue(64, Imm, true);
  if (!ActualValue.isSignedIntN(III.ImmWidth))
    // ...
  uint64_t UnsignedMax = (1 << III.ImmWidth) - 1;
  if ((uint64_t)Imm > UnsignedMax)
    // ...
  Register OrigZeroReg = MI.getOperand(PosForOrigZero).getReg();
  // ...
  if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
      // ...
  if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
      ConstantOpNo != PosForOrigZero)
    // ...
  unsigned Opc = MI.getOpcode();
  bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
                        Opc == PPC::SRW || Opc == PPC::SRW_rec ||
                        Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
                        Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
  bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
                        Opc == PPC::SRD || Opc == PPC::SRD_rec;
  bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
               Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
  // ...
               Opc == PPC::SRD_rec;
  // ...
  if (SpecialShift32 || SpecialShift64) {
    LoadImmediateInfo LII;
    // ...
    uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
    if (Imm & (SpecialShift32 ? 0x20 : 0x40))
      // ...
    else if (!SetCR && ShAmt == 0 && !PostRA) {
      MI.removeOperand(2);
      MI.setDesc(get(PPC::COPY));
      // ...
    if (SpecialShift32) {
      // ...
      uint64_t SH = ShAmt == 0 ? 0 : RightShift ? 32 - ShAmt : ShAmt;
      // ...
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(MB)
      // ...
      uint64_t SH = ShAmt == 0 ? 0 : RightShift ? 64 - ShAmt : ShAmt;
      uint64_t ME = RightShift ? ShAmt : 63 - ShAmt;
      // ...
      MachineInstrBuilder(*MI.getParent()->getParent(), MI).addImm(ME);
      // ...
  const TargetRegisterClass *NewRC =
      MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass)
          ? &PPC::GPRC_and_GPRC_NOR0RegClass
          : &PPC::G8RC_and_G8RC_NOX0RegClass;
  MRI.setRegClass(RegToModify, NewRC);
  // ...
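// Illustrative sketch (hypothetical helpers, not part of this file): for the
// "special" shifts above, a shift amount with bit 5 (32-bit) or bit 6 (64-bit)
// set means every bit is shifted out, so the result is known to be zero and
// the instruction can be replaced with a load-immediate of 0; otherwise the
// shift becomes a rotate-and-mask whose SH field is derived as shown.
static bool shiftFoldsToZero32(uint64_t Imm) {
  // True when the 32-bit shift amount taken from the feeding LI is >= 32.
  return (Imm & 0x20) != 0;
}
static uint64_t rotateAmountFor32BitShift(uint64_t Imm, bool RightShift) {
  uint64_t ShAmt = Imm & 0x1F;
  // A right shift by N is a rotate left by (32 - N) followed by a mask.
  return ShAmt == 0 ? 0 : (RightShift ? 32 - ShAmt : ShAmt);
}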
const TargetRegisterClass *
PPCInstrInfo::updatedRC(const TargetRegisterClass *RC) const {
  if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
    return &PPC::VSRCRegClass;
  // ...
}

int PPCInstrInfo::getRecordFormOpcode(unsigned Opcode) {
  return PPC::getRecordFormOpcode(Opcode);
}

static bool isOpZeroOfSubwordPreincLoad(int Opcode) {
  return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
          Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU ||
          Opcode == PPC::LHZUX || Opcode == PPC::LHZU8 ||
          Opcode == PPC::LHZUX8);
}
static bool definedBySignExtendingOp(const unsigned Reg,
                                     const MachineRegisterInfo *MRI) {
  // ...
  int Opcode = MI->getOpcode();
  // ...
  if (TII->isSExt32To64(Opcode))
    // ...
  if (Opcode == PPC::RLDICL && MI->getOperand(3).getImm() >= 33)
    // ...
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
      MI->getOperand(3).getImm() > 0 &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
    // ...
  if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
    // ...
    if ((Imm & 0x8000) == 0)
      // ...
static bool definedByZeroExtendingOp(const unsigned Reg,
                                     const MachineRegisterInfo *MRI) {
  // ...
  int Opcode = MI->getOpcode();
  // ...
  if (TII->isZExt32To64(Opcode))
    // ...
      Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8) &&
      MI->getOperand(0).getReg() == Reg)
    // ...
  if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
      Opcode == PPC::LIS || Opcode == PPC::LIS8) {
    int64_t Imm = MI->getOperand(1).getImm();
    if (((uint64_t)Imm & ~0x7FFFuLL) == 0)
      // ...
  if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
       Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
       Opcode == PPC::RLDICL_32_64) &&
      MI->getOperand(3).getImm() >= 32)
    // ...
  if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
      MI->getOperand(3).getImm() >= 32 &&
      MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm())
    // ...
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
       Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
    // ...
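// Note (illustrative, not from the original listing): both predicates above
// boil down to "are the high bits of the 64-bit result known to be clear?".
// RLDICL with mask-begin MB keeps only the low (64 - MB) bits of the rotated
// value, so MB >= 32 guarantees zero extension from 32 bits, and MB >= 33
// additionally clears bit 31, which is why the sign-extension check uses the
// stricter bound.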
bool PPCInstrInfo::isTOCSaveMI(const MachineInstr &MI) const {
  // ...
  if (!MI.getOperand(1).isImm() || !MI.getOperand(2).isReg())
    // ...
  unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
  // ...
  Register StackReg = MI.getOperand(2).getReg();
  // ...
void PPCInstrInfo::promoteInstr32To64ForElimEXTSW(const Register &Reg,
                                                  MachineRegisterInfo *MRI,
                                                  unsigned BinOpDepth,
                                                  LiveVariables *LV) const {
  if (!Reg.isVirtual())
    // ...
  unsigned Opcode = MI->getOpcode();
  // ...
  unsigned OperandEnd = 3, OperandStride = 1;
  if (Opcode == PPC::PHI) {
    OperandEnd = MI->getNumOperands();
    // ...
  for (unsigned I = 1; I < OperandEnd; I += OperandStride) {
    assert(MI->getOperand(I).isReg() && "Operand must be register");
    // ...
                                     BinOpDepth + 1, LV);
  // ...
  Register SrcReg = MI->getOperand(1).getReg();
  // ...
  if (SrcReg != PPC::X3)
    // ...
                                   BinOpDepth + 1, LV);
  // ...
                                   BinOpDepth + 1, LV);
  // ...
  if (RC == &PPC::G8RCRegClass || RC == &PPC::G8RC_and_G8RC_NOX0RegClass)
    // ...
  std::unordered_map<unsigned, unsigned> OpcodeMap = {
      {PPC::OR, PPC::OR8},     {PPC::ISEL, PPC::ISEL8},
      {PPC::ORI, PPC::ORI8},   {PPC::XORI, PPC::XORI8},
      {PPC::ORIS, PPC::ORIS8}, {PPC::XORIS, PPC::XORIS8},
      {PPC::AND, PPC::AND8}};
  // ...
  auto It = OpcodeMap.find(Opcode);
  if (It != OpcodeMap.end()) {
    // ...
    NewOpcode = It->second;
    // ...
    if (!TII->isSExt32To64(Opcode))
      // ...
    NewOpcode = PPC::get64BitInstrFromSignedExt32BitInstr(Opcode);
    // ...
  assert(NewOpcode != -1 &&
         "Must have a 64-bit opcode to map the 32-bit opcode!");
  // ...
      TRI->getRegClass(MCID.operands()[0].RegClass);
  // ...
  Register SrcReg = MI->getOperand(0).getReg();
  // ...
  auto MBB = MI->getParent();
  // ...
  for (unsigned i = 1; i < MI->getNumOperands(); i++) {
    // ...
    if (!Operand.isReg())
      // ...
        TRI->getRegClass(MCID.operands()[i].RegClass);
    // ...
    if (NewUsedRegRC != OrgRC && (OrgRC == &PPC::GPRCRegClass ||
                                  OrgRC == &PPC::GPRC_and_GPRC_NOR0RegClass)) {
      // ...
      Register TmpReg = MRI->createVirtualRegister(NewUsedRegRC);
      Register DstTmpReg = MRI->createVirtualRegister(NewUsedRegRC);
      // ...
      PromoteRegs[i] = DstTmpReg;
      // ...
  Register NewDefinedReg = MRI->createVirtualRegister(NewRC);
  // ...
  for (unsigned i = 1; i < MI->getNumOperands(); i++) {
    if (auto It = PromoteRegs.find(i); It != PromoteRegs.end())
      // ...
  for (unsigned i = 1; i < Iter->getNumOperands(); i++) {
    // ...
    if (!Operand.isReg())
      // ...
  MI->eraseFromParent();
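// Illustrative sketch (hypothetical helper mirroring the OpcodeMap above, not
// part of this file): the promotion picks the 64-bit twin of a 32-bit opcode
// and rebuilds the instruction over 64-bit register classes so that a later
// EXTSW of its result becomes redundant.
static unsigned promoted64BitOpcodeFor(unsigned Opcode) {
  switch (Opcode) {
  case PPC::OR:    return PPC::OR8;
  case PPC::ISEL:  return PPC::ISEL8;
  case PPC::ORI:   return PPC::ORI8;
  case PPC::XORI:  return PPC::XORI8;
  case PPC::ORIS:  return PPC::ORIS8;
  case PPC::XORIS: return PPC::XORIS8;
  case PPC::AND:   return PPC::AND8;
  default:
    // Anything else goes through the signed-extension mapping table used
    // above (PPC::get64BitInstrFromSignedExt32BitInstr).
    return Opcode;
  }
}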
std::pair<bool, bool>
PPCInstrInfo::isSignOrZeroExtended(const unsigned Reg,
                                   const unsigned BinOpDepth,
                                   const MachineRegisterInfo *MRI) const {
  // ...
    return std::pair<bool, bool>(false, false);
  // ...
    return std::pair<bool, bool>(false, false);
  // ...
  if (IsSExt && IsZExt)
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
  switch (MI->getOpcode()) {
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
    if (MI->getParent()->getBasicBlock() ==
        // ...
      return std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
    if (SrcReg != PPC::X3) {
      // ...
      return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                   SrcExt.second || IsZExt);
    // ...
    std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
    if (II == MBB->instr_begin() ||
        (--II)->getOpcode() != PPC::ADJCALLSTACKUP)
      return IsExtendPair;
    // ...
      return IsExtendPair;
    // ...
      return IsExtendPair;
    // ...
    if (IntTy && IntTy->getBitWidth() <= 32) {
      // ...
      IsSExt |= Attrs.hasAttribute(Attribute::SExt);
      IsZExt |= Attrs.hasAttribute(Attribute::ZExt);
      return std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
    return IsExtendPair;
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
      return std::pair<bool, bool>(false, SrcExt.second || IsZExt);
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
      return std::pair<bool, bool>(false, false);
    // ...
    unsigned OperandEnd = 3, OperandStride = 1;
    if (MI->getOpcode() == PPC::PHI) {
      OperandEnd = MI->getNumOperands();
      // ...
    for (unsigned I = 1; I != OperandEnd; I += OperandStride) {
      if (!MI->getOperand(I).isReg())
        return std::pair<bool, bool>(false, false);
      // ...
      IsSExt &= SrcExt.first;
      IsZExt &= SrcExt.second;
    // ...
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
      return std::pair<bool, bool>(false, false);
    // ...
    Register SrcReg1 = MI->getOperand(1).getReg();
    Register SrcReg2 = MI->getOperand(2).getReg();
    // ...
    return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
                                 Src1Ext.second || Src2Ext.second);
  // ...
  return std::pair<bool, bool>(IsSExt, IsZExt);
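// Note (illustrative, not from the original listing): the PHI-style loop above
// AND-accumulates both flags because a merged value is only known sign/zero
// extended if every incoming value is.  The final two-source case returns
// "sign-extended only if both inputs are, zero-extended if either is", which
// matches an AND-like combination, where a single known-zero-extended operand
// already clears the high bits of the result.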
bool PPCInstrInfo::isBDNZ(unsigned Opcode) const {
  return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
}

// ...
      : Loop(Loop), EndLoop(EndLoop), LoopCount(LoopCount),
        // ...
        TII(MF->getSubtarget().getInstrInfo()) {
    // ...

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // ...
    return MI == EndLoop;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &Cond) override {
    if (TripCount == -1) {
      // ...
          MF->getSubtarget<PPCSubtarget>().isPPC64() ? PPC::CTR8 : PPC::CTR,
          // ...
    }
    return TripCount > TC;
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {
    // ...
  }

  void adjustTripCount(int TripCountAdjust) override {
    // ...
    if (LoopCount->getOpcode() == PPC::LI8 ||
        // ...
  }

  void disposed(LiveIntervals *LIS) override {
    // ...
  }

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
PPCInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  // ...
  if (Preheader == LoopBB)
    Preheader = *std::next(LoopBB->pred_begin());
  // ...
  if (I != LoopBB->end() && isBDNZ(I->getOpcode())) {
    // ...
    Register LoopCountReg = LoopInst->getOperand(0).getReg();
    // ...
    return std::make_unique<PPCPipelinerLoopInfo>(LoopInst, &*I, LoopCount);
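// Note (illustrative, not from the original listing): with a statically known
// trip count the pipeliner can answer "will the loop run more than TC more
// times?" directly (TripCount > TC above); when the count is unknown
// (TripCount == -1) the query has to be materialized as a run-time check,
// which is why that path above refers to the PPC::CTR / PPC::CTR8 count
// register.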
MachineInstr *
PPCInstrInfo::findLoopInstr(MachineBasicBlock &PreHeader,
                            SmallPtrSet<MachineBasicBlock *, 8> &Visited) const {
  // ...
  unsigned LOOPi = (Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
  // ...
  for (auto &I : PreHeader.instrs())
    if (I.getOpcode() == LOOPi)
      // ...
bool PPCInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  // ...
  int64_t OffsetA = 0, OffsetB = 0;
  // ...
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  // ...
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
    // ...
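// Illustrative sketch (hypothetical helper, not part of this file; assumes
// <algorithm> and <cstdint>): two same-base, fixed-width accesses are
// trivially disjoint exactly when the lower one ends at or before the higher
// one begins, which is the comparison performed above.
static bool accessesTriviallyDisjoint(int64_t OffsetA, uint64_t WidthA,
                                      int64_t OffsetB, uint64_t WidthB) {
  int64_t LowOffset = std::min(OffsetA, OffsetB);
  int64_t HighOffset = std::max(OffsetA, OffsetB);
  uint64_t LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  // The lower access must end at or before the higher one begins.
  return LowOffset + (int64_t)LowWidth <= HighOffset;
}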