74#define DEBUG_TYPE "arm-instrinfo"
76#define GET_INSTRINFO_CTOR_DTOR
77#include "ARMGenInstrInfo.inc"
81 cl::desc(
"Enable ARM 2-addr to 3-addr conv"));
95 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
96 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
97 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
98 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
99 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
100 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
101 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
102 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
105 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
106 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
107 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
108 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
109 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
110 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
111 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
112 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
118 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
131 if (usePreRAHazardRecognizer()) {
133 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
153 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
190 default:
return nullptr;
216 unsigned OffImm =
MI.getOperand(NumOps - 2).getImm();
229 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
234 }
else if (Amt != 0) {
238 get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
247 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
260 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
267 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
276 std::vector<MachineInstr*> NewMIs;
280 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
291 NewMIs.push_back(MemMI);
292 NewMIs.push_back(UpdateMI);
296 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
309 NewMIs.push_back(UpdateMI);
310 NewMIs.push_back(MemMI);
316 if (MO.isReg() && MO.getReg().isVirtual()) {
321 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
325 if (MO.isUse() && MO.isKill()) {
326 for (
unsigned j = 0; j < 2; ++j) {
332 if (VI.removeKill(
MI))
333 VI.Kills.push_back(NewMI);
359 bool AllowModify)
const {
374 bool CantAnalyze =
false;
378 while (
I->isDebugInstr() || !
I->isTerminator() ||
380 I->getOpcode() == ARM::t2DoLoopStartTP){
392 TBB =
I->getOperand(0).getMBB();
398 assert(!FBB &&
"FBB should have been null.");
400 TBB =
I->getOperand(0).getMBB();
401 Cond.push_back(
I->getOperand(1));
402 Cond.push_back(
I->getOperand(2));
403 }
else if (
I->isReturn()) {
406 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
413 TBB =
I->getOperand(1).getMBB();
415 Cond.push_back(
I->getOperand(0));
472 int *BytesRemoved)
const {
473 assert(!BytesRemoved &&
"code size not handled");
484 I->eraseFromParent();
494 I->eraseFromParent();
503 int *BytesAdded)
const {
504 assert(!BytesAdded &&
"code size not handled");
513 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
515 "ARM branch conditions have two or three components!");
525 }
else if (
Cond.size() == 2) {
536 if (
Cond.size() == 2)
541 else if (
Cond.size() == 3)
552 if (
Cond.size() == 2) {
564 while (++
I != E &&
I->isInsideBundle()) {
565 int PIdx =
I->findFirstPredOperandIdx();
566 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
572 int PIdx =
MI.findFirstPredOperandIdx();
573 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
581 std::string GenericComment =
583 if (!GenericComment.empty())
584 return GenericComment;
588 return std::string();
592 int FirstPredOp =
MI.findFirstPredOperandIdx();
593 if (FirstPredOp != (
int) OpIdx)
594 return std::string();
596 std::string
CC =
"CC::";
603 unsigned Opc =
MI.getOpcode();
612 int PIdx =
MI.findFirstPredOperandIdx();
615 PMO.
setImm(Pred[0].getImm());
616 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
623 "CPSR def isn't expected operand");
624 assert((
MI.getOperand(1).isDead() ||
625 MI.getOperand(1).getReg() != ARM::CPSR) &&
626 "if conversion tried to stop defining used CPSR");
627 MI.getOperand(1).setReg(ARM::NoRegister);
637 if (Pred1.
size() > 2 || Pred2.
size() > 2)
662 std::vector<MachineOperand> &Pred,
663 bool SkipDead)
const {
666 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
667 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
668 if (ClobbersCPSR || IsCPSR) {
686 for (
const auto &MO :
MI.operands())
687 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
693 switch (
MI->getOpcode()) {
694 default:
return true;
725 if (!
MI.isPredicable())
763 if (!MO.isReg() || MO.isUndef() || MO.isUse())
765 if (MO.getReg() != ARM::CPSR)
785 switch (
MI.getOpcode()) {
793 case TargetOpcode::BUNDLE:
794 return getInstBundleLength(
MI);
795 case ARM::CONSTPOOL_ENTRY:
796 case ARM::JUMPTABLE_INSTS:
797 case ARM::JUMPTABLE_ADDRS:
798 case ARM::JUMPTABLE_TBB:
799 case ARM::JUMPTABLE_TBH:
802 return MI.getOperand(2).getImm();
804 return MI.getOperand(1).getImm();
806 case ARM::INLINEASM_BR: {
808 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
816unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
820 while (++
I != E &&
I->isInsideBundle()) {
821 assert(!
I->isBundle() &&
"No nested bundle!");
829 unsigned DestReg,
bool KillSrc,
831 unsigned Opc = Subtarget.isThumb()
832 ? (Subtarget.
isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
849 unsigned SrcReg,
bool KillSrc,
851 unsigned Opc = Subtarget.isThumb()
852 ? (Subtarget.
isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
886 unsigned Cond,
unsigned Inactive) {
895 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
896 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
898 if (GPRDest && GPRSrc) {
906 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
907 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
910 if (SPRDest && SPRSrc)
912 else if (GPRDest && SPRSrc)
914 else if (SPRDest && GPRSrc)
916 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
918 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
919 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
924 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
926 if (Opc == ARM::MVE_VORR)
928 else if (Opc != ARM::MQPRCopy)
934 unsigned BeginIdx = 0;
935 unsigned SubRegs = 0;
939 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
940 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
941 BeginIdx = ARM::qsub_0;
943 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
944 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
945 BeginIdx = ARM::qsub_0;
948 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
950 BeginIdx = ARM::dsub_0;
952 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
954 BeginIdx = ARM::dsub_0;
956 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
958 BeginIdx = ARM::dsub_0;
960 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
961 Opc = Subtarget.
isThumb2() ? ARM::tMOVr : ARM::MOVr;
962 BeginIdx = ARM::gsub_0;
964 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
966 BeginIdx = ARM::dsub_0;
969 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
971 BeginIdx = ARM::dsub_0;
974 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
976 BeginIdx = ARM::dsub_0;
979 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
980 !Subtarget.hasFP64()) {
982 BeginIdx = ARM::ssub_0;
984 }
else if (SrcReg == ARM::CPSR) {
987 }
else if (DestReg == ARM::CPSR) {
990 }
else if (DestReg == ARM::VPR) {
996 }
else if (SrcReg == ARM::VPR) {
1002 }
else if (DestReg == ARM::FPSCR_NZCV) {
1004 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
1008 }
else if (SrcReg == ARM::FPSCR_NZCV) {
1010 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
1016 assert(Opc &&
"Impossible reg-to-reg copy");
1022 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
1023 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
1029 for (
unsigned i = 0; i != SubRegs; ++i) {
1030 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
1031 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
1032 assert(Dst && Src &&
"Bad sub-register");
1034 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
1039 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
1043 if (Opc == ARM::MVE_VORR)
1048 if (Opc == ARM::MOVr)
1057std::optional<DestSourcePair>
1066 if (!
MI.isMoveReg() ||
1067 (
MI.getOpcode() == ARM::VORRq &&
1068 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
1069 return std::nullopt;
1073std::optional<ParamLoadedValue>
1077 Register DstReg = DstSrcPair->Destination->getReg();
1098 return std::nullopt;
1105 unsigned SubIdx,
unsigned State,
1108 return MIB.
addReg(Reg, State);
1111 return MIB.
addReg(
TRI->getSubReg(Reg, SubIdx), State);
1112 return MIB.
addReg(Reg, State, SubIdx);
1117 Register SrcReg,
bool isKill,
int FI,
1129 switch (
TRI->getSpillSize(*RC)) {
1131 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1142 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1149 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1156 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1167 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1174 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1175 if (Subtarget.hasV5TEOps()) {
1195 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1211 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1212 Subtarget.hasMVEIntegerOps()) {
1217 .addMemOperand(MMO);
1223 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1226 Subtarget.hasNEON()) {
1240 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1247 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1248 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1249 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1251 Subtarget.hasNEON()) {
1260 }
else if (Subtarget.hasMVEIntegerOps()) {
1272 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1273 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1280 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1281 Subtarget.hasMVEIntegerOps()) {
1286 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1292 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1293 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1294 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, 0,
TRI);
1295 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, 0,
TRI);
1296 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, 0,
TRI);
1297 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, 0,
TRI);
1308 int &FrameIndex)
const {
1309 switch (
MI.getOpcode()) {
1313 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1314 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1315 MI.getOperand(3).getImm() == 0) {
1316 FrameIndex =
MI.getOperand(1).getIndex();
1317 return MI.getOperand(0).getReg();
1325 case ARM::VSTR_P0_off:
1326 case ARM::MVE_VSTRWU32:
1327 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1328 MI.getOperand(2).getImm() == 0) {
1329 FrameIndex =
MI.getOperand(1).getIndex();
1330 return MI.getOperand(0).getReg();
1334 case ARM::VST1d64TPseudo:
1335 case ARM::VST1d64QPseudo:
1336 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1337 FrameIndex =
MI.getOperand(0).getIndex();
1338 return MI.getOperand(2).getReg();
1342 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1343 FrameIndex =
MI.getOperand(1).getIndex();
1344 return MI.getOperand(0).getReg();
1347 case ARM::MQQPRStore:
1348 case ARM::MQQQQPRStore:
1349 if (
MI.getOperand(1).isFI()) {
1350 FrameIndex =
MI.getOperand(1).getIndex();
1351 return MI.getOperand(0).getReg();
1360 int &FrameIndex)
const {
1362 if (
MI.mayStore() && hasStoreToStackSlot(
MI, Accesses) &&
1363 Accesses.
size() == 1) {
1365 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1387 switch (
TRI->getSpillSize(*RC)) {
1389 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1399 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1405 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1411 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1421 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1427 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1430 if (Subtarget.hasV5TEOps()) {
1453 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1466 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1467 Subtarget.hasMVEIntegerOps()) {
1469 MIB.addFrameIndex(FI)
1471 .addMemOperand(MMO);
1477 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1479 Subtarget.hasNEON()) {
1500 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1501 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1502 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1504 Subtarget.hasNEON()) {
1510 }
else if (Subtarget.hasMVEIntegerOps()) {
1530 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1531 Subtarget.hasMVEIntegerOps()) {
1535 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1559 int &FrameIndex)
const {
1560 switch (
MI.getOpcode()) {
1564 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1565 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1566 MI.getOperand(3).getImm() == 0) {
1567 FrameIndex =
MI.getOperand(1).getIndex();
1568 return MI.getOperand(0).getReg();
1576 case ARM::VLDR_P0_off:
1577 case ARM::MVE_VLDRWU32:
1578 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1579 MI.getOperand(2).getImm() == 0) {
1580 FrameIndex =
MI.getOperand(1).getIndex();
1581 return MI.getOperand(0).getReg();
1585 case ARM::VLD1d8TPseudo:
1586 case ARM::VLD1d16TPseudo:
1587 case ARM::VLD1d32TPseudo:
1588 case ARM::VLD1d64TPseudo:
1589 case ARM::VLD1d8QPseudo:
1590 case ARM::VLD1d16QPseudo:
1591 case ARM::VLD1d32QPseudo:
1592 case ARM::VLD1d64QPseudo:
1593 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1594 FrameIndex =
MI.getOperand(1).getIndex();
1595 return MI.getOperand(0).getReg();
1599 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1600 FrameIndex =
MI.getOperand(1).getIndex();
1601 return MI.getOperand(0).getReg();
1604 case ARM::MQQPRLoad:
1605 case ARM::MQQQQPRLoad:
1606 if (
MI.getOperand(1).isFI()) {
1607 FrameIndex =
MI.getOperand(1).getIndex();
1608 return MI.getOperand(0).getReg();
1617 int &FrameIndex)
const {
1619 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI, Accesses) &&
1620 Accesses.
size() == 1) {
1622 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1633 bool isThumb2 = Subtarget.
isThumb2();
1640 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1642 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1643 : isThumb1 ? ARM::tLDMIA_UPD
1647 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1650 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1652 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1653 : isThumb1 ? ARM::tSTMIA_UPD
1657 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1672 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1673 return TRI.getEncodingValue(Reg1) <
1674 TRI.getEncodingValue(Reg2);
1677 for (
const auto &Reg : ScratchRegs) {
1686 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1687 expandLoadStackGuard(
MI);
1688 MI.getParent()->erase(
MI);
1692 if (
MI.getOpcode() == ARM::MEMCPY) {
1701 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1706 Register DstRegS =
MI.getOperand(0).getReg();
1707 Register SrcRegS =
MI.getOperand(1).getReg();
1708 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1712 unsigned DstRegD =
TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1714 unsigned SrcRegD =
TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1716 if (!DstRegD || !SrcRegD)
1722 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1726 if (
MI.getOperand(0).isDead())
1735 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1736 if (ImpDefIdx != -1)
1737 MI.removeOperand(ImpDefIdx);
1740 MI.setDesc(
get(ARM::VMOVD));
1741 MI.getOperand(0).setReg(DstRegD);
1742 MI.getOperand(1).setReg(SrcRegD);
1749 MI.getOperand(1).setIsUndef();
1754 if (
MI.getOperand(1).isKill()) {
1755 MI.getOperand(1).setIsKill(
false);
1756 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1770 assert(MCPE.isMachineConstantPoolEntry() &&
1771 "Expecting a machine constantpool entry!");
1785 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
ARMCP::CPValue,
1790 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1793 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1801 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1821 case ARM::tLDRpci_pic:
1822 case ARM::t2LDRpci_pic: {
1842 switch (
I->getOpcode()) {
1843 case ARM::tLDRpci_pic:
1844 case ARM::t2LDRpci_pic: {
1846 unsigned CPI =
I->getOperand(1).getIndex();
1848 I->getOperand(1).setIndex(CPI);
1849 I->getOperand(2).setImm(PCLabelId);
1853 if (!
I->isBundledWithSucc())
1864 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1865 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1866 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1867 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1868 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1869 Opcode == ARM::t2MOV_ga_pcrel) {
1880 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1881 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1882 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1883 Opcode == ARM::t2MOV_ga_pcrel)
1895 if (isARMCP0 && isARMCP1) {
1901 }
else if (!isARMCP0 && !isARMCP1) {
1905 }
else if (Opcode == ARM::PICLDR) {
1913 if (Addr0 != Addr1) {
1949 int64_t &Offset2)
const {
1956 auto IsLoadOpcode = [&](
unsigned Opcode) {
1971 case ARM::t2LDRSHi8:
1973 case ARM::t2LDRBi12:
1974 case ARM::t2LDRSHi12:
1993 if (isa<ConstantSDNode>(Load1->
getOperand(1)) &&
1995 Offset1 = cast<ConstantSDNode>(Load1->
getOperand(1))->getSExtValue();
1996 Offset2 = cast<ConstantSDNode>(Load2->
getOperand(1))->getSExtValue();
2015 int64_t Offset1, int64_t Offset2,
2016 unsigned NumLoads)
const {
2020 assert(Offset2 > Offset1);
2022 if ((Offset2 - Offset1) / 8 > 64)
2053 if (
MI.isDebugInstr())
2057 if (
MI.isTerminator() ||
MI.isPosition())
2061 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2075 while (++
I !=
MBB->
end() &&
I->isDebugInstr())
2077 if (
I !=
MBB->
end() &&
I->getOpcode() == ARM::t2IT)
2088 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
2096 unsigned NumCycles,
unsigned ExtraPredCycles,
2106 if (!Pred->
empty()) {
2108 if (LastMI->
getOpcode() == ARM::t2Bcc) {
2117 MBB, 0, 0, Probability);
2122 unsigned TCycles,
unsigned TExtra,
2124 unsigned FCycles,
unsigned FExtra,
2141 const unsigned ScalingUpFactor = 1024;
2143 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2144 unsigned UnpredCost;
2145 if (!Subtarget.hasBranchPredictor()) {
2148 unsigned NotTakenBranchCost = 1;
2150 unsigned TUnpredCycles, FUnpredCycles;
2153 TUnpredCycles = TCycles + NotTakenBranchCost;
2154 FUnpredCycles = TakenBranchCost;
2157 TUnpredCycles = TCycles + TakenBranchCost;
2158 FUnpredCycles = FCycles + NotTakenBranchCost;
2161 PredCost -= 1 * ScalingUpFactor;
2164 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2165 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2166 UnpredCost = TUnpredCost + FUnpredCost;
2169 if (Subtarget.
isThumb2() && TCycles + FCycles > 4) {
2170 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2173 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2174 unsigned FUnpredCost =
2176 UnpredCost = TUnpredCost + FUnpredCost;
2177 UnpredCost += 1 * ScalingUpFactor;
2181 return PredCost <= UnpredCost;
2186 unsigned NumInsts)
const {
2194 unsigned MaxInsts = Subtarget.
restrictIT() ? 1 : 4;
2203 if (
MI.getOpcode() == ARM::t2Bcc &&
2226 return Subtarget.isProfitableToUnpredicate();
2234 int PIdx =
MI.findFirstPredOperandIdx();
2240 PredReg =
MI.getOperand(PIdx+1).getReg();
2249 if (Opc == ARM::t2B)
2258 unsigned OpIdx2)
const {
2259 switch (
MI.getOpcode()) {
2261 case ARM::t2MOVCCr: {
2286 if (!Reg.isVirtual())
2288 if (!
MRI.hasOneNonDBGUse(Reg))
2300 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2307 if (MO.getReg().isPhysical())
2309 if (MO.isDef() && !MO.isDead())
2312 bool DontMoveAcrossStores =
true;
2313 if (!
MI->isSafeToMove(
nullptr, DontMoveAcrossStores))
2320 unsigned &TrueOp,
unsigned &FalseOp,
2321 bool &Optimizable)
const {
2322 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2323 "Unknown select instruction");
2332 Cond.push_back(
MI.getOperand(3));
2333 Cond.push_back(
MI.getOperand(4));
2342 bool PreferFalse)
const {
2343 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2344 "Unknown select instruction");
2347 bool Invert = !
DefMI;
2349 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2356 Register DestReg =
MI.getOperand(0).getReg();
2359 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2361 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2372 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2375 unsigned CondCode =
MI.getOperand(3).getImm();
2380 NewMI.
add(
MI.getOperand(4));
2391 NewMI.
add(FalseReg);
2422 {ARM::ADDSri, ARM::ADDri},
2423 {ARM::ADDSrr, ARM::ADDrr},
2424 {ARM::ADDSrsi, ARM::ADDrsi},
2425 {ARM::ADDSrsr, ARM::ADDrsr},
2427 {ARM::SUBSri, ARM::SUBri},
2428 {ARM::SUBSrr, ARM::SUBrr},
2429 {ARM::SUBSrsi, ARM::SUBrsi},
2430 {ARM::SUBSrsr, ARM::SUBrsr},
2432 {ARM::RSBSri, ARM::RSBri},
2433 {ARM::RSBSrsi, ARM::RSBrsi},
2434 {ARM::RSBSrsr, ARM::RSBrsr},
2436 {ARM::tADDSi3, ARM::tADDi3},
2437 {ARM::tADDSi8, ARM::tADDi8},
2438 {ARM::tADDSrr, ARM::tADDrr},
2439 {ARM::tADCS, ARM::tADC},
2441 {ARM::tSUBSi3, ARM::tSUBi3},
2442 {ARM::tSUBSi8, ARM::tSUBi8},
2443 {ARM::tSUBSrr, ARM::tSUBrr},
2444 {ARM::tSBCS, ARM::tSBC},
2445 {ARM::tRSBS, ARM::tRSB},
2446 {ARM::tLSLSri, ARM::tLSLri},
2448 {ARM::t2ADDSri, ARM::t2ADDri},
2449 {ARM::t2ADDSrr, ARM::t2ADDrr},
2450 {ARM::t2ADDSrs, ARM::t2ADDrs},
2452 {ARM::t2SUBSri, ARM::t2SUBri},
2453 {ARM::t2SUBSrr, ARM::t2SUBrr},
2454 {ARM::t2SUBSrs, ARM::t2SUBrs},
2456 {ARM::t2RSBSri, ARM::t2RSBri},
2457 {ARM::t2RSBSrs, ARM::t2RSBrs},
2462 if (OldOpc == Entry.PseudoOpc)
2463 return Entry.MachineOpc;
2474 if (NumBytes == 0 && DestReg != BaseReg) {
2483 bool isSub = NumBytes < 0;
2484 if (isSub) NumBytes = -NumBytes;
2488 unsigned ThisVal = NumBytes & llvm::rotr<uint32_t>(0xFF, RotAmt);
2489 assert(ThisVal &&
"Didn't extract field correctly");
2492 NumBytes &= ~ThisVal;
2497 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2510 unsigned NumBytes) {
2521 if (!IsPush && !IsPop)
2524 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2525 MI->getOpcode() == ARM::VLDMDIA_UPD;
2526 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2527 MI->getOpcode() == ARM::tPOP ||
2528 MI->getOpcode() == ARM::tPOP_RET;
2530 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2531 MI->getOperand(1).getReg() == ARM::SP)) &&
2532 "trying to fold sp update into non-sp-updating push/pop");
2537 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2542 int RegListIdx = IsT1PushPop ? 2 : 4;
2545 unsigned RegsNeeded;
2548 RegsNeeded = NumBytes / 8;
2549 RegClass = &ARM::DPRRegClass;
2551 RegsNeeded = NumBytes / 4;
2552 RegClass = &ARM::GPRRegClass;
2562 unsigned FirstRegEnc = -1;
2565 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2570 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2571 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2574 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2577 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2579 unsigned CurReg = RegClass->
getRegister(CurRegEnc);
2580 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2587 false,
false,
true));
2597 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2619 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2620 MI->removeOperand(i);
2633 unsigned Opcode =
MI.getOpcode();
2639 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2642 if (Opcode == ARM::ADDri) {
2643 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2646 MI.setDesc(
TII.get(ARM::MOVr));
2647 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2648 MI.removeOperand(FrameRegIdx+1);
2654 MI.setDesc(
TII.get(ARM::SUBri));
2660 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2661 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2669 unsigned ThisImmVal =
Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);
2676 "Bit extraction didn't work?");
2677 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2679 unsigned ImmIdx = 0;
2681 unsigned NumBits = 0;
2685 ImmIdx = FrameRegIdx + 1;
2686 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2690 ImmIdx = FrameRegIdx+2;
2697 ImmIdx = FrameRegIdx+2;
2708 ImmIdx = FrameRegIdx+1;
2716 ImmIdx = FrameRegIdx+1;
2726 ImmIdx = FrameRegIdx+1;
2727 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2736 Offset += InstrOffs * Scale;
2737 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2747 int ImmedOffset =
Offset / Scale;
2748 unsigned Mask = (1 << NumBits) - 1;
2749 if ((
unsigned)
Offset <= Mask * Scale) {
2751 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2757 ImmedOffset = -ImmedOffset;
2759 ImmedOffset |= 1 << NumBits;
2767 ImmedOffset = ImmedOffset & Mask;
2770 ImmedOffset = -ImmedOffset;
2772 ImmedOffset |= 1 << NumBits;
2788 Register &SrcReg2, int64_t &CmpMask,
2789 int64_t &CmpValue)
const {
2790 switch (
MI.getOpcode()) {
2795 SrcReg =
MI.getOperand(0).getReg();
2798 CmpValue =
MI.getOperand(1).getImm();
2803 SrcReg =
MI.getOperand(0).getReg();
2804 SrcReg2 =
MI.getOperand(1).getReg();
2810 SrcReg =
MI.getOperand(0).getReg();
2812 CmpMask =
MI.getOperand(1).getImm();
2825 int CmpMask,
bool CommonUse) {
2826 switch (
MI->getOpcode()) {
2829 if (CmpMask !=
MI->getOperand(2).getImm())
2831 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2921 switch (
MI->getOpcode()) {
2922 default:
return false;
3018 if (!
MI)
return false;
3021 if (CmpMask != ~0) {
3025 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
3027 if (UI->getParent() != CmpInstr.
getParent())
3036 if (!
MI)
return false;
3045 if (
I ==
B)
return false;
3056 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
3061 if (CmpInstr.
getOpcode() == ARM::CMPri ||
3069 bool IsThumb1 =
false;
3086 if (
MI && IsThumb1) {
3088 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
3089 bool CanReorder =
true;
3090 for (;
I != E; --
I) {
3091 if (
I->getOpcode() != ARM::tMOVi8) {
3097 MI =
MI->removeFromParent();
3108 bool SubAddIsThumb1 =
false;
3123 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
3124 Instr.readsRegister(ARM::CPSR,
TRI))
3146 IsThumb1 = SubAddIsThumb1;
3161 bool isSafe =
false;
3164 while (!isSafe && ++
I != E) {
3166 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3167 !isSafe && IO != EO; ++IO) {
3181 bool IsInstrVSel =
true;
3182 switch (Instr.getOpcode()) {
3184 IsInstrVSel =
false;
3218 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3219 Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3220 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3222 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3234 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3268 if (Succ->isLiveIn(ARM::CPSR))
3275 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3276 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3277 MI->getOperand(CPSRRegNum).setIsDef(
true);
3285 for (
unsigned i = 0, e = OperandsToUpdate.
size(); i < e; i++)
3286 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3288 MI->clearRegisterDeads(ARM::CPSR);
3302 int64_t CmpMask, CmpValue;
3304 if (Next !=
MI.getParent()->end() &&
3315 unsigned DefOpc =
DefMI.getOpcode();
3316 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3317 DefOpc != ARM::tMOVi32imm)
3319 if (!
DefMI.getOperand(1).isImm())
3323 if (!
MRI->hasOneNonDBGUse(Reg))
3339 if (
UseMI.getOperand(NumOps - 1).
getReg() == ARM::CPSR)
3345 unsigned UseOpc =
UseMI.getOpcode();
3346 unsigned NewUseOpc = 0;
3348 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3349 bool Commute =
false;
3351 default:
return false;
3359 case ARM::t2EORrr: {
3365 if (UseOpc == ARM::SUBrr && Commute)
3371 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3374 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3388 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3389 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3393 case ARM::t2SUBrr: {
3394 if (UseOpc == ARM::t2SUBrr && Commute)
3399 const bool ToSP =
DefMI.getOperand(0).
getReg() == ARM::SP;
3400 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3401 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3403 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3406 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3421 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3422 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3429 unsigned OpIdx = Commute ? 2 : 1;
3431 bool isKill =
UseMI.getOperand(OpIdx).isKill();
3433 Register NewReg =
MRI->createVirtualRegister(TRC);
3441 UseMI.getOperand(1).setReg(NewReg);
3442 UseMI.getOperand(1).setIsKill();
3443 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3444 DefMI.eraseFromParent();
3451 case ARM::t2ADDspImm:
3452 case ARM::t2SUBspImm:
3462 switch (
MI.getOpcode()) {
3466 assert(UOps >= 0 &&
"bad # UOps");
3474 unsigned ShOpVal =
MI.getOperand(3).getImm();
3479 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3487 if (!
MI.getOperand(2).getReg())
3490 unsigned ShOpVal =
MI.getOperand(3).getImm();
3495 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3505 case ARM::LDRSB_POST:
3506 case ARM::LDRSH_POST: {
3509 return (Rt == Rm) ? 4 : 3;
3512 case ARM::LDR_PRE_REG:
3513 case ARM::LDRB_PRE_REG: {
3518 unsigned ShOpVal =
MI.getOperand(4).getImm();
3523 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3529 case ARM::STR_PRE_REG:
3530 case ARM::STRB_PRE_REG: {
3531 unsigned ShOpVal =
MI.getOperand(4).getImm();
3536 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3543 case ARM::STRH_PRE: {
3553 case ARM::LDR_POST_REG:
3554 case ARM::LDRB_POST_REG:
3555 case ARM::LDRH_POST: {
3558 return (Rt == Rm) ? 3 : 2;
3561 case ARM::LDR_PRE_IMM:
3562 case ARM::LDRB_PRE_IMM:
3563 case ARM::LDR_POST_IMM:
3564 case ARM::LDRB_POST_IMM:
3565 case ARM::STRB_POST_IMM:
3566 case ARM::STRB_POST_REG:
3567 case ARM::STRB_PRE_IMM:
3568 case ARM::STRH_POST:
3569 case ARM::STR_POST_IMM:
3570 case ARM::STR_POST_REG:
3571 case ARM::STR_PRE_IMM:
3574 case ARM::LDRSB_PRE:
3575 case ARM::LDRSH_PRE: {
3582 unsigned ShOpVal =
MI.getOperand(4).getImm();
3587 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3600 return (Rt == Rn) ? 3 : 2;
3611 case ARM::LDRD_POST:
3612 case ARM::t2LDRD_POST:
3615 case ARM::STRD_POST:
3616 case ARM::t2STRD_POST:
3619 case ARM::LDRD_PRE: {
3626 return (Rt == Rn) ? 4 : 3;
3629 case ARM::t2LDRD_PRE: {
3632 return (Rt == Rn) ? 4 : 3;
3635 case ARM::STRD_PRE: {
3643 case ARM::t2STRD_PRE:
3646 case ARM::t2LDR_POST:
3647 case ARM::t2LDRB_POST:
3648 case ARM::t2LDRB_PRE:
3649 case ARM::t2LDRSBi12:
3650 case ARM::t2LDRSBi8:
3651 case ARM::t2LDRSBpci:
3653 case ARM::t2LDRH_POST:
3654 case ARM::t2LDRH_PRE:
3656 case ARM::t2LDRSB_POST:
3657 case ARM::t2LDRSB_PRE:
3658 case ARM::t2LDRSH_POST:
3659 case ARM::t2LDRSH_PRE:
3660 case ARM::t2LDRSHi12:
3661 case ARM::t2LDRSHi8:
3662 case ARM::t2LDRSHpci:
3666 case ARM::t2LDRDi8: {
3669 return (Rt == Rn) ? 3 : 2;
3672 case ARM::t2STRB_POST:
3673 case ARM::t2STRB_PRE:
3676 case ARM::t2STRH_POST:
3677 case ARM::t2STRH_PRE:
3679 case ARM::t2STR_POST:
3680 case ARM::t2STR_PRE:
3711 E =
MI.memoperands_end();
3713 Size += (*I)->getSize().getValue();
3720 return std::min(
Size / 4, 16U);
3725 unsigned UOps = 1 + NumRegs;
3729 case ARM::VLDMDIA_UPD:
3730 case ARM::VLDMDDB_UPD:
3731 case ARM::VLDMSIA_UPD:
3732 case ARM::VLDMSDB_UPD:
3733 case ARM::VSTMDIA_UPD:
3734 case ARM::VSTMDDB_UPD:
3735 case ARM::VSTMSIA_UPD:
3736 case ARM::VSTMSDB_UPD:
3737 case ARM::LDMIA_UPD:
3738 case ARM::LDMDA_UPD:
3739 case ARM::LDMDB_UPD:
3740 case ARM::LDMIB_UPD:
3741 case ARM::STMIA_UPD:
3742 case ARM::STMDA_UPD:
3743 case ARM::STMDB_UPD:
3744 case ARM::STMIB_UPD:
3745 case ARM::tLDMIA_UPD:
3746 case ARM::tSTMIA_UPD:
3747 case ARM::t2LDMIA_UPD:
3748 case ARM::t2LDMDB_UPD:
3749 case ARM::t2STMIA_UPD:
3750 case ARM::t2STMDB_UPD:
3753 case ARM::LDMIA_RET:
3755 case ARM::t2LDMIA_RET:
3764 if (!ItinData || ItinData->
isEmpty())
3768 unsigned Class =
Desc.getSchedClass();
3770 if (ItinUOps >= 0) {
3777 unsigned Opc =
MI.getOpcode();
3796 case ARM::VLDMDIA_UPD:
3797 case ARM::VLDMDDB_UPD:
3799 case ARM::VLDMSIA_UPD:
3800 case ARM::VLDMSDB_UPD:
3802 case ARM::VSTMDIA_UPD:
3803 case ARM::VSTMDDB_UPD:
3805 case ARM::VSTMSIA_UPD:
3806 case ARM::VSTMSDB_UPD: {
3807 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3808 return (NumRegs / 2) + (NumRegs % 2) + 1;
3811 case ARM::LDMIA_RET:
3816 case ARM::LDMIA_UPD:
3817 case ARM::LDMDA_UPD:
3818 case ARM::LDMDB_UPD:
3819 case ARM::LDMIB_UPD:
3824 case ARM::STMIA_UPD:
3825 case ARM::STMDA_UPD:
3826 case ARM::STMDB_UPD:
3827 case ARM::STMIB_UPD:
3829 case ARM::tLDMIA_UPD:
3830 case ARM::tSTMIA_UPD:
3834 case ARM::t2LDMIA_RET:
3837 case ARM::t2LDMIA_UPD:
3838 case ARM::t2LDMDB_UPD:
3841 case ARM::t2STMIA_UPD:
3842 case ARM::t2STMDB_UPD: {
3843 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3855 unsigned UOps = (NumRegs / 2);
3861 unsigned UOps = (NumRegs / 2);
3864 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3875std::optional<unsigned>
3878 unsigned DefIdx,
unsigned DefAlign)
const {
3887 DefCycle = RegNo / 2 + 1;
3892 bool isSLoad =
false;
3897 case ARM::VLDMSIA_UPD:
3898 case ARM::VLDMSDB_UPD:
3905 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3909 DefCycle = RegNo + 2;
3915std::optional<unsigned>
3918 unsigned DefIdx,
unsigned DefAlign)
const {
3928 DefCycle = RegNo / 2;
3934 DefCycle = (RegNo / 2);
3937 if ((RegNo % 2) || DefAlign < 8)
3943 DefCycle = RegNo + 2;
3949std::optional<unsigned>
3952 unsigned UseIdx,
unsigned UseAlign)
const {
3960 UseCycle = RegNo / 2 + 1;
3965 bool isSStore =
false;
3970 case ARM::VSTMSIA_UPD:
3971 case ARM::VSTMSDB_UPD:
3978 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3982 UseCycle = RegNo + 2;
3988std::optional<unsigned>
3991 unsigned UseIdx,
unsigned UseAlign)
const {
3998 UseCycle = RegNo / 2;
4004 UseCycle = (RegNo / 2);
4007 if ((RegNo % 2) || UseAlign < 8)
4018 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
4019 unsigned UseIdx,
unsigned UseAlign)
const {
4029 std::optional<unsigned> DefCycle;
4030 bool LdmBypass =
false;
4037 case ARM::VLDMDIA_UPD:
4038 case ARM::VLDMDDB_UPD:
4040 case ARM::VLDMSIA_UPD:
4041 case ARM::VLDMSDB_UPD:
4042 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4045 case ARM::LDMIA_RET:
4050 case ARM::LDMIA_UPD:
4051 case ARM::LDMDA_UPD:
4052 case ARM::LDMDB_UPD:
4053 case ARM::LDMIB_UPD:
4055 case ARM::tLDMIA_UPD:
4057 case ARM::t2LDMIA_RET:
4060 case ARM::t2LDMIA_UPD:
4061 case ARM::t2LDMDB_UPD:
4063 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4071 std::optional<unsigned> UseCycle;
4078 case ARM::VSTMDIA_UPD:
4079 case ARM::VSTMDDB_UPD:
4081 case ARM::VSTMSIA_UPD:
4082 case ARM::VSTMSDB_UPD:
4083 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4090 case ARM::STMIA_UPD:
4091 case ARM::STMDA_UPD:
4092 case ARM::STMDB_UPD:
4093 case ARM::STMIB_UPD:
4094 case ARM::tSTMIA_UPD:
4099 case ARM::t2STMIA_UPD:
4100 case ARM::t2STMDB_UPD:
4101 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4109 if (UseCycle > *DefCycle + 1)
4110 return std::nullopt;
4112 UseCycle = *DefCycle - *UseCycle + 1;
4113 if (UseCycle > 0u) {
4119 UseCycle = *UseCycle - 1;
4121 UseClass, UseIdx)) {
4122 UseCycle = *UseCycle - 1;
4131 unsigned &DefIdx,
unsigned &Dist) {
4136 assert(II->isInsideBundle() &&
"Empty bundle?");
4139 while (II->isInsideBundle()) {
4140 Idx = II->findRegisterDefOperandIdx(Reg,
TRI,
false,
true);
4147 assert(
Idx != -1 &&
"Cannot find bundled definition!");
4154 unsigned &UseIdx,
unsigned &Dist) {
4158 assert(II->isInsideBundle() &&
"Empty bundle?");
4163 while (II != E && II->isInsideBundle()) {
4164 Idx = II->findRegisterUseOperandIdx(Reg,
TRI,
false);
4167 if (II->getOpcode() != ARM::t2IT)
4195 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4205 case ARM::t2LDRSHs: {
4207 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4208 if (ShAmt == 0 || ShAmt == 2)
4213 }
else if (Subtarget.
isSwift()) {
4220 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4225 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4236 case ARM::t2LDRSHs: {
4238 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4239 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4246 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4253 case ARM::VLD1q8wb_fixed:
4254 case ARM::VLD1q16wb_fixed:
4255 case ARM::VLD1q32wb_fixed:
4256 case ARM::VLD1q64wb_fixed:
4257 case ARM::VLD1q8wb_register:
4258 case ARM::VLD1q16wb_register:
4259 case ARM::VLD1q32wb_register:
4260 case ARM::VLD1q64wb_register:
4267 case ARM::VLD2d8wb_fixed:
4268 case ARM::VLD2d16wb_fixed:
4269 case ARM::VLD2d32wb_fixed:
4270 case ARM::VLD2q8wb_fixed:
4271 case ARM::VLD2q16wb_fixed:
4272 case ARM::VLD2q32wb_fixed:
4273 case ARM::VLD2d8wb_register:
4274 case ARM::VLD2d16wb_register:
4275 case ARM::VLD2d32wb_register:
4276 case ARM::VLD2q8wb_register:
4277 case ARM::VLD2q16wb_register:
4278 case ARM::VLD2q32wb_register:
4283 case ARM::VLD3d8_UPD:
4284 case ARM::VLD3d16_UPD:
4285 case ARM::VLD3d32_UPD:
4286 case ARM::VLD1d64Twb_fixed:
4287 case ARM::VLD1d64Twb_register:
4288 case ARM::VLD3q8_UPD:
4289 case ARM::VLD3q16_UPD:
4290 case ARM::VLD3q32_UPD:
4295 case ARM::VLD4d8_UPD:
4296 case ARM::VLD4d16_UPD:
4297 case ARM::VLD4d32_UPD:
4298 case ARM::VLD1d64Qwb_fixed:
4299 case ARM::VLD1d64Qwb_register:
4300 case ARM::VLD4q8_UPD:
4301 case ARM::VLD4q16_UPD:
4302 case ARM::VLD4q32_UPD:
4303 case ARM::VLD1DUPq8:
4304 case ARM::VLD1DUPq16:
4305 case ARM::VLD1DUPq32:
4306 case ARM::VLD1DUPq8wb_fixed:
4307 case ARM::VLD1DUPq16wb_fixed:
4308 case ARM::VLD1DUPq32wb_fixed:
4309 case ARM::VLD1DUPq8wb_register:
4310 case ARM::VLD1DUPq16wb_register:
4311 case ARM::VLD1DUPq32wb_register:
4312 case ARM::VLD2DUPd8:
4313 case ARM::VLD2DUPd16:
4314 case ARM::VLD2DUPd32:
4315 case ARM::VLD2DUPd8wb_fixed:
4316 case ARM::VLD2DUPd16wb_fixed:
4317 case ARM::VLD2DUPd32wb_fixed:
4318 case ARM::VLD2DUPd8wb_register:
4319 case ARM::VLD2DUPd16wb_register:
4320 case ARM::VLD2DUPd32wb_register:
4321 case ARM::VLD4DUPd8:
4322 case ARM::VLD4DUPd16:
4323 case ARM::VLD4DUPd32:
4324 case ARM::VLD4DUPd8_UPD:
4325 case ARM::VLD4DUPd16_UPD:
4326 case ARM::VLD4DUPd32_UPD:
4328 case ARM::VLD1LNd16:
4329 case ARM::VLD1LNd32:
4330 case ARM::VLD1LNd8_UPD:
4331 case ARM::VLD1LNd16_UPD:
4332 case ARM::VLD1LNd32_UPD:
4334 case ARM::VLD2LNd16:
4335 case ARM::VLD2LNd32:
4336 case ARM::VLD2LNq16:
4337 case ARM::VLD2LNq32:
4338 case ARM::VLD2LNd8_UPD:
4339 case ARM::VLD2LNd16_UPD:
4340 case ARM::VLD2LNd32_UPD:
4341 case ARM::VLD2LNq16_UPD:
4342 case ARM::VLD2LNq32_UPD:
4344 case ARM::VLD4LNd16:
4345 case ARM::VLD4LNd32:
4346 case ARM::VLD4LNq16:
4347 case ARM::VLD4LNq32:
4348 case ARM::VLD4LNd8_UPD:
4349 case ARM::VLD4LNd16_UPD:
4350 case ARM::VLD4LNd32_UPD:
4351 case ARM::VLD4LNq16_UPD:
4352 case ARM::VLD4LNq32_UPD:
4366 if (!ItinData || ItinData->
isEmpty())
4367 return std::nullopt;
4373 unsigned DefAdj = 0;
4374 if (
DefMI.isBundle())
4383 unsigned UseAdj = 0;
4384 if (
UseMI.isBundle()) {
4388 return std::nullopt;
4391 return getOperandLatencyImpl(
4392 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4393 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4396std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4398 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4400 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4401 if (Reg == ARM::CPSR) {
4402 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4404 return Subtarget.
isLikeA9() ? 1 : 20;
4408 if (
UseMI.isBranch())
4428 return std::nullopt;
4430 unsigned DefAlign =
DefMI.hasOneMemOperand()
4433 unsigned UseAlign =
UseMI.hasOneMemOperand()
4439 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4442 return std::nullopt;
4445 int Adj = DefAdj + UseAdj;
4449 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4456std::optional<unsigned>
4458 SDNode *DefNode,
unsigned DefIdx,
4459 SDNode *UseNode,
unsigned UseIdx)
const {
4465 if (isZeroCost(DefMCID.
Opcode))
4468 if (!ItinData || ItinData->
isEmpty())
4469 return DefMCID.
mayLoad() ? 3 : 1;
4472 std::optional<unsigned>
Latency =
4475 int Threshold = 1 + Adj;
4480 auto *DefMN = cast<MachineSDNode>(DefNode);
4481 unsigned DefAlign = !DefMN->memoperands_empty()
4482 ? (*DefMN->memoperands_begin())->
getAlign().value()
4484 auto *UseMN = cast<MachineSDNode>(UseNode);
4485 unsigned UseAlign = !UseMN->memoperands_empty()
4486 ? (*UseMN->memoperands_begin())->
getAlign().value()
4489 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4491 return std::nullopt;
4512 case ARM::t2LDRSHs: {
4515 if (ShAmt == 0 || ShAmt == 2)
4530 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4547 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4554 case ARM::VLD1q8wb_register:
4555 case ARM::VLD1q16wb_register:
4556 case ARM::VLD1q32wb_register:
4557 case ARM::VLD1q64wb_register:
4558 case ARM::VLD1q8wb_fixed:
4559 case ARM::VLD1q16wb_fixed:
4560 case ARM::VLD1q32wb_fixed:
4561 case ARM::VLD1q64wb_fixed:
4565 case ARM::VLD2q8Pseudo:
4566 case ARM::VLD2q16Pseudo:
4567 case ARM::VLD2q32Pseudo:
4568 case ARM::VLD2d8wb_fixed:
4569 case ARM::VLD2d16wb_fixed:
4570 case ARM::VLD2d32wb_fixed:
4571 case ARM::VLD2q8PseudoWB_fixed:
4572 case ARM::VLD2q16PseudoWB_fixed:
4573 case ARM::VLD2q32PseudoWB_fixed:
4574 case ARM::VLD2d8wb_register:
4575 case ARM::VLD2d16wb_register:
4576 case ARM::VLD2d32wb_register:
4577 case ARM::VLD2q8PseudoWB_register:
4578 case ARM::VLD2q16PseudoWB_register:
4579 case ARM::VLD2q32PseudoWB_register:
4580 case ARM::VLD3d8Pseudo:
4581 case ARM::VLD3d16Pseudo:
4582 case ARM::VLD3d32Pseudo:
4583 case ARM::VLD1d8TPseudo:
4584 case ARM::VLD1d16TPseudo:
4585 case ARM::VLD1d32TPseudo:
4586 case ARM::VLD1d64TPseudo:
4587 case ARM::VLD1d64TPseudoWB_fixed:
4588 case ARM::VLD1d64TPseudoWB_register:
4589 case ARM::VLD3d8Pseudo_UPD:
4590 case ARM::VLD3d16Pseudo_UPD:
4591 case ARM::VLD3d32Pseudo_UPD:
4592 case ARM::VLD3q8Pseudo_UPD:
4593 case ARM::VLD3q16Pseudo_UPD:
4594 case ARM::VLD3q32Pseudo_UPD:
4595 case ARM::VLD3q8oddPseudo:
4596 case ARM::VLD3q16oddPseudo:
4597 case ARM::VLD3q32oddPseudo:
4598 case ARM::VLD3q8oddPseudo_UPD:
4599 case ARM::VLD3q16oddPseudo_UPD:
4600 case ARM::VLD3q32oddPseudo_UPD:
4601 case ARM::VLD4d8Pseudo:
4602 case ARM::VLD4d16Pseudo:
4603 case ARM::VLD4d32Pseudo:
4604 case ARM::VLD1d8QPseudo:
4605 case ARM::VLD1d16QPseudo:
4606 case ARM::VLD1d32QPseudo:
4607 case ARM::VLD1d64QPseudo:
4608 case ARM::VLD1d64QPseudoWB_fixed:
4609 case ARM::VLD1d64QPseudoWB_register:
4610 case ARM::VLD1q8HighQPseudo:
4611 case ARM::VLD1q8LowQPseudo_UPD:
4612 case ARM::VLD1q8HighTPseudo:
4613 case ARM::VLD1q8LowTPseudo_UPD:
4614 case ARM::VLD1q16HighQPseudo:
4615 case ARM::VLD1q16LowQPseudo_UPD:
4616 case ARM::VLD1q16HighTPseudo:
4617 case ARM::VLD1q16LowTPseudo_UPD:
4618 case ARM::VLD1q32HighQPseudo:
4619 case ARM::VLD1q32LowQPseudo_UPD:
4620 case ARM::VLD1q32HighTPseudo:
4621 case ARM::VLD1q32LowTPseudo_UPD:
4622 case ARM::VLD1q64HighQPseudo:
4623 case ARM::VLD1q64LowQPseudo_UPD:
4624 case ARM::VLD1q64HighTPseudo:
4625 case ARM::VLD1q64LowTPseudo_UPD:
4626 case ARM::VLD4d8Pseudo_UPD:
4627 case ARM::VLD4d16Pseudo_UPD:
4628 case ARM::VLD4d32Pseudo_UPD:
4629 case ARM::VLD4q8Pseudo_UPD:
4630 case ARM::VLD4q16Pseudo_UPD:
4631 case ARM::VLD4q32Pseudo_UPD:
4632 case ARM::VLD4q8oddPseudo:
4633 case ARM::VLD4q16oddPseudo:
4634 case ARM::VLD4q32oddPseudo:
4635 case ARM::VLD4q8oddPseudo_UPD:
4636 case ARM::VLD4q16oddPseudo_UPD:
4637 case ARM::VLD4q32oddPseudo_UPD:
4638 case ARM::VLD1DUPq8:
4639 case ARM::VLD1DUPq16:
4640 case ARM::VLD1DUPq32:
4641 case ARM::VLD1DUPq8wb_fixed:
4642 case ARM::VLD1DUPq16wb_fixed:
4643 case ARM::VLD1DUPq32wb_fixed:
4644 case ARM::VLD1DUPq8wb_register:
4645 case ARM::VLD1DUPq16wb_register:
4646 case ARM::VLD1DUPq32wb_register:
4647 case ARM::VLD2DUPd8:
4648 case ARM::VLD2DUPd16:
4649 case ARM::VLD2DUPd32:
4650 case ARM::VLD2DUPd8wb_fixed:
4651 case ARM::VLD2DUPd16wb_fixed:
4652 case ARM::VLD2DUPd32wb_fixed:
4653 case ARM::VLD2DUPd8wb_register:
4654 case ARM::VLD2DUPd16wb_register:
4655 case ARM::VLD2DUPd32wb_register:
4656 case ARM::VLD2DUPq8EvenPseudo:
4657 case ARM::VLD2DUPq8OddPseudo:
4658 case ARM::VLD2DUPq16EvenPseudo:
4659 case ARM::VLD2DUPq16OddPseudo:
4660 case ARM::VLD2DUPq32EvenPseudo:
4661 case ARM::VLD2DUPq32OddPseudo:
4662 case ARM::VLD3DUPq8EvenPseudo:
4663 case ARM::VLD3DUPq8OddPseudo:
4664 case ARM::VLD3DUPq16EvenPseudo:
4665 case ARM::VLD3DUPq16OddPseudo:
4666 case ARM::VLD3DUPq32EvenPseudo:
4667 case ARM::VLD3DUPq32OddPseudo:
4668 case ARM::VLD4DUPd8Pseudo:
4669 case ARM::VLD4DUPd16Pseudo:
4670 case ARM::VLD4DUPd32Pseudo:
4671 case ARM::VLD4DUPd8Pseudo_UPD:
4672 case ARM::VLD4DUPd16Pseudo_UPD:
4673 case ARM::VLD4DUPd32Pseudo_UPD:
4674 case ARM::VLD4DUPq8EvenPseudo:
4675 case ARM::VLD4DUPq8OddPseudo:
4676 case ARM::VLD4DUPq16EvenPseudo:
4677 case ARM::VLD4DUPq16OddPseudo:
4678 case ARM::VLD4DUPq32EvenPseudo:
4679 case ARM::VLD4DUPq32OddPseudo:
4680 case ARM::VLD1LNq8Pseudo:
4681 case ARM::VLD1LNq16Pseudo:
4682 case ARM::VLD1LNq32Pseudo:
4683 case ARM::VLD1LNq8Pseudo_UPD:
4684 case ARM::VLD1LNq16Pseudo_UPD:
4685 case ARM::VLD1LNq32Pseudo_UPD:
4686 case ARM::VLD2LNd8Pseudo:
4687 case ARM::VLD2LNd16Pseudo:
4688 case ARM::VLD2LNd32Pseudo:
4689 case ARM::VLD2LNq16Pseudo:
4690 case ARM::VLD2LNq32Pseudo:
4691 case ARM::VLD2LNd8Pseudo_UPD:
4692 case ARM::VLD2LNd16Pseudo_UPD:
4693 case ARM::VLD2LNd32Pseudo_UPD:
4694 case ARM::VLD2LNq16Pseudo_UPD:
4695 case ARM::VLD2LNq32Pseudo_UPD:
4696 case ARM::VLD4LNd8Pseudo:
4697 case ARM::VLD4LNd16Pseudo:
4698 case ARM::VLD4LNd32Pseudo:
4699 case ARM::VLD4LNq16Pseudo:
4700 case ARM::VLD4LNq32Pseudo:
4701 case ARM::VLD4LNd8Pseudo_UPD:
4702 case ARM::VLD4LNd16Pseudo_UPD:
4703 case ARM::VLD4LNd32Pseudo_UPD:
4704 case ARM::VLD4LNq16Pseudo_UPD:
4705 case ARM::VLD4LNq32Pseudo_UPD:
4715unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4716 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4726 !Subtarget.cheapPredicableCPSRDef())) {
4736 unsigned *PredCost)
const {
4737 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4743 if (
MI.isBundle()) {
4747 while (++
I != E &&
I->isInsideBundle()) {
4748 if (
I->getOpcode() != ARM::t2IT)
4749 Latency += getInstrLatency(ItinData, *
I, PredCost);
4756 !Subtarget.cheapPredicableCPSRDef()))) {
4764 return MI.mayLoad() ? 3 : 1;
4777 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->
getAlign().value() : 0;
4779 if (Adj >= 0 || (
int)
Latency > -Adj) {
4787 if (!
Node->isMachineOpcode())
4790 if (!ItinData || ItinData->
isEmpty())
4793 unsigned Opcode =
Node->getMachineOpcode();
4803bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4808 unsigned UseIdx)
const {
4811 if (Subtarget.nonpipelinedVFP() &&
4826 unsigned DefIdx)
const {
4828 if (!ItinData || ItinData->
isEmpty())
4833 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4834 std::optional<unsigned> DefCycle =
4836 return DefCycle && DefCycle <= 2U;
4844 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4847 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4849 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4850 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4851 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4855 if (
MI.getOpcode() == ARM::tPUSH ||
4856 MI.getOpcode() == ARM::tPOP ||
4857 MI.getOpcode() == ARM::tPOP_RET) {
4859 if (MO.isImplicit() || !MO.isReg())
4862 if (Reg < ARM::R0 || Reg > ARM::R7) {
4863 if (!(
MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4864 !(
MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4865 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4871 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4872 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4873 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4874 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4875 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4896 for (
auto Op :
MI.operands()) {
4903 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4913 unsigned LoadImmOpc,
4914 unsigned LoadOpc)
const {
4916 "ROPI/RWPI not currently supported with stack guard");
4924 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4926 "TLS stack protector requires hardware TLS register");
4937 Offset = M.getStackProtectorGuardOffset();
4942 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4952 cast<GlobalValue>((*
MI->memoperands_begin())->getValue());
4961 else if (IsIndirect)
4963 }
else if (IsIndirect) {
4967 if (LoadImmOpc == ARM::tMOVi32imm) {
4970 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
5006 unsigned &AddSubOpc,
5007 bool &NegAcc,
bool &HasLane)
const {
5009 if (
I == MLxEntryMap.
end())
5013 MulOpc = Entry.MulOpc;
5014 AddSubOpc = Entry.AddSubOpc;
5015 NegAcc = Entry.NegAcc;
5016 HasLane = Entry.HasLane;
5040std::pair<uint16_t, uint16_t>
5044 if (Subtarget.hasNEON()) {
5053 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
5054 MI.getOpcode() == ARM::VMOVS))
5061 return std::make_pair(
ExeNEON, 0);
5066 return std::make_pair(
ExeNEON, 0);
5069 return std::make_pair(
ExeVFP, 0);
5075 unsigned SReg,
unsigned &Lane) {
5076 unsigned DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
5079 if (DReg != ARM::NoRegister)
5083 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
5085 assert(DReg &&
"S-register with no D super-register?");
5106 unsigned Lane,
unsigned &ImplicitSReg) {
5109 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
5115 ImplicitSReg =
TRI->getSubReg(DReg,
5116 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
5118 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
5133 unsigned DstReg, SrcReg, DReg;
5137 switch (
MI.getOpcode()) {
5149 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
5152 DstReg =
MI.getOperand(0).getReg();
5153 SrcReg =
MI.getOperand(1).getReg();
5155 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5156 MI.removeOperand(i - 1);
5159 MI.setDesc(
get(ARM::VORRd));
5171 DstReg =
MI.getOperand(0).getReg();
5172 SrcReg =
MI.getOperand(1).getReg();
5174 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5175 MI.removeOperand(i - 1);
5182 MI.setDesc(
get(ARM::VGETLNi32));
5198 DstReg =
MI.getOperand(0).getReg();
5199 SrcReg =
MI.getOperand(1).getReg();
5203 unsigned ImplicitSReg;
5207 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5208 MI.removeOperand(i - 1);
5212 MI.setDesc(
get(ARM::VSETLNi32));
5222 if (ImplicitSReg != 0)
5231 DstReg =
MI.getOperand(0).getReg();
5232 SrcReg =
MI.getOperand(1).getReg();
5234 unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5238 unsigned ImplicitSReg;
5242 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5243 MI.removeOperand(i - 1);
5248 MI.setDesc(
get(ARM::VDUPLN32d));
5258 if (ImplicitSReg != 0)
5282 unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5283 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5286 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5287 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5292 if (SrcLane == DstLane)
5295 MI.setDesc(
get(ARM::VEXTd32));
5300 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5301 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5304 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5305 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5310 if (SrcLane != DstLane)
5316 if (ImplicitSReg != 0)
5343 if (!PartialUpdateClearance)
5354 switch (
MI.getOpcode()) {
5360 case ARM::VMOVv4i16:
5361 case ARM::VMOVv2i32:
5362 case ARM::VMOVv2f32:
5363 case ARM::VMOVv1i64:
5364 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5368 case ARM::VLD1LNd32:
5377 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5381 if (Reg.isVirtual()) {
5383 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5385 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5387 unsigned DReg =
TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5389 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5395 return PartialUpdateClearance;
5402 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5407 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5408 unsigned DReg = Reg;
5411 if (ARM::SPRRegClass.
contains(Reg)) {
5412 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5413 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5416 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5417 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5430 MI.addRegisterKilled(DReg,
TRI,
true);
5434 return Subtarget.hasFeature(ARM::HasV6KOps);
5438 if (
MI->getNumOperands() < 4)
5440 unsigned ShOpVal =
MI->getOperand(3).getImm();
5444 ((ShImm == 1 || ShImm == 2) &&
5454 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5455 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5457 switch (
MI.getOpcode()) {
5469 MOReg = &
MI.getOperand(2);
5481 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5482 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5484 switch (
MI.getOpcode()) {
5495 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5504 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5505 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5507 switch (
MI.getOpcode()) {
5508 case ARM::VSETLNi32:
5509 case ARM::MVE_VMOV_to_lane_32:
5520 InsertedReg.
Reg = MOInsertedReg.
getReg();
5528std::pair<unsigned, unsigned>
5531 return std::make_pair(TF & Mask, TF & ~Mask);
5536 using namespace ARMII;
5538 static const std::pair<unsigned, const char *> TargetFlags[] = {
5539 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5540 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5541 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5548 using namespace ARMII;
5550 static const std::pair<unsigned, const char *> TargetFlags[] = {
5551 {MO_COFFSTUB,
"arm-coffstub"},
5552 {MO_GOT,
"arm-got"},
5553 {MO_SBREL,
"arm-sbrel"},
5554 {MO_DLLIMPORT,
"arm-dllimport"},
5555 {MO_SECREL,
"arm-secrel"},
5556 {MO_NONLAZY,
"arm-nonlazy"}};
5560std::optional<RegImmPair>
5563 unsigned Opcode =
MI.getOpcode();
5570 return std::nullopt;
5573 if (Opcode == ARM::SUBri)
5575 else if (Opcode != ARM::ADDri)
5576 return std::nullopt;
5581 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5582 return std::nullopt;
5584 Offset =
MI.getOperand(2).getImm() * Sign;
5592 for (
auto I =
From;
I != To; ++
I)
5593 if (
I->modifiesRegister(Reg,
TRI))
5606 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5608 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5614 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5616 Register Reg = CmpMI->getOperand(0).getReg();
5619 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5632 if (Subtarget->isThumb()) {
5634 return ForCodesize ? 2 : 1;
5635 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5638 return ForCodesize ? 4 : 1;
5640 return ForCodesize ? 4 : 2;
5642 return ForCodesize ? 4 : 2;
5644 return ForCodesize ? 4 : 2;
5647 return ForCodesize ? 4 : 1;
5649 return ForCodesize ? 4 : 1;
5650 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5651 return ForCodesize ? 4 : 1;
5653 return ForCodesize ? 8 : 2;
5655 return ForCodesize ? 8 : 2;
5658 return ForCodesize ? 8 : 2;
5659 return ForCodesize ? 8 : 3;
5808 : CallTailCall(target.
isThumb() ? 4 : 4),
5809 FrameTailCall(target.
isThumb() ? 0 : 0),
5810 CallThunk(target.
isThumb() ? 4 : 4),
5811 FrameThunk(target.
isThumb() ? 0 : 0),
5812 CallNoLRSave(target.
isThumb() ? 4 : 4),
5813 FrameNoLRSave(target.
isThumb() ? 2 : 4),
5814 CallRegSave(target.
isThumb() ? 8 : 12),
5815 FrameRegSave(target.
isThumb() ? 2 : 4),
5816 CallDefault(target.
isThumb() ? 8 : 12),
5817 FrameDefault(target.
isThumb() ? 2 : 4),
5818 SaveRestoreLROnStack(target.
isThumb() ? 8 : 8) {}
5831 for (
Register Reg : ARM::rGPRRegClass) {
5832 if (!(Reg < regsReserved.
size() && regsReserved.
test(Reg)) &&
5835 C.isAvailableAcrossAndOutOfSeq(Reg,
TRI) &&
5836 C.isAvailableInsideSeq(Reg,
TRI))
5850 for (;
I != E; ++
I) {
5854 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5858 unsigned Opcode =
MI.getOpcode();
5859 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5860 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5861 Opcode == ARM::tBXNS_RET) {
5867 if (
MI.readsRegister(ARM::LR, &
TRI))
5873std::optional<outliner::OutlinedFunction>
5875 std::vector<outliner::Candidate> &RepeatedSequenceLocs)
const {
5878 unsigned SequenceSize = 0;
5879 for (
auto &
MI : FirstCand)
5883 unsigned FlagsSetInAll = 0xF;
5888 FlagsSetInAll &=
C.Flags;
5907 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5915 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5918 if (RepeatedSequenceLocs.size() < 2)
5919 return std::nullopt;
5938 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5939 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5940 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5942 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5944 if (RepeatedSequenceLocs.size() < 2)
5945 return std::nullopt;
5955 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5956 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5957 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5959 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5961 if (RepeatedSequenceLocs.size() < 2)
5962 return std::nullopt;
5967 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5970 auto SetCandidateCallInfo =
5971 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5973 C.setCallInfo(CallID, NumBytesForCall);
5978 const auto &SomeMFI =
5981 if (SomeMFI.branchTargetEnforcement()) {
5990 if (SomeMFI.shouldSignReturnAddress(
true)) {
6000 if (RepeatedSequenceLocs[0].back().isTerminator()) {
6004 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
6005 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
6006 LastInstrOpcode == ARM::tBLXr ||
6007 LastInstrOpcode == ARM::tBLXr_noip ||
6008 LastInstrOpcode == ARM::tBLXi) {
6016 unsigned NumBytesNoStackCalls = 0;
6017 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
6022 const auto Last =
C.getMBB()->rbegin();
6023 const bool LRIsAvailable =
6024 C.getMBB()->isReturnBlock() && !
Last->isCall()
6027 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
6028 if (LRIsAvailable) {
6032 CandidatesWithoutStackFixups.push_back(
C);
6037 else if (findRegisterToSaveLRTo(
C)) {
6041 CandidatesWithoutStackFixups.push_back(
C);
6046 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
6049 CandidatesWithoutStackFixups.push_back(
C);
6055 NumBytesNoStackCalls += SequenceSize;
6061 if (NumBytesNoStackCalls <=
6062 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
6063 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
6071 if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
6074 if (std::any_of(FirstCand.
begin(), std::prev(FirstCand.
end()),
6089 NumBytesToCreateFrame, FrameID);
6092bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
6095 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
6120 unsigned NumOps =
MI->getDesc().getNumOperands();
6121 unsigned ImmIdx = NumOps - 3;
6125 int64_t OffVal =
Offset.getImm();
6131 unsigned NumBits = 0;
6160 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6180 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6181 "Can't encode this offset!");
6182 OffVal +=
Fixup / Scale;
6184 unsigned Mask = (1 << NumBits) - 1;
6186 if (OffVal <= Mask) {
6188 MI->getOperand(ImmIdx).setImm(OffVal);
6196 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6200 const Function &CFn =
C.getMF()->getFunction();
6204 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6212 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6231 unsigned &Flags)
const {
6235 "Suitable Machine Function for outlining must track liveness");
6243 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6244 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6248 if (R12AvailableInBlock && CPSRAvailableInBlock)
6249 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6256 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6258 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6264 Flags |= MachineOutlinerMBBFlags::HasCalls;
6268 bool LRIsAvailable =
6273 Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6280 unsigned Flags)
const {
6286 unsigned Opc =
MI.getOpcode();
6287 if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6288 Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6289 Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6290 Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6291 Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6292 Opc == ARM::t2MOV_ga_pcrel)
6296 if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6297 Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6298 Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6299 Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6300 Opc == ARM::t2LoopEndDec)
6309 if (
MI.isTerminator())
6315 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6323 if (MOP.isGlobal()) {
6324 Callee = dyn_cast<Function>(MOP.getGlobal());
6332 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6333 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6341 if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6342 Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6347 return UnknownCallOutlineType;
6356 return UnknownCallOutlineType;
6364 return UnknownCallOutlineType;
6372 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6376 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6389 bool MightNeedStackFixUp =
6390 (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6391 MachineOutlinerMBBFlags::HasCalls));
6393 if (!MightNeedStackFixUp)
6399 if (
MI.modifiesRegister(ARM::SP,
TRI))
6413 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6414 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6418 if (
MI.isCFIInstruction())
6449 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6464 int64_t StackPosEntry =
6474 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6482 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6496 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6497 unsigned DwarfReg =
MRI->getDwarfRegNum(Reg,
true);
6508 bool CFI,
bool Auth)
const {
6524 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6528 if (!Subtarget.isThumb())
6539 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6540 int64_t StackPosEntry =
6547 int64_t LRPosEntry =
6554 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6567void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
6571 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6573 int64_t LRPosEntry =
6587 bool isThumb = Subtarget.isThumb();
6588 unsigned FuncOp =
isThumb ? 2 : 0;
6589 unsigned Opc = Call->getOperand(FuncOp).isReg()
6590 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6595 .
add(Call->getOperand(FuncOp));
6596 if (
isThumb && !Call->getOperand(FuncOp).isReg())
6598 Call->eraseFromParent();
6603 return MI.isCall() && !
MI.isReturn();
6611 Et = std::prev(
MBB.
end());
6623 ->shouldSignReturnAddress(
true);
6624 saveLROnStack(
MBB, It,
true, Auth);
6629 "Can only fix up stack references once");
6630 fixupPostOutline(
MBB);
6633 restoreLRFromStack(
MBB, Et,
true, Auth);
6653 fixupPostOutline(
MBB);
6662 bool isThumb = Subtarget.isThumb();
6668 ? Subtarget.
isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6679 Opc =
isThumb ? ARM::tBL : ARM::BL;
6695 Register Reg = findRegisterToSaveLRTo(
C);
6696 assert(Reg != 0 &&
"No callee-saved register available?");
6701 emitCFIForLRSaveToReg(
MBB, It, Reg);
6705 emitCFIForLRRestoreFromReg(
MBB, It);
6725bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(
6759 static int constexpr MAX_STAGES = 30;
6760 static int constexpr LAST_IS_USE = MAX_STAGES;
6761 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6762 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6763 typedef std::map<unsigned, IterNeed> IterNeeds;
6766 const IterNeeds &CIN);
6778 : EndLoop(EndLoop), LoopCount(LoopCount),
6780 TII(MF->getSubtarget().getInstrInfo()) {}
6782 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6784 return MI == EndLoop ||
MI == LoopCount;
6788 if (tooMuchRegisterPressure(SSD, SMS))
6794 std::optional<bool> createTripCountGreaterCondition(
6805 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6810 if (
I.getOpcode() == ARM::t2LoopDec)
6812 assert(LoopDec &&
"Unable to find copied LoopDec");
6818 .
addReg(ARM::NoRegister);
6828 void adjustTripCount(
int TripCountAdjust)
override {}
6830 void disposed()
override {}
6834 const IterNeeds &CIN) {
6836 for (
const auto &
N : CIN) {
6837 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6838 for (
int I = 0;
I < Cnt; ++
I)
6843 for (
const auto &
N : CIN) {
6844 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6845 for (
int I = 0;
I < Cnt; ++
I)
6853 IterNeeds CrossIterationNeeds;
6858 for (
auto &SU : SSD.
SUnits) {
6861 for (
auto &S : SU.Succs)
6864 if (
Reg.isVirtual())
6865 CrossIterationNeeds.insert(std::make_pair(
Reg.id(), IterNeed()))
6866 .first->second.set(0);
6867 }
else if (S.isAssignedRegDep()) {
6869 if (OStg >= 0 && OStg != Stg) {
6871 if (
Reg.isVirtual())
6872 CrossIterationNeeds.insert(std::make_pair(
Reg.id(), IterNeed()))
6873 .first->second |= ((1 << (OStg - Stg)) - 1);
6882 std::vector<SUnit *> ProposedSchedule;
6886 std::deque<SUnit *> Instrs =
6888 std::sort(Instrs.begin(), Instrs.end(),
6889 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6890 for (
SUnit *SU : Instrs)
6891 ProposedSchedule.push_back(SU);
6897 for (
auto *SU : ProposedSchedule)
6901 if (!MO.isReg() || !MO.getReg())
6904 auto CIter = CrossIterationNeeds.find(
Reg.id());
6905 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6906 CIter->second[SEEN_AS_LIVE])
6908 if (MO.isDef() && !MO.isDead())
6909 CIter->second.set(SEEN_AS_LIVE);
6910 else if (MO.isUse())
6911 CIter->second.set(LAST_IS_USE);
6913 for (
auto &CI : CrossIterationNeeds)
6914 CI.second.reset(LAST_IS_USE);
6920 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6924 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6926 for (
auto *SU : ProposedSchedule) {
6928 RPTracker.setPos(std::next(CurInstI));
6934 if (!MO.isReg() || !MO.getReg())
6937 if (MO.isDef() && !MO.isDead()) {
6938 auto CIter = CrossIterationNeeds.find(
Reg.id());
6939 if (CIter != CrossIterationNeeds.end()) {
6940 CIter->second.reset(0);
6941 CIter->second.reset(SEEN_AS_LIVE);
6945 for (
auto &S : SU->Preds) {
6947 if (S.isAssignedRegDep()) {
6949 auto CIter = CrossIterationNeeds.find(
Reg.id());
6950 if (CIter != CrossIterationNeeds.end()) {
6952 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6953 if (Stg - Stg2 < MAX_STAGES)
6954 CIter->second.set(Stg - Stg2);
6955 CIter->second.set(SEEN_AS_LIVE);
6960 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6963 auto &
P = RPTracker.getPressure().MaxSetPressure;
6964 for (
unsigned I = 0, E =
P.size();
I < E; ++
I)
6965 if (
P[
I] >
TRI->getRegPressureSetLimit(*MF,
I)) {
6973std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6977 if (Preheader == LoopBB)
6978 Preheader = *std::next(LoopBB->
pred_begin());
6980 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6986 for (
auto &L : LoopBB->
instrs()) {
6993 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
7007 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
7008 for (
auto &L : LoopBB->
instrs())
7013 Register LoopDecResult =
I->getOperand(0).getReg();
7016 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
7019 for (
auto &J : Preheader->
instrs())
7020 if (J.getOpcode() == ARM::t2DoLoopStart)
7024 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoad(int Opcode)
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, unsigned DReg, unsigned Lane, unsigned &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static cl::opt< bool > EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden, cl::desc("Enable ARM 2-addr to 3-addr conv"))
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is Live
This file defines the DenseMap class.
const HexagonInstrInfo * TII
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
outliner::InstrType getOutliningTypeImpl(MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State, const TargetRegisterInfo *TRI) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
ARMBaseInstrInfo(const ARMSubtarget &STI)
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
ARMConstantPoolConstant - ARM-specific constant pool values for Constants, Functions,...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic block.
ARMConstantPoolSymbol - ARM-specific constantpool values for external symbols.
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
bool isTargetMachO() const
ARMLdStMultipleTiming getLdStMultipleTiming() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool isReadTPSoft() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned getMispredictionPenalty() const
const ARMBaseRegisterInfo * getRegisterInfo() const override
unsigned getReturnOpcode() const
Returns the correct return opcode for the current feature set.
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
bool isTargetCOFF() const
unsigned getPartialUpdateClearance() const
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
int getPreISelOperandLatencyAdjustment() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A possibly irreducible generalization of a Loop.
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterDead - Add information about the fact that the specified register is dead after bei...
void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterKilled - Add information about the fact that the specified register is killed after...
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_undefined From now on the previous value of Register can't be restored anymore.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1, unsigned Register2, SMLoc Loc={})
.cfi_register Previous value of Register1 is saved in register Register2.
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool hasImplicitDefOfPhysReg(unsigned Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
bool isCall() const
Return true if the instruction is a call.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
unsigned getOpcode() const
Return the opcode number for this descriptor.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
instr_iterator instr_end()
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
union llvm::MachineConstantPoolEntry::@196 Val
The constant itself.
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
unsigned addFrameInst(const MCCFIInstruction &Inst)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCodes getOppositeCondition(CondCodes CC)
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
unsigned getBLXpredOpcode(const MachineFunction &MF)
static bool isIndirectBranchOpcode(int Opc)
bool getAlign(const Function &F, unsigned index, unsigned &align)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
unsigned getUndefRegState(bool B)
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
unsigned getKillRegState(bool B)
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
static bool isCalleeSavedRegister(unsigned Reg, const MCPhysReg *CSRegs)
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
VarInfo - This represents the regions where a virtual register is live in the program.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineBasicBlock::iterator begin()
MachineBasicBlock::iterator end()
The information necessary to create an outlined function for some class of candidate.
unsigned FrameConstructionID
Target-defined identifier for constructing a frame for this function.
std::vector< Candidate > Candidates