#define DEBUG_TYPE "arm-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
    EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
                   cl::desc("Enable ARM 2-addr to 3-addr conv"));
    // MLxOpc,        MulOpc,         AddSubOpc,    NegAcc, HasLane
    // fp scalar ops
    { ARM::VMLAS,     ARM::VMULS,     ARM::VADDS,   false,  false },
    { ARM::VMLSS,     ARM::VMULS,     ARM::VSUBS,   false,  false },
    { ARM::VMLAD,     ARM::VMULD,     ARM::VADDD,   false,  false },
    { ARM::VMLSD,     ARM::VMULD,     ARM::VSUBD,   false,  false },
    { ARM::VNMLAS,    ARM::VNMULS,    ARM::VSUBS,   true,   false },
    { ARM::VNMLSS,    ARM::VMULS,     ARM::VSUBS,   true,   false },
    { ARM::VNMLAD,    ARM::VNMULD,    ARM::VSUBD,   true,   false },
    { ARM::VNMLSD,    ARM::VMULD,     ARM::VSUBD,   true,   false },

    // fp SIMD ops
    { ARM::VMLAfd,    ARM::VMULfd,    ARM::VADDfd,  false,  false },
    { ARM::VMLSfd,    ARM::VMULfd,    ARM::VSUBfd,  false,  false },
    { ARM::VMLAfq,    ARM::VMULfq,    ARM::VADDfq,  false,  false },
    { ARM::VMLSfq,    ARM::VMULfq,    ARM::VSUBfq,  false,  false },
    { ARM::VMLAslfd,  ARM::VMULslfd,  ARM::VADDfd,  false,  true },
    { ARM::VMLSslfd,  ARM::VMULslfd,  ARM::VSUBfd,  false,  true },
    { ARM::VMLAslfq,  ARM::VMULslfq,  ARM::VADDfq,  false,  true },
    { ARM::VMLSslfq,  ARM::VMULslfq,  ARM::VSUBfq,  false,  true },
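// Editor's note: a minimal, self-contained sketch (not the LLVM code itself)
// of how a table like ARM_MLxTable is typically consumed: map a fused
// multiply-accumulate opcode to the mul/add pair it decomposes into. The
// struct fields mirror the columns above; the opcode values are hypothetical
// stand-ins. The real constructor builds a map over the table in the loop
// that follows.
#include <algorithm>
#include <iterator>

namespace sketch {
struct MLxEntry {
  unsigned MLxOpc;    // fused multiply-add/sub opcode
  unsigned MulOpc;    // equivalent multiply opcode
  unsigned AddSubOpc; // equivalent add/sub opcode
  bool NegAcc;        // true if the accumulator is negated
  bool HasLane;       // true if the multiply takes a lane index
};

// Hypothetical opcode numbers, for illustration only.
enum : unsigned { VMLAS = 1, VMULS = 2, VADDS = 3 };

constexpr MLxEntry MLxTable[] = {
    {VMLAS, VMULS, VADDS, /*NegAcc=*/false, /*HasLane=*/false},
};

// Returns the entry describing how to expand Opc, or nullptr if Opc is not
// a fused multiply-accumulate this table knows about.
inline const MLxEntry *findMLxEntry(unsigned Opc) {
  auto It = std::find_if(std::begin(MLxTable), std::end(MLxTable),
                         [Opc](const MLxEntry &E) { return E.MLxOpc == Opc; });
  return It == std::end(MLxTable) ? nullptr : It;
}
} // namespace sketch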
  for (unsigned i = 0, e = std::size(ARM_MLxTable); i != e; ++i) {

  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II =
        static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();

        std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4, true));
  default:
    return nullptr;

  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();

                       get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
    } else if (Amt != 0) {
                       get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
                       get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                       get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                       get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)

  std::vector<MachineInstr *> NewMIs;

        BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);

        BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);

    if (MO.isReg() && MO.getReg().isVirtual()) {
      MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
      if (MO.isUse() && MO.isKill()) {
        for (unsigned j = 0; j < 2; ++j) {
          if (VI.removeKill(MI))
            VI.Kills.push_back(NewMI);
                                     bool AllowModify) const {
  bool CantAnalyze = false;

  while (I->isDebugInstr() || !I->isTerminator() ||
         I->getOpcode() == ARM::t2DoLoopStartTP) {

      TBB = I->getOperand(0).getMBB();

      assert(!FBB && "FBB should have been null.");
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
    } else if (I->getOpcode() == ARM::t2LoopEnd &&
      TBB = I->getOperand(1).getMBB();
      Cond.push_back(I->getOperand(0));
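// Editor's note (hedged): the fragments above implement the standard
// TargetInstrInfo::analyzeBranch contract. A sketch of how a generic pass
// consumes it, assuming the usual semantics; not tied to this file:
//
//   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
//   SmallVector<MachineOperand, 4> Cond;
//   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
//     // Cond.empty()          -> block ends in an unconditional branch to TBB
//     // !Cond.empty(), !FBB   -> conditional branch to TBB, else fall through
//     // !Cond.empty(), FBB    -> conditional branch to TBB, else branch to FBB
//   }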
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  I->eraseFromParent();

  I->eraseFromParent();

                                   int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");

  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "ARM branch conditions have two or three components!");

  } else if (Cond.size() == 2) {

  if (Cond.size() == 2)
  else if (Cond.size() == 3)

  if (Cond.size() == 2) {

  while (++I != E && I->isInsideBundle()) {
    int PIdx = I->findFirstPredOperandIdx();
    if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)

  int PIdx = MI.findFirstPredOperandIdx();
  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
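// Editor's note: a self-contained model (not LLVM code) of the predication
// test above: an ARM instruction counts as "predicated" when its predicate
// operand carries a condition other than AL (always).
#include <optional>

namespace sketch {
enum class CondCode { EQ, NE, HS, LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL };

struct Instr {
  std::optional<CondCode> Pred; // empty if the opcode is not predicable
};

inline bool isPredicated(const Instr &I) {
  // Mirrors: PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL
  return I.Pred.has_value() && *I.Pred != CondCode::AL;
}
} // namespace sketch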
  std::string GenericComment =

  if (!GenericComment.empty())
    return GenericComment;

  return std::string();

  int FirstPredOp = MI.findFirstPredOperandIdx();
  if (FirstPredOp != (int)OpIdx)
    return std::string();

  std::string CC = "CC::";

  unsigned Opc = MI.getOpcode();

  int PIdx = MI.findFirstPredOperandIdx();

    PMO.setImm(Pred[0].getImm());
    MI.getOperand(PIdx + 1).setReg(Pred[1].getReg());

           "CPSR def isn't expected operand");
    assert((MI.getOperand(1).isDead() ||
            MI.getOperand(1).getReg() != ARM::CPSR) &&
           "if conversion tried to stop defining used CPSR");
    MI.getOperand(1).setReg(ARM::NoRegister);

  if (Pred1.size() > 2 || Pred2.size() > 2)

                                         std::vector<MachineOperand> &Pred,
                                         bool SkipDead) const {
    bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
    bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
    if (ClobbersCPSR || IsCPSR) {

  for (const auto &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())

  switch (MI->getOpcode()) {
  default:
    return true;

  if (!MI.isPredicable())

  if (!MO.isReg() || MO.isUndef() || MO.isUse())
  if (MO.getReg() != ARM::CPSR)
  switch (MI.getOpcode()) {

  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::CONSTPOOL_ENTRY:
  case ARM::JUMPTABLE_INSTS:
  case ARM::JUMPTABLE_ADDRS:
  case ARM::JUMPTABLE_TBB:
  case ARM::JUMPTABLE_TBH:
    return MI.getOperand(2).getImm();
  case ARM::SPACE:
    return MI.getOperand(1).getImm();
  case ARM::INLINEASM_BR: {
    unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {

  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
                                     unsigned DestReg, bool KillSrc,
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)

                                   unsigned SrcReg, bool KillSrc,
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)

                                         unsigned Cond, unsigned Inactive) {

  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  if (SPRDest && SPRSrc)
  else if (GPRDest && SPRSrc)
  else if (SPRDest && GPRSrc)
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;

  if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
  if (Opc == ARM::MVE_VORR)
  else if (Opc != ARM::MQPRCopy)
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;

  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    BeginIdx = ARM::dsub_0;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    BeginIdx = ARM::dsub_0;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    BeginIdx = ARM::dsub_0;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    BeginIdx = ARM::dsub_0;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    BeginIdx = ARM::dsub_0;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    BeginIdx = ARM::dsub_0;
  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
             !Subtarget.hasFP64()) {
    BeginIdx = ARM::ssub_0;
  } else if (SrcReg == ARM::CPSR) {
  } else if (DestReg == ARM::CPSR) {
  } else if (DestReg == ARM::VPR) {
  } else if (SrcReg == ARM::VPR) {
  } else if (DestReg == ARM::FPSCR_NZCV) {
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
  } else if (SrcReg == ARM::FPSCR_NZCV) {
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)

  assert(Opc && "Impossible reg-to-reg copy");

  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);

  for (unsigned i = 0; i != SubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");

    assert(!DstRegs.count(Src) && "destructive vector copy");

    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {

    if (Opc == ARM::MVE_VORR)

    if (Opc == ARM::MOVr)
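// Editor's note: a self-contained model of the overlap check above. When a
// multi-register copy is expanded sub-register by sub-register and the source
// and destination tuples overlap, copying from the last sub-register backwards
// avoids clobbering source registers before they are read.
namespace sketch {
// Chooses the first sub-register index and per-step spacing (+1 forward,
// -1 backward) for copying SubRegs sub-registers when Dst may overlap Src.
inline void chooseCopyOrder(unsigned SrcBase, unsigned DstBase,
                            unsigned SubRegs, unsigned &BeginIdx,
                            int &Spacing) {
  BeginIdx = 0;
  Spacing = 1;
  // If Dst starts "above" Src inside the overlap window, a forward copy would
  // overwrite not-yet-read source sub-registers, so start from the top.
  if (DstBase > SrcBase && DstBase < SrcBase + SubRegs) {
    BeginIdx = SubRegs - 1;
    Spacing = -1;
  }
}
} // namespace sketch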
std::optional<DestSourcePair>

  if (!MI.isMoveReg() ||
      (MI.getOpcode() == ARM::VORRq &&
       MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
    return std::nullopt;

std::optional<ParamLoadedValue>

  Register DstReg = DstSrcPair->Destination->getReg();

  return std::nullopt;

                                          unsigned SubIdx, unsigned State,

    return MIB.addReg(Reg, State);

    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
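// Editor's note (hedged reading of AddDReg above): for a virtual register the
// sub-register index is attached to the operand (addReg(Reg, State, SubIdx)),
// because the concrete register is not known yet; for a physical register the
// sub-register is resolved immediately via TRI->getSubReg(Reg, SubIdx).
// A SubIdx of 0 appears to mean "use the whole register".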
                                           Register SrcReg, bool isKill, int FI,

  switch (TRI->getSpillSize(*RC)) {

    if (ARM::HPRRegClass.hasSubClassEq(RC)) {

    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {

    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {

    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
          .addMemOperand(MMO);

    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
          Subtarget.hasNEON()) {
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);

    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::MQQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
          Subtarget.hasNEON()) {
    } else if (Subtarget.hasMVEIntegerOps()) {
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);

    if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
        Subtarget.hasMVEIntegerOps()) {
    } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case ARM::VSTR_P0_off:
  case ARM::MVE_VSTRWU32:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case ARM::MQQPRStore:
  case ARM::MQQQQPRStore:
    if (MI.getOperand(1).isFI()) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

                                                    int &FrameIndex) const {

  if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
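// Editor's note: a self-contained model of the "post-frame-elimination" store
// check above: an instruction is treated as a plain spill only if it may
// store and touches exactly one fixed stack slot.
#include <optional>
#include <vector>

namespace sketch {
struct MemAccess {
  bool IsFixedStack;
  int FrameIndex;
};

inline std::optional<int>
storeSlot(bool MayStore, const std::vector<MemAccess> &Accesses) {
  if (MayStore && Accesses.size() == 1 && Accesses.front().IsFixedStack)
    return Accesses.front().FrameIndex;
  return std::nullopt;
}
} // namespace sketch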
  switch (TRI->getSpillSize(*RC)) {

    if (ARM::HPRRegClass.hasSubClassEq(RC)) {

    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {

    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {

    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      MIB.addFrameIndex(FI)
          .addMemOperand(MMO);

    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
        Subtarget.hasNEON()) {

    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::MQQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
        Subtarget.hasNEON()) {
    } else if (Subtarget.hasMVEIntegerOps()) {

    if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
        Subtarget.hasMVEIntegerOps()) {
    } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {

                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {

    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case ARM::VLDR_P0_off:
  case ARM::MVE_VLDRWU32:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case ARM::VLD1d8TPseudo:
  case ARM::VLD1d16TPseudo:
  case ARM::VLD1d32TPseudo:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d8QPseudo:
  case ARM::VLD1d16QPseudo:
  case ARM::VLD1d32QPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

  case ARM::MQQPRLoad:
  case ARM::MQQQQPRLoad:
    if (MI.getOperand(1).isFI()) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();

                                                     int &FrameIndex) const {

  if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
  bool isThumb2 = Subtarget.isThumb2();

  if (isThumb1 || !MI->getOperand(1).isDead()) {
    LDM = BuildMI(*BB, MI, dl,
                  TII->get(isThumb2   ? ARM::t2LDMIA_UPD
                           : isThumb1 ? ARM::tLDMIA_UPD

    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));

  if (isThumb1 || !MI->getOperand(0).isDead()) {
    STM = BuildMI(*BB, MI, dl,
                  TII->get(isThumb2   ? ARM::t2STMIA_UPD
                           : isThumb1 ? ARM::tSTMIA_UPD

    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));

            [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
              return TRI.getEncodingValue(Reg1) < TRI.getEncodingValue(Reg2);

  for (const auto &Reg : ScratchRegs) {

  if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
    expandLoadStackGuard(MI);
    MI.getParent()->erase(MI);

  if (MI.getOpcode() == ARM::MEMCPY) {

  if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())

  Register DstRegS = MI.getOperand(0).getReg();
  Register SrcRegS = MI.getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
  if (!DstRegD || !SrcRegD)

  if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))

  if (MI.getOperand(0).isDead())

  int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD, nullptr);
  if (ImpDefIdx != -1)
    MI.removeOperand(ImpDefIdx);

  MI.setDesc(get(ARM::VMOVD));
  MI.getOperand(0).setReg(DstRegD);
  MI.getOperand(1).setReg(SrcRegD);

  MI.getOperand(1).setIsUndef();

  if (MI.getOperand(1).isKill()) {
    MI.getOperand(1).setIsKill(false);
    MI.addRegisterKilled(SrcRegS, TRI, true);

  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");

        cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue,

        cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);

        Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,

        cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);

  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {

  switch (I->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = I->getOperand(1).getIndex();
    I->getOperand(1).setIndex(CPI);
    I->getOperand(2).setImm(PCLabelId);

  if (!I->isBundledWithSucc())
  if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
      Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_pcrel) {

    if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
        Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_pcrel)

    if (isARMCP0 && isARMCP1) {
    } else if (!isARMCP0 && !isARMCP1) {
  } else if (Opcode == ARM::PICLDR) {
    if (Addr0 != Addr1) {

                                               int64_t &Offset2) const {

  auto IsLoadOpcode = [&](unsigned Opcode) {

  case ARM::t2LDRSHi8:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:

  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();

                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {

  assert(Offset2 > Offset1);
  if ((Offset2 - Offset1) / 8 > 64)
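// Editor's note: a self-contained restatement of the heuristic above,
// assuming the intent is to keep loads that get scheduled together within a
// limited address range (8-byte granules, a 64-granule window).
#include <cassert>
#include <cstdint>

namespace sketch {
inline bool withinClusterWindow(int64_t Offset1, int64_t Offset2) {
  assert(Offset2 > Offset1 && "offsets must be pre-sorted");
  // e.g. offsets 0 and 512 pass (512/8 == 64); 0 and 520 fail (65 > 64).
  return (Offset2 - Offset1) / 8 <= 64;
}
} // namespace sketch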
  if (MI.isDebugInstr())

  if (MI.isTerminator() || MI.isPosition())

  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)

  while (++I != MBB->end() && I->isDebugInstr())
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)

  if (!MI.isCall() && MI.definesRegister(ARM::SP, nullptr))

                                           unsigned NumCycles,
                                           unsigned ExtraPredCycles,

  if (!Pred->empty()) {
    if (LastMI->getOpcode() == ARM::t2Bcc) {

                                 MBB, 0, 0, Probability);

                                           unsigned TCycles, unsigned TExtra,
                                           unsigned FCycles, unsigned FExtra,

  const unsigned ScalingUpFactor = 1024;

  unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
  unsigned UnpredCost;
  if (!Subtarget.hasBranchPredictor()) {

    unsigned NotTakenBranchCost = 1;

    unsigned TUnpredCycles, FUnpredCycles;

      TUnpredCycles = TCycles + NotTakenBranchCost;
      FUnpredCycles = TakenBranchCost;

      TUnpredCycles = TCycles + TakenBranchCost;
      FUnpredCycles = FCycles + NotTakenBranchCost;

    PredCost -= 1 * ScalingUpFactor;

    unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;

    if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
      PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
    unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    UnpredCost += 1 * ScalingUpFactor;

  return PredCost <= UnpredCost;
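// Editor's note: a self-contained rendering of the fixed-point comparison
// above. Costs are scaled by 1024 so branch probabilities can be applied
// without floating point; predication wins when its scaled cost is no larger
// than the probability-weighted cost of keeping the branch. This model uses a
// double probability where the real code uses BranchProbability.
namespace sketch {
inline bool profitableToPredicate(unsigned TCycles, unsigned TExtra,
                                  unsigned FCycles, unsigned FExtra,
                                  double Prob /*taken probability*/) {
  const unsigned Scale = 1024;
  // Predicated: both sides always execute, plus per-side predication extras.
  unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * Scale;
  // Branchy: weight each side by how often it runs, plus a constant term
  // (the "+ 1 * ScalingUpFactor" above).
  unsigned UnpredCost =
      static_cast<unsigned>(Prob * TCycles * Scale) +
      static_cast<unsigned>((1.0 - Prob) * FCycles * Scale) + 1 * Scale;
  // Example: TCycles = FCycles = 2, no extras, Prob = 0.5:
  // PredCost = 4096, UnpredCost = 1024 + 1024 + 1024 = 3072 -> keep branch.
  return PredCost <= UnpredCost;
}
} // namespace sketch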
                                            unsigned NumInsts) const {

  unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;

  if (MI.getOpcode() == ARM::t2Bcc &&

  return Subtarget.isProfitableToUnpredicate();

  int PIdx = MI.findFirstPredOperandIdx();

    PredReg = MI.getOperand(PIdx + 1).getReg();

  if (Opc == ARM::t2B)

                                                 unsigned OpIdx2) const {
  switch (MI.getOpcode()) {

  case ARM::t2MOVCCr: {

  if (!Reg.isVirtual())
  if (!MRI.hasOneNonDBGUse(Reg))

    if (MO.isFI() || MO.isCPI() || MO.isJTI())
    if (MO.getReg().isPhysical())
    if (MO.isDef() && !MO.isDead())

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))

                                     unsigned &TrueOp, unsigned &FalseOp,
                                     bool &Optimizable) const {
  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");

  Cond.push_back(MI.getOperand(3));
  Cond.push_back(MI.getOperand(4));
                                                 bool PreferFalse) const {
  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");

  bool Invert = !DefMI;
    DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);

  Register DestReg = MI.getOperand(0).getReg();

  if (!MRI.constrainRegClass(DestReg, FalseClass))
  if (!MRI.constrainRegClass(DestReg, TrueClass))

       i != e && !DefDesc.operands()[i].isPredicate(); ++i)

  unsigned CondCode = MI.getOperand(3).getImm();

  NewMI.add(MI.getOperand(4));

  NewMI.add(FalseReg);

    {ARM::ADDSri, ARM::ADDri},
    {ARM::ADDSrr, ARM::ADDrr},
    {ARM::ADDSrsi, ARM::ADDrsi},
    {ARM::ADDSrsr, ARM::ADDrsr},

    {ARM::SUBSri, ARM::SUBri},
    {ARM::SUBSrr, ARM::SUBrr},
    {ARM::SUBSrsi, ARM::SUBrsi},
    {ARM::SUBSrsr, ARM::SUBrsr},

    {ARM::RSBSri, ARM::RSBri},
    {ARM::RSBSrsi, ARM::RSBrsi},
    {ARM::RSBSrsr, ARM::RSBrsr},

    {ARM::tADDSi3, ARM::tADDi3},
    {ARM::tADDSi8, ARM::tADDi8},
    {ARM::tADDSrr, ARM::tADDrr},
    {ARM::tADCS, ARM::tADC},

    {ARM::tSUBSi3, ARM::tSUBi3},
    {ARM::tSUBSi8, ARM::tSUBi8},
    {ARM::tSUBSrr, ARM::tSUBrr},
    {ARM::tSBCS, ARM::tSBC},
    {ARM::tRSBS, ARM::tRSB},
    {ARM::tLSLSri, ARM::tLSLri},

    {ARM::t2ADDSri, ARM::t2ADDri},
    {ARM::t2ADDSrr, ARM::t2ADDrr},
    {ARM::t2ADDSrs, ARM::t2ADDrs},

    {ARM::t2SUBSri, ARM::t2SUBri},
    {ARM::t2SUBSrr, ARM::t2SUBrr},
    {ARM::t2SUBSrs, ARM::t2SUBrs},

    {ARM::t2RSBSri, ARM::t2RSBri},
    {ARM::t2RSBSrs, ARM::t2RSBrs},

    if (OldOpc == Entry.PseudoOpc)
      return Entry.MachineOpc;

  if (NumBytes == 0 && DestReg != BaseReg) {

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

    unsigned ThisVal = NumBytes & llvm::rotr<uint32_t>(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    NumBytes &= ~ThisVal;

    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
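// Editor's note: a self-contained model of the chunk peeling above. ARM's
// data-processing immediates are an 8-bit value rotated right by an even
// amount, so an arbitrary offset is split into such chunks, one ADD/SUB per
// chunk. This simplified version aligns each chunk at the lowest set bit
// rounded down to an even position (the real ARM encoding can additionally
// wrap around the word boundary, which this sketch does not attempt).
#include <bit>
#include <cassert>
#include <cstdint>
#include <vector>

namespace sketch {
inline std::vector<uint32_t> splitIntoSOImmChunks(uint32_t Bytes) {
  std::vector<uint32_t> Chunks;
  while (Bytes) {
    unsigned TZ = std::countr_zero(Bytes) & ~1u; // even bit position
    uint32_t Chunk = Bytes & (0xFFu << TZ);      // 8 bits starting there
    assert(Chunk && "didn't extract field correctly");
    Chunks.push_back(Chunk); // encodable as imm8 rotated right by (32 - TZ)
    Bytes &= ~Chunk;
  }
  return Chunks; // e.g. 0x12340 -> {0x2340, 0x10000}
}
} // namespace sketch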
                                      unsigned NumBytes) {

  if (!IsPush && !IsPop)

  bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
                      MI->getOpcode() == ARM::VLDMDIA_UPD;
  bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
                     MI->getOpcode() == ARM::tPOP ||
                     MI->getOpcode() == ARM::tPOP_RET;

  assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
                          MI->getOperand(1).getReg() == ARM::SP)) &&
         "trying to fold sp update into non-sp-updating push/pop");

  if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)

  int RegListIdx = IsT1PushPop ? 2 : 4;

  unsigned RegsNeeded;

    RegsNeeded = NumBytes / 8;
    RegClass = &ARM::DPRRegClass;

    RegsNeeded = NumBytes / 4;
    RegClass = &ARM::GPRRegClass;

  unsigned FirstRegEnc = -1;

  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
        TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
      FirstRegEnc = TRI->getEncodingValue(MO.getReg());

  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
    unsigned CurReg = RegClass->getRegister(CurRegEnc);
    if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))

                                                  false, false, true));

        MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=

  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
    MI->removeOperand(i);
  unsigned Opcode = MI.getOpcode();

  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx + 1).getImm();

      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.removeOperand(FrameRegIdx + 1);

      MI.setDesc(TII.get(ARM::SUBri));

      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx + 1).ChangeToImmediate(Offset);

      unsigned ThisImmVal = Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);

             "Bit extraction didn't work?");
      MI.getOperand(FrameRegIdx + 1).ChangeToImmediate(ThisImmVal);

    unsigned ImmIdx = 0;

    unsigned NumBits = 0;

      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();

      ImmIdx = FrameRegIdx + 2;

      ImmIdx = FrameRegIdx + 2;

      ImmIdx = FrameRegIdx + 1;

      ImmIdx = FrameRegIdx + 1;

      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");

      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);

          ImmedOffset = -ImmedOffset;

          ImmedOffset |= 1 << NumBits;

        ImmedOffset = ImmedOffset & Mask;

          ImmedOffset = -ImmedOffset;

          ImmedOffset |= 1 << NumBits;
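// Editor's note: a self-contained model of the addressing-mode rewrite above:
// a frame offset is representable directly only if it is aligned to the
// mode's scale and, after scaling, fits in the mode's immediate field; the
// sign is carried by a separate add/sub bit (the "1 << NumBits" above).
#include <cstdlib>

namespace sketch {
struct EncodedImm {
  bool Representable;
  unsigned Field; // immediate field including the add/sub bit
};

inline EncodedImm encodeOffset(int Offset, unsigned NumBits, unsigned Scale) {
  if (Offset % static_cast<int>(Scale) != 0)
    return {false, 0}; // "Can't encode this offset!"
  int Immed = Offset / static_cast<int>(Scale);
  unsigned Mask = (1u << NumBits) - 1;
  if (static_cast<unsigned>(std::abs(Immed)) > Mask)
    return {false, 0}; // residue must be handled by a separate ADD/SUB
  unsigned Field = static_cast<unsigned>(std::abs(Immed)) & Mask;
  if (Immed < 0)
    Field |= 1u << NumBits; // subtract-rather-than-add flag
  return {true, Field};
}
} // namespace sketch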
                                      Register &SrcReg2, int64_t &CmpMask,
                                      int64_t &CmpValue) const {
  switch (MI.getOpcode()) {

    SrcReg = MI.getOperand(0).getReg();
    CmpValue = MI.getOperand(1).getImm();

    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = MI.getOperand(1).getReg();

    SrcReg = MI.getOperand(0).getReg();
    CmpMask = MI.getOperand(1).getImm();

                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {

    if (CmpMask != MI->getOperand(2).getImm())
    if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())

  switch (MI->getOpcode()) {
  default:
    return false;

  if (!MI)
    return false;

  if (CmpMask != ~0) {

         UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
      if (UI->getParent() != CmpInstr.getParent())

    if (!MI)
      return false;

  if (I == B)
    return false;

  else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {

    if (CmpInstr.getOpcode() == ARM::CMPri ||

  bool IsThumb1 = false;

  if (MI && IsThumb1) {
    if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) {
      bool CanReorder = true;
      for (; I != E; --I) {
        if (I->getOpcode() != ARM::tMOVi8) {

      MI = MI->removeFromParent();

  bool SubAddIsThumb1 = false;

    if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
        Instr.readsRegister(ARM::CPSR, TRI))

  IsThumb1 = SubAddIsThumb1;
  bool isSafe = false;

  while (!isSafe && ++I != E) {

    for (unsigned IO = 0, EO = Instr.getNumOperands();
         !isSafe && IO != EO; ++IO) {

    bool IsInstrVSel = true;
    switch (Instr.getOpcode()) {
      IsInstrVSel = false;

      bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
                   Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
                   Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
      unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;

          std::make_pair(&((*I).getOperand(IO - 1)), NewCC));

    if (Succ->isLiveIn(ARM::CPSR))

  unsigned CPSRRegNum = MI->getNumExplicitOperands() - 1;
  MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
  MI->getOperand(CPSRRegNum).setIsDef(true);

  for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
    OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);

  MI->clearRegisterDeads(ARM::CPSR);

  int64_t CmpMask, CmpValue;

  if (Next != MI.getParent()->end() &&

  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
      DefOpc != ARM::tMOVi32imm)
  if (!DefMI.getOperand(1).isImm())

  if (!MRI->hasOneNonDBGUse(Reg))

  if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR)

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc = 0;

  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  default:
    return false;

  case ARM::t2EORrr: {

    if (UseOpc == ARM::SUBrr && Commute)

      NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;

      NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;

    case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
    case ARM::EORrr: NewUseOpc = ARM::EORri; break;

  case ARM::t2SUBrr: {
    if (UseOpc == ARM::t2SUBrr && Commute)

    const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP;
    const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
    const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;

      NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;

      NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;

    case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
    case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;

  unsigned OpIdx = Commute ? 2 : 1;

  bool isKill = UseMI.getOperand(OpIdx).isKill();

  Register NewReg = MRI->createVirtualRegister(TRC);

  UseMI.getOperand(1).setReg(NewReg);
  UseMI.getOperand(1).setIsKill();
  UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI.eraseFromParent();
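// Editor's note: a self-contained model of the opcode/immediate negotiation
// above: folding "mov rC, #imm; add rD, rA, rC" into "add rD, rA, #imm" is
// only legal if either imm or -imm is encodable, flipping ADD<->SUB when the
// negated form is the one that fits. encodable() stands in for the
// ARM_AM::getSOImmVal(...) != -1 checks in the real code.
#include <cstdint>
#include <optional>

namespace sketch {
enum class Op { Add, Sub };
struct Folded {
  Op NewOp;
  uint32_t Imm;
};

template <typename Pred>
std::optional<Folded> foldAddImm(Op Old, uint32_t Imm, Pred encodable) {
  if (encodable(Imm))
    return Folded{Old, Imm};
  if (encodable(-Imm)) // negate the immediate and flip the operation
    return Folded{Old == Op::Add ? Op::Sub : Op::Add,
                  static_cast<uint32_t>(-Imm)};
  return std::nullopt;
}
} // namespace sketch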
  case ARM::t2ADDspImm:
  case ARM::t2SUBspImm:

  switch (MI.getOpcode()) {

    assert(UOps >= 0 && "bad # UOps");

    unsigned ShOpVal = MI.getOperand(3).getImm();
        ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

    if (!MI.getOperand(2).getReg())

    unsigned ShOpVal = MI.getOperand(3).getImm();
        ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

  case ARM::LDRSB_POST:
  case ARM::LDRSH_POST: {
    return (Rt == Rm) ? 4 : 3;

  case ARM::LDR_PRE_REG:
  case ARM::LDRB_PRE_REG: {
    unsigned ShOpVal = MI.getOperand(4).getImm();
        ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

  case ARM::STR_PRE_REG:
  case ARM::STRB_PRE_REG: {
    unsigned ShOpVal = MI.getOperand(4).getImm();
        ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

  case ARM::STRH_PRE: {

  case ARM::LDR_POST_REG:
  case ARM::LDRB_POST_REG:
  case ARM::LDRH_POST: {
    return (Rt == Rm) ? 3 : 2;

  case ARM::LDR_PRE_IMM:
  case ARM::LDRB_PRE_IMM:
  case ARM::LDR_POST_IMM:
  case ARM::LDRB_POST_IMM:
  case ARM::STRB_POST_IMM:
  case ARM::STRB_POST_REG:
  case ARM::STRB_PRE_IMM:
  case ARM::STRH_POST:
  case ARM::STR_POST_IMM:
  case ARM::STR_POST_REG:
  case ARM::STR_PRE_IMM:

  case ARM::LDRSB_PRE:
  case ARM::LDRSH_PRE: {
    unsigned ShOpVal = MI.getOperand(4).getImm();
        ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

    return (Rt == Rn) ? 3 : 2;

  case ARM::LDRD_POST:
  case ARM::t2LDRD_POST:

  case ARM::STRD_POST:
  case ARM::t2STRD_POST:

  case ARM::LDRD_PRE: {
    return (Rt == Rn) ? 4 : 3;

  case ARM::t2LDRD_PRE: {
    return (Rt == Rn) ? 4 : 3;

  case ARM::STRD_PRE: {

  case ARM::t2STRD_PRE:

  case ARM::t2LDR_POST:
  case ARM::t2LDRB_POST:
  case ARM::t2LDRB_PRE:
  case ARM::t2LDRSBi12:
  case ARM::t2LDRSBi8:
  case ARM::t2LDRSBpci:

  case ARM::t2LDRH_POST:
  case ARM::t2LDRH_PRE:

  case ARM::t2LDRSB_POST:
  case ARM::t2LDRSB_PRE:
  case ARM::t2LDRSH_POST:
  case ARM::t2LDRSH_PRE:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSHpci:

  case ARM::t2LDRDi8: {
    return (Rt == Rn) ? 3 : 2;

  case ARM::t2STRB_POST:
  case ARM::t2STRB_PRE:

  case ARM::t2STRH_POST:
  case ARM::t2STRH_PRE:

  case ARM::t2STR_POST:
  case ARM::t2STR_PRE:

       E = MI.memoperands_end();
    Size += (*I)->getSize().getValue();

  return std::min(Size / 4, 16U);
  unsigned UOps = 1 + NumRegs;

  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:

  case ARM::LDMIA_RET:
  case ARM::t2LDMIA_RET:

  if (!ItinData || ItinData->isEmpty())

  unsigned Class = Desc.getSchedClass();
  if (ItinUOps >= 0) {

  unsigned Opc = MI.getOpcode();

  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:

  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:

  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:

  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
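// Editor's note: the formula above in isolation. The computation implies the
// machine moves two registers per data uop, so NumRegs registers cost
// ceil(NumRegs / 2) data uops plus one address/issue uop.
namespace sketch {
inline unsigned vldmUOps(unsigned NumRegs) {
  // e.g. vldmUOps(1) == 2, (2) == 2, (3) == 3, (4) == 3.
  return (NumRegs / 2) + (NumRegs % 2) + 1; // ceil(NumRegs/2) + 1
}
} // namespace sketch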
  case ARM::LDMIA_RET:

  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:

  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:

  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:

  case ARM::t2LDMIA_RET:

  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:

  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;

      unsigned UOps = (NumRegs / 2);

      unsigned UOps = (NumRegs / 2);

      if ((NumRegs % 2) || !MI.hasOneMemOperand() ||

std::optional<unsigned>
                                  unsigned DefIdx, unsigned DefAlign) const {

    DefCycle = RegNo / 2 + 1;

    bool isSLoad = false;

    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:

    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)

    DefCycle = RegNo + 2;

std::optional<unsigned>
                                 unsigned DefIdx, unsigned DefAlign) const {

    DefCycle = RegNo / 2;

    DefCycle = (RegNo / 2);
    if ((RegNo % 2) || DefAlign < 8)

    DefCycle = RegNo + 2;

std::optional<unsigned>
                                  unsigned UseIdx, unsigned UseAlign) const {

    UseCycle = RegNo / 2 + 1;

    bool isSStore = false;

    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:

    if ((isSStore && (RegNo % 2)) || UseAlign < 8)

    UseCycle = RegNo + 2;

std::optional<unsigned>
                                 unsigned UseIdx, unsigned UseAlign) const {

    UseCycle = RegNo / 2;

    UseCycle = (RegNo / 2);
    if ((RegNo % 2) || UseAlign < 8)
                                      unsigned DefIdx, unsigned DefAlign,
                                      const MCInstrDesc &UseMCID,
                                      unsigned UseIdx, unsigned UseAlign) const {

  std::optional<unsigned> DefCycle;
  bool LdmBypass = false;

  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:

  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);

  case ARM::LDMIA_RET:

  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:

  case ARM::tLDMIA_UPD:

  case ARM::t2LDMIA_RET:

  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:

    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);

  std::optional<unsigned> UseCycle;

  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:

  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);

  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA_UPD:

  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);

  if (UseCycle > *DefCycle + 1)
    return std::nullopt;

  UseCycle = *DefCycle - *UseCycle + 1;
  if (UseCycle > 0u) {

      UseCycle = *UseCycle - 1;

                                  UseClass, UseIdx)) {
      UseCycle = *UseCycle - 1;

                                           unsigned &DefIdx, unsigned &Dist) {

  assert(II->isInsideBundle() && "Empty bundle?");

  while (II->isInsideBundle()) {
    Idx = II->findRegisterDefOperandIdx(Reg, TRI, false, true);

  assert(Idx != -1 && "Cannot find bundled definition!");

                                           unsigned &UseIdx, unsigned &Dist) {

  assert(II->isInsideBundle() && "Empty bundle?");

  while (II != E && II->isInsideBundle()) {
    Idx = II->findRegisterUseOperandIdx(Reg, TRI, false);

    if (II->getOpcode() != ARM::t2IT)
      unsigned ShOpVal = DefMI.getOperand(3).getImm();

    case ARM::t2LDRSHs: {
      unsigned ShAmt = DefMI.getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 2)

  } else if (Subtarget.isSwift()) {

      unsigned ShOpVal = DefMI.getOperand(3).getImm();
          ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

    case ARM::t2LDRSHs: {
      unsigned ShAmt = DefMI.getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)

  if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {

    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:

    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8wb_fixed:
    case ARM::VLD2q16wb_fixed:
    case ARM::VLD2q32wb_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8wb_register:
    case ARM::VLD2q16wb_register:
    case ARM::VLD2q32wb_register:

    case ARM::VLD3d8_UPD:
    case ARM::VLD3d16_UPD:
    case ARM::VLD3d32_UPD:
    case ARM::VLD1d64Twb_fixed:
    case ARM::VLD1d64Twb_register:
    case ARM::VLD3q8_UPD:
    case ARM::VLD3q16_UPD:
    case ARM::VLD3q32_UPD:

    case ARM::VLD4d8_UPD:
    case ARM::VLD4d16_UPD:
    case ARM::VLD4d32_UPD:
    case ARM::VLD1d64Qwb_fixed:
    case ARM::VLD1d64Qwb_register:
    case ARM::VLD4q8_UPD:
    case ARM::VLD4q16_UPD:
    case ARM::VLD4q32_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8:
    case ARM::VLD4DUPd16:
    case ARM::VLD4DUPd32:
    case ARM::VLD4DUPd8_UPD:
    case ARM::VLD4DUPd16_UPD:
    case ARM::VLD4DUPd32_UPD:

    case ARM::VLD1LNd16:
    case ARM::VLD1LNd32:
    case ARM::VLD1LNd8_UPD:
    case ARM::VLD1LNd16_UPD:
    case ARM::VLD1LNd32_UPD:

    case ARM::VLD2LNd16:
    case ARM::VLD2LNd32:
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:

    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
  if (!ItinData || ItinData->isEmpty())
    return std::nullopt;

  unsigned DefAdj = 0;
  if (DefMI.isBundle())

  unsigned UseAdj = 0;
  if (UseMI.isBundle()) {
      return std::nullopt;

  return getOperandLatencyImpl(
      ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
      Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);

std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
    unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
    unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
  if (Reg == ARM::CPSR) {
    if (DefMI.getOpcode() == ARM::FMSTAT) {
      return Subtarget.isLikeA9() ? 1 : 20;

    if (UseMI.isBranch())

    return std::nullopt;

  unsigned DefAlign = DefMI.hasOneMemOperand()
  unsigned UseAlign = UseMI.hasOneMemOperand()
      ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);

    return std::nullopt;

  int Adj = DefAdj + UseAdj;

  if (Adj >= 0 || (int)*Latency > -Adj) {

std::optional<unsigned>
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {

  if (isZeroCost(DefMCID.Opcode))

  if (!ItinData || ItinData->isEmpty())
    return DefMCID.mayLoad() ? 3 : 1;

  std::optional<unsigned> Latency =

    int Threshold = 1 + Adj;

  auto *DefMN = cast<MachineSDNode>(DefNode);
  unsigned DefAlign = !DefMN->memoperands_empty()
                          ? (*DefMN->memoperands_begin())->getAlign().value()
  auto *UseMN = cast<MachineSDNode>(UseNode);
  unsigned UseAlign = !UseMN->memoperands_empty()
                          ? (*UseMN->memoperands_begin())->getAlign().value()
      ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
    return std::nullopt;

  case ARM::t2LDRSHs: {
    if (ShAmt == 0 || ShAmt == 2)

        ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&

  if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
  case ARM::VLD1q8wb_register:
  case ARM::VLD1q16wb_register:
  case ARM::VLD1q32wb_register:
  case ARM::VLD1q64wb_register:
  case ARM::VLD1q8wb_fixed:
  case ARM::VLD1q16wb_fixed:
  case ARM::VLD1q32wb_fixed:
  case ARM::VLD1q64wb_fixed:

  case ARM::VLD2q8Pseudo:
  case ARM::VLD2q16Pseudo:
  case ARM::VLD2q32Pseudo:
  case ARM::VLD2d8wb_fixed:
  case ARM::VLD2d16wb_fixed:
  case ARM::VLD2d32wb_fixed:
  case ARM::VLD2q8PseudoWB_fixed:
  case ARM::VLD2q16PseudoWB_fixed:
  case ARM::VLD2q32PseudoWB_fixed:
  case ARM::VLD2d8wb_register:
  case ARM::VLD2d16wb_register:
  case ARM::VLD2d32wb_register:
  case ARM::VLD2q8PseudoWB_register:
  case ARM::VLD2q16PseudoWB_register:
  case ARM::VLD2q32PseudoWB_register:
  case ARM::VLD3d8Pseudo:
  case ARM::VLD3d16Pseudo:
  case ARM::VLD3d32Pseudo:
  case ARM::VLD1d8TPseudo:
  case ARM::VLD1d16TPseudo:
  case ARM::VLD1d32TPseudo:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d64TPseudoWB_fixed:
  case ARM::VLD1d64TPseudoWB_register:
  case ARM::VLD3d8Pseudo_UPD:
  case ARM::VLD3d16Pseudo_UPD:
  case ARM::VLD3d32Pseudo_UPD:
  case ARM::VLD3q8Pseudo_UPD:
  case ARM::VLD3q16Pseudo_UPD:
  case ARM::VLD3q32Pseudo_UPD:
  case ARM::VLD3q8oddPseudo:
  case ARM::VLD3q16oddPseudo:
  case ARM::VLD3q32oddPseudo:
  case ARM::VLD3q8oddPseudo_UPD:
  case ARM::VLD3q16oddPseudo_UPD:
  case ARM::VLD3q32oddPseudo_UPD:
  case ARM::VLD4d8Pseudo:
  case ARM::VLD4d16Pseudo:
  case ARM::VLD4d32Pseudo:
  case ARM::VLD1d8QPseudo:
  case ARM::VLD1d16QPseudo:
  case ARM::VLD1d32QPseudo:
  case ARM::VLD1d64QPseudo:
  case ARM::VLD1d64QPseudoWB_fixed:
  case ARM::VLD1d64QPseudoWB_register:
  case ARM::VLD1q8HighQPseudo:
  case ARM::VLD1q8LowQPseudo_UPD:
  case ARM::VLD1q8HighTPseudo:
  case ARM::VLD1q8LowTPseudo_UPD:
  case ARM::VLD1q16HighQPseudo:
  case ARM::VLD1q16LowQPseudo_UPD:
  case ARM::VLD1q16HighTPseudo:
  case ARM::VLD1q16LowTPseudo_UPD:
  case ARM::VLD1q32HighQPseudo:
  case ARM::VLD1q32LowQPseudo_UPD:
  case ARM::VLD1q32HighTPseudo:
  case ARM::VLD1q32LowTPseudo_UPD:
  case ARM::VLD1q64HighQPseudo:
  case ARM::VLD1q64LowQPseudo_UPD:
  case ARM::VLD1q64HighTPseudo:
  case ARM::VLD1q64LowTPseudo_UPD:
  case ARM::VLD4d8Pseudo_UPD:
  case ARM::VLD4d16Pseudo_UPD:
  case ARM::VLD4d32Pseudo_UPD:
  case ARM::VLD4q8Pseudo_UPD:
  case ARM::VLD4q16Pseudo_UPD:
  case ARM::VLD4q32Pseudo_UPD:
  case ARM::VLD4q8oddPseudo:
  case ARM::VLD4q16oddPseudo:
  case ARM::VLD4q32oddPseudo:
  case ARM::VLD4q8oddPseudo_UPD:
  case ARM::VLD4q16oddPseudo_UPD:
  case ARM::VLD4q32oddPseudo_UPD:
  case ARM::VLD1DUPq8:
  case ARM::VLD1DUPq16:
  case ARM::VLD1DUPq32:
  case ARM::VLD1DUPq8wb_fixed:
  case ARM::VLD1DUPq16wb_fixed:
  case ARM::VLD1DUPq32wb_fixed:
  case ARM::VLD1DUPq8wb_register:
  case ARM::VLD1DUPq16wb_register:
  case ARM::VLD1DUPq32wb_register:
  case ARM::VLD2DUPd8:
  case ARM::VLD2DUPd16:
  case ARM::VLD2DUPd32:
  case ARM::VLD2DUPd8wb_fixed:
  case ARM::VLD2DUPd16wb_fixed:
  case ARM::VLD2DUPd32wb_fixed:
  case ARM::VLD2DUPd8wb_register:
  case ARM::VLD2DUPd16wb_register:
  case ARM::VLD2DUPd32wb_register:
  case ARM::VLD2DUPq8EvenPseudo:
  case ARM::VLD2DUPq8OddPseudo:
  case ARM::VLD2DUPq16EvenPseudo:
  case ARM::VLD2DUPq16OddPseudo:
  case ARM::VLD2DUPq32EvenPseudo:
  case ARM::VLD2DUPq32OddPseudo:
  case ARM::VLD3DUPq8EvenPseudo:
  case ARM::VLD3DUPq8OddPseudo:
  case ARM::VLD3DUPq16EvenPseudo:
  case ARM::VLD3DUPq16OddPseudo:
  case ARM::VLD3DUPq32EvenPseudo:
  case ARM::VLD3DUPq32OddPseudo:
  case ARM::VLD4DUPd8Pseudo:
  case ARM::VLD4DUPd16Pseudo:
  case ARM::VLD4DUPd32Pseudo:
  case ARM::VLD4DUPd8Pseudo_UPD:
  case ARM::VLD4DUPd16Pseudo_UPD:
  case ARM::VLD4DUPd32Pseudo_UPD:
  case ARM::VLD4DUPq8EvenPseudo:
  case ARM::VLD4DUPq8OddPseudo:
  case ARM::VLD4DUPq16EvenPseudo:
  case ARM::VLD4DUPq16OddPseudo:
  case ARM::VLD4DUPq32EvenPseudo:
  case ARM::VLD4DUPq32OddPseudo:
  case ARM::VLD1LNq8Pseudo:
  case ARM::VLD1LNq16Pseudo:
  case ARM::VLD1LNq32Pseudo:
  case ARM::VLD1LNq8Pseudo_UPD:
  case ARM::VLD1LNq16Pseudo_UPD:
  case ARM::VLD1LNq32Pseudo_UPD:
  case ARM::VLD2LNd8Pseudo:
  case ARM::VLD2LNd16Pseudo:
  case ARM::VLD2LNd32Pseudo:
  case ARM::VLD2LNq16Pseudo:
  case ARM::VLD2LNq32Pseudo:
  case ARM::VLD2LNd8Pseudo_UPD:
  case ARM::VLD2LNd16Pseudo_UPD:
  case ARM::VLD2LNd32Pseudo_UPD:
  case ARM::VLD2LNq16Pseudo_UPD:
  case ARM::VLD2LNq32Pseudo_UPD:
  case ARM::VLD4LNd8Pseudo:
  case ARM::VLD4LNd16Pseudo:
  case ARM::VLD4LNd32Pseudo:
  case ARM::VLD4LNq16Pseudo:
  case ARM::VLD4LNq32Pseudo:
  case ARM::VLD4LNd8Pseudo_UPD:
  case ARM::VLD4LNd16Pseudo_UPD:
  case ARM::VLD4LNd32Pseudo_UPD:
  case ARM::VLD4LNq16Pseudo_UPD:
  case ARM::VLD4LNq32Pseudo_UPD:
4716unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4717 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4727 !Subtarget.cheapPredicableCPSRDef())) {
4737 unsigned *PredCost)
const {
4738 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4744 if (
MI.isBundle()) {
4748 while (++
I != E &&
I->isInsideBundle()) {
4749 if (
I->getOpcode() != ARM::t2IT)
4750 Latency += getInstrLatency(ItinData, *
I, PredCost);
4757 !Subtarget.cheapPredicableCPSRDef()))) {
4765 return MI.mayLoad() ? 3 : 1;
4778 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->
getAlign().value() : 0;
4780 if (Adj >= 0 || (
int)
Latency > -Adj) {
4788 if (!
Node->isMachineOpcode())
4791 if (!ItinData || ItinData->
isEmpty())
4794 unsigned Opcode =
Node->getMachineOpcode();
4804bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4809 unsigned UseIdx)
const {
4812 if (Subtarget.nonpipelinedVFP() &&
4827 unsigned DefIdx)
const {
4829 if (!ItinData || ItinData->
isEmpty())
4834 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4835 std::optional<unsigned> DefCycle =
4837 return DefCycle && DefCycle <= 2U;
4845 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4848 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4850 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4851 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4852 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4856 if (
MI.getOpcode() == ARM::tPUSH ||
4857 MI.getOpcode() == ARM::tPOP ||
4858 MI.getOpcode() == ARM::tPOP_RET) {
4860 if (MO.isImplicit() || !MO.isReg())
4863 if (Reg < ARM::R0 || Reg > ARM::R7) {