73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
90 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
91 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
92 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
93 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
94 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
95 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
96 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
97 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
100 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
101 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
102 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
103 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
104 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
105 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
106 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
107 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
114 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
115 if (!MLxEntryMap.insert(std::make_pair(
ARM_MLxTable[i].MLxOpc, i)).second)
127 if (usePreRAHazardRecognizer()) {
129 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
149 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
165 if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
186 bool AllowModify)
const {
191 if (
I ==
MBB.instr_begin())
201 bool CantAnalyze =
false;
205 while (
I->isDebugInstr() || !
I->isTerminator() ||
207 I->getOpcode() == ARM::t2DoLoopStartTP){
208 if (
I ==
MBB.instr_begin())
219 TBB =
I->getOperand(0).getMBB();
225 assert(!FBB &&
"FBB should have been null.");
227 TBB =
I->getOperand(0).getMBB();
228 Cond.push_back(
I->getOperand(1));
229 Cond.push_back(
I->getOperand(2));
230 }
else if (
I->isReturn()) {
233 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
240 TBB =
I->getOperand(1).getMBB();
242 Cond.push_back(
I->getOperand(0));
264 while (DI !=
MBB.instr_end()) {
287 if (
I ==
MBB.instr_begin())
299 int *BytesRemoved)
const {
300 assert(!BytesRemoved &&
"code size not handled");
311 I->eraseFromParent();
315 if (
I ==
MBB.begin())
return 1;
321 I->eraseFromParent();
330 int *BytesAdded)
const {
331 assert(!BytesAdded &&
"code size not handled");
340 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
342 "ARM branch conditions have two or three components!");
352 }
else if (
Cond.size() == 2) {
363 if (
Cond.size() == 2)
368 else if (
Cond.size() == 3)
379 if (
Cond.size() == 2) {
391 while (++
I != E &&
I->isInsideBundle()) {
392 int PIdx =
I->findFirstPredOperandIdx();
393 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
399 int PIdx =
MI.findFirstPredOperandIdx();
400 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
408 std::string GenericComment =
410 if (!GenericComment.empty())
411 return GenericComment;
415 return std::string();
419 int FirstPredOp =
MI.findFirstPredOperandIdx();
420 if (FirstPredOp != (
int)
OpIdx)
421 return std::string();
423 std::string CC =
"CC::";
430 unsigned Opc =
MI.getOpcode();
439 int PIdx =
MI.findFirstPredOperandIdx();
443 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
450 "CPSR def isn't expected operand");
451 assert((
MI.getOperand(1).isDead() ||
452 MI.getOperand(1).getReg() != ARM::CPSR) &&
453 "if conversion tried to stop defining used CPSR");
454 MI.getOperand(1).setReg(ARM::NoRegister);
464 if (Pred1.
size() > 2 || Pred2.
size() > 2)
489 std::vector<MachineOperand> &Pred,
490 bool SkipDead)
const {
493 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
494 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
495 if (ClobbersCPSR || IsCPSR) {
513 for (
const auto &MO :
MI.operands())
514 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
520 switch (
MI->getOpcode()) {
521 default:
return true;
552 if (!
MI.isPredicable())
590 if (!MO.isReg() || MO.isUndef() || MO.isUse())
592 if (MO.getReg() != ARM::CPSR)
612 switch (
MI.getOpcode()) {
619 return MCID.getSize();
620 case TargetOpcode::BUNDLE:
621 return getInstBundleSize(
MI);
622 case TargetOpcode::COPY:
627 case ARM::CONSTPOOL_ENTRY:
628 case ARM::JUMPTABLE_INSTS:
629 case ARM::JUMPTABLE_ADDRS:
630 case ARM::JUMPTABLE_TBB:
631 case ARM::JUMPTABLE_TBH:
634 return MI.getOperand(2).getImm();
636 return MI.getOperand(1).getImm();
638 case ARM::INLINEASM_BR: {
640 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
652 unsigned Opc = Subtarget.isThumb()
653 ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
661 if (Subtarget.isMClass())
672 unsigned Opc = Subtarget.isThumb()
673 ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
678 if (Subtarget.isMClass())
707 unsigned Cond,
unsigned Inactive) {
717 bool RenamableSrc)
const {
718 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
719 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
721 if (GPRDest && GPRSrc) {
729 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
730 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
733 if (SPRDest && SPRSrc)
735 else if (GPRDest && SPRSrc)
737 else if (SPRDest && GPRSrc)
739 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
741 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
742 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
747 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR)
749 if (
Opc == ARM::MVE_VORR)
751 else if (
Opc != ARM::MQPRCopy)
757 unsigned BeginIdx = 0;
758 unsigned SubRegs = 0;
762 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
763 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
764 BeginIdx = ARM::qsub_0;
766 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
767 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
768 BeginIdx = ARM::qsub_0;
771 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
773 BeginIdx = ARM::dsub_0;
775 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
777 BeginIdx = ARM::dsub_0;
779 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
781 BeginIdx = ARM::dsub_0;
783 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
784 Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
785 BeginIdx = ARM::gsub_0;
787 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
789 BeginIdx = ARM::dsub_0;
792 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
794 BeginIdx = ARM::dsub_0;
797 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
799 BeginIdx = ARM::dsub_0;
802 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
803 !Subtarget.hasFP64()) {
805 BeginIdx = ARM::ssub_0;
807 }
else if (SrcReg == ARM::CPSR) {
810 }
else if (DestReg == ARM::CPSR) {
813 }
else if (DestReg == ARM::VPR) {
819 }
else if (SrcReg == ARM::VPR) {
825 }
else if (DestReg == ARM::FPSCR_NZCV) {
827 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
831 }
else if (SrcReg == ARM::FPSCR_NZCV) {
833 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
839 assert(
Opc &&
"Impossible reg-to-reg copy");
845 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
846 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
852 for (
unsigned i = 0; i != SubRegs; ++i) {
853 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
854 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
855 assert(Dst && Src &&
"Bad sub-register");
857 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
862 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR) {
866 if (
Opc == ARM::MVE_VORR)
871 if (
Opc == ARM::MOVr)
880std::optional<DestSourcePair>
889 if (!
MI.isMoveReg() ||
890 (
MI.getOpcode() == ARM::VORRq &&
891 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
896std::optional<ParamLoadedValue>
900 Register DstReg = DstSrcPair->Destination->getReg();
931 return MIB.
addReg(Reg, State);
935 return MIB.
addReg(Reg, State, SubIdx);
940 Register SrcReg,
bool isKill,
int FI,
953 switch (
TRI.getSpillSize(*RC)) {
955 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
966 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
973 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
980 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
987 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
998 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1005 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1006 if (Subtarget.hasV5TEOps()) {
1009 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1020 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1026 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1042 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1043 Subtarget.hasMVEIntegerOps()) {
1048 .addMemOperand(MMO);
1054 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1057 Subtarget.hasNEON()) {
1071 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1072 AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1078 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1079 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1080 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1082 Subtarget.hasNEON()) {
1091 }
else if (Subtarget.hasMVEIntegerOps()) {
1103 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1104 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1105 AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1111 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1112 Subtarget.hasMVEIntegerOps()) {
1117 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1123 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1124 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1125 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1126 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, {});
1127 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, {});
1128 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, {});
1129 AddDReg(MIB, SrcReg, ARM::dsub_7, {});
1139 int &FrameIndex)
const {
1140 switch (
MI.getOpcode()) {
1144 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1145 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1146 MI.getOperand(3).getImm() == 0) {
1147 FrameIndex =
MI.getOperand(1).getIndex();
1148 return MI.getOperand(0).getReg();
1157 case ARM::VSTR_P0_off:
1158 case ARM::VSTR_FPSCR_NZCVQC_off:
1159 case ARM::MVE_VSTRWU32:
1160 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1161 MI.getOperand(2).getImm() == 0) {
1162 FrameIndex =
MI.getOperand(1).getIndex();
1163 return MI.getOperand(0).getReg();
1167 case ARM::VST1d64TPseudo:
1168 case ARM::VST1d64QPseudo:
1169 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1170 FrameIndex =
MI.getOperand(0).getIndex();
1171 return MI.getOperand(2).getReg();
1175 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1176 FrameIndex =
MI.getOperand(1).getIndex();
1177 return MI.getOperand(0).getReg();
1180 case ARM::MQQPRStore:
1181 case ARM::MQQQQPRStore:
1182 if (
MI.getOperand(1).isFI()) {
1183 FrameIndex =
MI.getOperand(1).getIndex();
1184 return MI.getOperand(0).getReg();
1193 int &FrameIndex)
const {
1195 if (
MI.mayStore() && hasStoreToStackSlot(
MI,
Accesses) &&
1212 if (
I !=
MBB.end())
DL =
I->getDebugLoc();
1221 switch (
TRI.getSpillSize(*RC)) {
1223 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1233 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1239 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1245 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1251 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1261 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1267 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1270 if (Subtarget.hasV5TEOps()) {
1293 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1306 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1307 Subtarget.hasMVEIntegerOps()) {
1309 MIB.addFrameIndex(FI)
1311 .addMemOperand(MMO);
1317 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1319 Subtarget.hasNEON()) {
1340 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1341 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1342 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1344 Subtarget.hasNEON()) {
1350 }
else if (Subtarget.hasMVEIntegerOps()) {
1370 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1371 Subtarget.hasMVEIntegerOps()) {
1375 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1399 int &FrameIndex)
const {
1400 switch (
MI.getOpcode()) {
1404 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1405 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1406 MI.getOperand(3).getImm() == 0) {
1407 FrameIndex =
MI.getOperand(1).getIndex();
1408 return MI.getOperand(0).getReg();
1417 case ARM::VLDR_P0_off:
1418 case ARM::VLDR_FPSCR_NZCVQC_off:
1419 case ARM::MVE_VLDRWU32:
1420 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1421 MI.getOperand(2).getImm() == 0) {
1422 FrameIndex =
MI.getOperand(1).getIndex();
1423 return MI.getOperand(0).getReg();
1427 case ARM::VLD1d8TPseudo:
1428 case ARM::VLD1d16TPseudo:
1429 case ARM::VLD1d32TPseudo:
1430 case ARM::VLD1d64TPseudo:
1431 case ARM::VLD1d8QPseudo:
1432 case ARM::VLD1d16QPseudo:
1433 case ARM::VLD1d32QPseudo:
1434 case ARM::VLD1d64QPseudo:
1435 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1436 FrameIndex =
MI.getOperand(1).getIndex();
1437 return MI.getOperand(0).getReg();
1441 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1442 FrameIndex =
MI.getOperand(1).getIndex();
1443 return MI.getOperand(0).getReg();
1446 case ARM::MQQPRLoad:
1447 case ARM::MQQQQPRLoad:
1448 if (
MI.getOperand(1).isFI()) {
1449 FrameIndex =
MI.getOperand(1).getIndex();
1450 return MI.getOperand(0).getReg();
1459 int &FrameIndex)
const {
1461 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI,
Accesses) &&
1475 bool isThumb2 = Subtarget.
isThumb2();
1482 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1484 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1485 : isThumb1 ? ARM::tLDMIA_UPD
1489 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1492 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1493 MachineOperand STWb(
MI->getOperand(0));
1494 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1495 : isThumb1 ? ARM::tSTMIA_UPD
1499 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1502 MachineOperand LDBase(
MI->getOperand(3));
1505 MachineOperand STBase(
MI->getOperand(2));
1514 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1515 return TRI.getEncodingValue(Reg1) <
1516 TRI.getEncodingValue(Reg2);
1519 for (
const auto &
Reg : ScratchRegs) {
1528 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1529 expandLoadStackGuard(
MI);
1530 MI.getParent()->erase(
MI);
1534 if (
MI.getOpcode() == ARM::MEMCPY) {
1543 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1548 Register DstRegS =
MI.getOperand(0).getReg();
1549 Register SrcRegS =
MI.getOperand(1).getReg();
1550 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1555 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1557 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1558 if (!DstRegD || !SrcRegD)
1564 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1568 if (
MI.getOperand(0).isDead())
1577 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1578 if (ImpDefIdx != -1)
1579 MI.removeOperand(ImpDefIdx);
1582 MI.setDesc(
get(ARM::VMOVD));
1583 MI.getOperand(0).setReg(DstRegD);
1584 MI.getOperand(1).setReg(SrcRegD);
1591 MI.getOperand(1).setIsUndef();
1596 if (
MI.getOperand(1).isKill()) {
1597 MI.getOperand(1).setIsKill(
false);
1598 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1612 assert(MCPE.isMachineConstantPoolEntry() &&
1613 "Expecting a machine constantpool entry!");
1663 case ARM::tLDRpci_pic:
1664 case ARM::t2LDRpci_pic: {
1684 switch (
I->getOpcode()) {
1685 case ARM::tLDRpci_pic:
1686 case ARM::t2LDRpci_pic: {
1688 unsigned CPI =
I->getOperand(1).getIndex();
1690 I->getOperand(1).setIndex(CPI);
1691 I->getOperand(2).setImm(PCLabelId);
1695 if (!
I->isBundledWithSucc())
1706 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1707 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1708 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1709 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1710 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1711 Opcode == ARM::t2MOV_ga_pcrel) {
1722 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1723 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1724 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1725 Opcode == ARM::t2MOV_ga_pcrel)
1737 if (isARMCP0 && isARMCP1) {
1743 }
else if (!isARMCP0 && !isARMCP1) {
1747 }
else if (Opcode == ARM::PICLDR) {
1755 if (Addr0 != Addr1) {
1791 int64_t &Offset2)
const {
1793 if (Subtarget.isThumb1Only())
return false;
1798 auto IsLoadOpcode = [&](
unsigned Opcode) {
1813 case ARM::t2LDRSHi8:
1815 case ARM::t2LDRBi12:
1816 case ARM::t2LDRSHi12:
1857 int64_t Offset1, int64_t Offset2,
1858 unsigned NumLoads)
const {
1860 if (Subtarget.isThumb1Only())
return false;
1862 assert(Offset2 > Offset1);
1864 if ((Offset2 - Offset1) / 8 > 64)
1895 if (
MI.isDebugInstr())
1899 if (
MI.isTerminator() ||
MI.isPosition())
1903 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1917 while (++
I !=
MBB->end() &&
I->isDebugInstr())
1919 if (
I !=
MBB->end() &&
I->getOpcode() == ARM::t2IT)
1930 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
1938 unsigned NumCycles,
unsigned ExtraPredCycles,
1946 if (
MBB.getParent()->getFunction().hasOptSize()) {
1948 if (!Pred->empty()) {
1950 if (LastMI->
getOpcode() == ARM::t2Bcc) {
1959 MBB, 0, 0, Probability);
1964 unsigned TCycles,
unsigned TExtra,
1966 unsigned FCycles,
unsigned FExtra,
1975 if (Subtarget.isThumb2() &&
TBB.getParent()->getFunction().hasMinSize()) {
1983 const unsigned ScalingUpFactor = 1024;
1985 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1986 unsigned UnpredCost;
1987 if (!Subtarget.hasBranchPredictor()) {
1990 unsigned NotTakenBranchCost = 1;
1991 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
1992 unsigned TUnpredCycles, FUnpredCycles;
1995 TUnpredCycles = TCycles + NotTakenBranchCost;
1996 FUnpredCycles = TakenBranchCost;
1999 TUnpredCycles = TCycles + TakenBranchCost;
2000 FUnpredCycles = FCycles + NotTakenBranchCost;
2003 PredCost -= 1 * ScalingUpFactor;
2006 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2007 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2008 UnpredCost = TUnpredCost + FUnpredCost;
2011 if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2012 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2015 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2016 unsigned FUnpredCost =
2018 UnpredCost = TUnpredCost + FUnpredCost;
2019 UnpredCost += 1 * ScalingUpFactor;
2020 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2023 return PredCost <= UnpredCost;
2028 unsigned NumInsts)
const {
2032 if (!Subtarget.isThumb2())
2036 unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
2045 if (
MI.getOpcode() == ARM::t2Bcc &&
2057 if (Subtarget.isThumb2())
2068 return Subtarget.isProfitableToUnpredicate();
2076 int PIdx =
MI.findFirstPredOperandIdx();
2082 PredReg =
MI.getOperand(PIdx+1).getReg();
2091 if (
Opc == ARM::t2B)
2100 unsigned OpIdx2)
const {
2101 switch (
MI.getOpcode()) {
2103 case ARM::t2MOVCCr: {
2108 if (CC ==
ARMCC::AL || PredReg != ARM::CPSR)
2128 if (!Reg.isVirtual())
2142 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2149 if (MO.getReg().isPhysical())
2151 if (MO.isDef() && !MO.isDead())
2154 bool DontMoveAcrossStores =
true;
2155 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2163 bool PreferFalse)
const {
2164 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2165 "Unknown select instruction");
2168 bool Invert = !
DefMI;
2170 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(), MRI,
this);
2177 Register DestReg =
MI.getOperand(0).getReg();
2193 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2196 unsigned CondCode =
MI.getOperand(3).getImm();
2201 NewMI.
add(
MI.getOperand(4));
2212 NewMI.
add(FalseReg);
2223 if (
DefMI->getParent() !=
MI.getParent())
2227 DefMI->eraseFromParent();
2243 {ARM::ADDSri, ARM::ADDri},
2244 {ARM::ADDSrr, ARM::ADDrr},
2245 {ARM::ADDSrsi, ARM::ADDrsi},
2246 {ARM::ADDSrsr, ARM::ADDrsr},
2248 {ARM::SUBSri, ARM::SUBri},
2249 {ARM::SUBSrr, ARM::SUBrr},
2250 {ARM::SUBSrsi, ARM::SUBrsi},
2251 {ARM::SUBSrsr, ARM::SUBrsr},
2253 {ARM::RSBSri, ARM::RSBri},
2254 {ARM::RSBSrsi, ARM::RSBrsi},
2255 {ARM::RSBSrsr, ARM::RSBrsr},
2257 {ARM::tADDSi3, ARM::tADDi3},
2258 {ARM::tADDSi8, ARM::tADDi8},
2259 {ARM::tADDSrr, ARM::tADDrr},
2260 {ARM::tADCS, ARM::tADC},
2262 {ARM::tSUBSi3, ARM::tSUBi3},
2263 {ARM::tSUBSi8, ARM::tSUBi8},
2264 {ARM::tSUBSrr, ARM::tSUBrr},
2265 {ARM::tSBCS, ARM::tSBC},
2266 {ARM::tRSBS, ARM::tRSB},
2267 {ARM::tLSLSri, ARM::tLSLri},
2269 {ARM::t2ADDSri, ARM::t2ADDri},
2270 {ARM::t2ADDSrr, ARM::t2ADDrr},
2271 {ARM::t2ADDSrs, ARM::t2ADDrs},
2273 {ARM::t2SUBSri, ARM::t2SUBri},
2274 {ARM::t2SUBSrr, ARM::t2SUBrr},
2275 {ARM::t2SUBSrs, ARM::t2SUBrs},
2277 {ARM::t2RSBSri, ARM::t2RSBri},
2278 {ARM::t2RSBSrs, ARM::t2RSBrs},
2283 if (OldOpc == Entry.PseudoOpc)
2284 return Entry.MachineOpc;
2295 if (NumBytes == 0 && DestReg != BaseReg) {
2304 bool isSub = NumBytes < 0;
2305 if (isSub) NumBytes = -NumBytes;
2310 assert(ThisVal &&
"Didn't extract field correctly");
2313 NumBytes &= ~ThisVal;
2318 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2331 unsigned NumBytes) {
2342 if (!IsPush && !IsPop)
2345 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2346 MI->getOpcode() == ARM::VLDMDIA_UPD;
2347 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2348 MI->getOpcode() == ARM::tPOP ||
2349 MI->getOpcode() == ARM::tPOP_RET;
2351 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2352 MI->getOperand(1).getReg() == ARM::SP)) &&
2353 "trying to fold sp update into non-sp-updating push/pop");
2358 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2363 int RegListIdx = IsT1PushPop ? 2 : 4;
2366 unsigned RegsNeeded;
2369 RegsNeeded = NumBytes / 8;
2370 RegClass = &ARM::DPRRegClass;
2372 RegsNeeded = NumBytes / 4;
2373 RegClass = &ARM::GPRRegClass;
2383 unsigned FirstRegEnc = -1;
2386 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2391 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2392 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2395 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2398 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2401 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2408 false,
false,
true));
2418 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2440 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2441 MI->removeOperand(i);
2454 unsigned Opcode =
MI.getOpcode();
2460 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2463 if (Opcode == ARM::ADDri) {
2464 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2467 MI.setDesc(
TII.get(ARM::MOVr));
2468 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2469 MI.removeOperand(FrameRegIdx+1);
2475 MI.setDesc(
TII.get(ARM::SUBri));
2481 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2482 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2497 "Bit extraction didn't work?");
2498 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2500 unsigned ImmIdx = 0;
2502 unsigned NumBits = 0;
2506 ImmIdx = FrameRegIdx + 1;
2507 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2511 ImmIdx = FrameRegIdx+2;
2518 ImmIdx = FrameRegIdx+2;
2529 ImmIdx = FrameRegIdx+1;
2537 ImmIdx = FrameRegIdx+1;
2547 ImmIdx = FrameRegIdx+1;
2548 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2557 Offset += InstrOffs * Scale;
2558 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2568 int ImmedOffset =
Offset / Scale;
2569 unsigned Mask = (1 << NumBits) - 1;
2570 if ((
unsigned)
Offset <= Mask * Scale) {
2572 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2578 ImmedOffset = -ImmedOffset;
2580 ImmedOffset |= 1 << NumBits;
2588 ImmedOffset = ImmedOffset & Mask;
2591 ImmedOffset = -ImmedOffset;
2593 ImmedOffset |= 1 << NumBits;
2609 Register &SrcReg2, int64_t &CmpMask,
2610 int64_t &CmpValue)
const {
2611 switch (
MI.getOpcode()) {
2616 SrcReg =
MI.getOperand(0).getReg();
2619 CmpValue =
MI.getOperand(1).getImm();
2624 SrcReg =
MI.getOperand(0).getReg();
2625 SrcReg2 =
MI.getOperand(1).getReg();
2631 SrcReg =
MI.getOperand(0).getReg();
2633 CmpMask =
MI.getOperand(1).getImm();
2646 int CmpMask,
bool CommonUse) {
2647 switch (
MI->getOpcode()) {
2650 if (CmpMask !=
MI->getOperand(2).getImm())
2652 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2742 switch (
MI->getOpcode()) {
2743 default:
return false;
2839 if (!
MI)
return false;
2842 if (CmpMask != ~0) {
2848 if (UI->getParent() != CmpInstr.
getParent())
2857 if (!
MI)
return false;
2866 if (
I ==
B)
return false;
2877 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
2882 if (CmpInstr.
getOpcode() == ARM::CMPri ||
2890 bool IsThumb1 =
false;
2907 if (
MI && IsThumb1) {
2909 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
2910 bool CanReorder =
true;
2911 for (;
I != E; --
I) {
2912 if (
I->getOpcode() != ARM::tMOVi8) {
2918 MI =
MI->removeFromParent();
2929 bool SubAddIsThumb1 =
false;
2944 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
2945 Instr.readsRegister(ARM::CPSR,
TRI))
2967 IsThumb1 = SubAddIsThumb1;
2982 bool isSafe =
false;
2985 while (!isSafe && ++
I != E) {
2987 for (
unsigned IO = 0, EO = Instr.getNumOperands();
2988 !isSafe && IO != EO; ++IO) {
3002 bool IsInstrVSel =
true;
3003 switch (Instr.getOpcode()) {
3005 IsInstrVSel =
false;
3039 bool IsSub =
Opc == ARM::SUBrr ||
Opc == ARM::t2SUBrr ||
3040 Opc == ARM::SUBri ||
Opc == ARM::t2SUBri ||
3041 Opc == ARM::tSUBrr ||
Opc == ARM::tSUBi3 ||
3043 unsigned OpI =
Opc != ARM::tSUBrr ? 1 : 2;
3055 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3089 if (Succ->isLiveIn(ARM::CPSR))
3096 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3097 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3098 MI->getOperand(CPSRRegNum).setIsDef(
true);
3106 for (
auto &[MO,
Cond] : OperandsToUpdate)
3109 MI->clearRegisterDeads(ARM::CPSR);
3123 int64_t CmpMask, CmpValue;
3125 if (
Next !=
MI.getParent()->end() &&
3136 unsigned DefOpc =
DefMI.getOpcode();
3137 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3138 DefOpc != ARM::tMOVi32imm)
3140 if (!
DefMI.getOperand(1).isImm())
3160 if (
UseMI.getOperand(
NumOps - 1).getReg() == ARM::CPSR)
3166 unsigned UseOpc =
UseMI.getOpcode();
3167 unsigned NewUseOpc = 0;
3169 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3170 bool Commute =
false;
3172 default:
return false;
3180 case ARM::t2EORrr: {
3181 Commute =
UseMI.getOperand(2).getReg() != Reg;
3186 if (UseOpc == ARM::SUBrr && Commute)
3192 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3195 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3209 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3210 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3214 case ARM::t2SUBrr: {
3215 if (UseOpc == ARM::t2SUBrr && Commute)
3220 const bool ToSP =
DefMI.getOperand(0).getReg() == ARM::SP;
3221 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3222 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3224 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3227 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3242 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3243 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3250 unsigned OpIdx = Commute ? 2 : 1;
3252 bool isKill =
UseMI.getOperand(
OpIdx).isKill();
3262 UseMI.getOperand(1).setReg(NewReg);
3263 UseMI.getOperand(1).setIsKill();
3264 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3265 DefMI.eraseFromParent();
3272 case ARM::t2ADDspImm:
3273 case ARM::t2SUBspImm:
3283 switch (
MI.getOpcode()) {
3287 assert(UOps >= 0 &&
"bad # UOps");
3295 unsigned ShOpVal =
MI.getOperand(3).getImm();
3300 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3308 if (!
MI.getOperand(2).getReg())
3311 unsigned ShOpVal =
MI.getOperand(3).getImm();
3316 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3326 case ARM::LDRSB_POST:
3327 case ARM::LDRSH_POST: {
3330 return (Rt == Rm) ? 4 : 3;
3333 case ARM::LDR_PRE_REG:
3334 case ARM::LDRB_PRE_REG: {
3339 unsigned ShOpVal =
MI.getOperand(4).getImm();
3344 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3350 case ARM::STR_PRE_REG:
3351 case ARM::STRB_PRE_REG: {
3352 unsigned ShOpVal =
MI.getOperand(4).getImm();
3357 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3364 case ARM::STRH_PRE: {
3374 case ARM::LDR_POST_REG:
3375 case ARM::LDRB_POST_REG:
3376 case ARM::LDRH_POST: {
3379 return (Rt == Rm) ? 3 : 2;
3382 case ARM::LDR_PRE_IMM:
3383 case ARM::LDRB_PRE_IMM:
3384 case ARM::LDR_POST_IMM:
3385 case ARM::LDRB_POST_IMM:
3386 case ARM::STRB_POST_IMM:
3387 case ARM::STRB_POST_REG:
3388 case ARM::STRB_PRE_IMM:
3389 case ARM::STRH_POST:
3390 case ARM::STR_POST_IMM:
3391 case ARM::STR_POST_REG:
3392 case ARM::STR_PRE_IMM:
3395 case ARM::LDRSB_PRE:
3396 case ARM::LDRSH_PRE: {
3403 unsigned ShOpVal =
MI.getOperand(4).getImm();
3408 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3421 return (Rt == Rn) ? 3 : 2;
3432 case ARM::LDRD_POST:
3433 case ARM::t2LDRD_POST:
3436 case ARM::STRD_POST:
3437 case ARM::t2STRD_POST:
3440 case ARM::LDRD_PRE: {
3447 return (Rt == Rn) ? 4 : 3;
3450 case ARM::t2LDRD_PRE: {
3453 return (Rt == Rn) ? 4 : 3;
3456 case ARM::STRD_PRE: {
3464 case ARM::t2STRD_PRE:
3467 case ARM::t2LDR_POST:
3468 case ARM::t2LDRB_POST:
3469 case ARM::t2LDRB_PRE:
3470 case ARM::t2LDRSBi12:
3471 case ARM::t2LDRSBi8:
3472 case ARM::t2LDRSBpci:
3474 case ARM::t2LDRH_POST:
3475 case ARM::t2LDRH_PRE:
3477 case ARM::t2LDRSB_POST:
3478 case ARM::t2LDRSB_PRE:
3479 case ARM::t2LDRSH_POST:
3480 case ARM::t2LDRSH_PRE:
3481 case ARM::t2LDRSHi12:
3482 case ARM::t2LDRSHi8:
3483 case ARM::t2LDRSHpci:
3487 case ARM::t2LDRDi8: {
3490 return (Rt == Rn) ? 3 : 2;
3493 case ARM::t2STRB_POST:
3494 case ARM::t2STRB_PRE:
3497 case ARM::t2STRH_POST:
3498 case ARM::t2STRH_PRE:
3500 case ARM::t2STR_POST:
3501 case ARM::t2STR_PRE:
3532 E =
MI.memoperands_end();
3534 Size += (*I)->getSize().getValue();
3541 return std::min(
Size / 4, 16U);
3546 unsigned UOps = 1 + NumRegs;
3550 case ARM::VLDMDIA_UPD:
3551 case ARM::VLDMDDB_UPD:
3552 case ARM::VLDMSIA_UPD:
3553 case ARM::VLDMSDB_UPD:
3554 case ARM::VSTMDIA_UPD:
3555 case ARM::VSTMDDB_UPD:
3556 case ARM::VSTMSIA_UPD:
3557 case ARM::VSTMSDB_UPD:
3558 case ARM::LDMIA_UPD:
3559 case ARM::LDMDA_UPD:
3560 case ARM::LDMDB_UPD:
3561 case ARM::LDMIB_UPD:
3562 case ARM::STMIA_UPD:
3563 case ARM::STMDA_UPD:
3564 case ARM::STMDB_UPD:
3565 case ARM::STMIB_UPD:
3566 case ARM::tLDMIA_UPD:
3567 case ARM::tSTMIA_UPD:
3568 case ARM::t2LDMIA_UPD:
3569 case ARM::t2LDMDB_UPD:
3570 case ARM::t2STMIA_UPD:
3571 case ARM::t2STMDB_UPD:
3574 case ARM::LDMIA_RET:
3576 case ARM::t2LDMIA_RET:
3585 if (!ItinData || ItinData->
isEmpty())
3589 unsigned Class =
Desc.getSchedClass();
3591 if (ItinUOps >= 0) {
3592 if (Subtarget.isSwift() && (
Desc.mayLoad() ||
Desc.mayStore()))
3598 unsigned Opc =
MI.getOpcode();
3617 case ARM::VLDMDIA_UPD:
3618 case ARM::VLDMDDB_UPD:
3620 case ARM::VLDMSIA_UPD:
3621 case ARM::VLDMSDB_UPD:
3623 case ARM::VSTMDIA_UPD:
3624 case ARM::VSTMDDB_UPD:
3626 case ARM::VSTMSIA_UPD:
3627 case ARM::VSTMSDB_UPD: {
3628 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3629 return (NumRegs / 2) + (NumRegs % 2) + 1;
3632 case ARM::LDMIA_RET:
3637 case ARM::LDMIA_UPD:
3638 case ARM::LDMDA_UPD:
3639 case ARM::LDMDB_UPD:
3640 case ARM::LDMIB_UPD:
3645 case ARM::STMIA_UPD:
3646 case ARM::STMDA_UPD:
3647 case ARM::STMDB_UPD:
3648 case ARM::STMIB_UPD:
3650 case ARM::tLDMIA_UPD:
3651 case ARM::tSTMIA_UPD:
3655 case ARM::t2LDMIA_RET:
3658 case ARM::t2LDMIA_UPD:
3659 case ARM::t2LDMDB_UPD:
3662 case ARM::t2STMIA_UPD:
3663 case ARM::t2STMDB_UPD: {
3664 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3665 switch (Subtarget.getLdStMultipleTiming()) {
3676 unsigned UOps = (NumRegs / 2);
3682 unsigned UOps = (NumRegs / 2);
3685 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3686 (*
MI.memoperands_begin())->getAlign() <
Align(8))
3696std::optional<unsigned>
3699 unsigned DefIdx,
unsigned DefAlign)
const {
3708 DefCycle = RegNo / 2 + 1;
3713 bool isSLoad =
false;
3718 case ARM::VLDMSIA_UPD:
3719 case ARM::VLDMSDB_UPD:
3726 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3730 DefCycle = RegNo + 2;
3736std::optional<unsigned>
3739 unsigned DefIdx,
unsigned DefAlign)
const {
3746 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3749 DefCycle = RegNo / 2;
3754 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3755 DefCycle = (RegNo / 2);
3758 if ((RegNo % 2) || DefAlign < 8)
3764 DefCycle = RegNo + 2;
3770std::optional<unsigned>
3773 unsigned UseIdx,
unsigned UseAlign)
const {
3779 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3781 UseCycle = RegNo / 2 + 1;
3784 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3786 bool isSStore =
false;
3791 case ARM::VSTMSIA_UPD:
3792 case ARM::VSTMSDB_UPD:
3799 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3803 UseCycle = RegNo + 2;
3809std::optional<unsigned>
3812 unsigned UseIdx,
unsigned UseAlign)
const {
3818 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3819 UseCycle = RegNo / 2;
3824 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3825 UseCycle = (RegNo / 2);
3828 if ((RegNo % 2) || UseAlign < 8)
3839 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
3840 unsigned UseIdx,
unsigned UseAlign)
const {
3850 std::optional<unsigned> DefCycle;
3851 bool LdmBypass =
false;
3858 case ARM::VLDMDIA_UPD:
3859 case ARM::VLDMDDB_UPD:
3861 case ARM::VLDMSIA_UPD:
3862 case ARM::VLDMSDB_UPD:
3863 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3866 case ARM::LDMIA_RET:
3871 case ARM::LDMIA_UPD:
3872 case ARM::LDMDA_UPD:
3873 case ARM::LDMDB_UPD:
3874 case ARM::LDMIB_UPD:
3876 case ARM::tLDMIA_UPD:
3878 case ARM::t2LDMIA_RET:
3881 case ARM::t2LDMIA_UPD:
3882 case ARM::t2LDMDB_UPD:
3884 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3892 std::optional<unsigned> UseCycle;
3899 case ARM::VSTMDIA_UPD:
3900 case ARM::VSTMDDB_UPD:
3902 case ARM::VSTMSIA_UPD:
3903 case ARM::VSTMSDB_UPD:
3904 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3911 case ARM::STMIA_UPD:
3912 case ARM::STMDA_UPD:
3913 case ARM::STMDB_UPD:
3914 case ARM::STMIB_UPD:
3915 case ARM::tSTMIA_UPD:
3920 case ARM::t2STMIA_UPD:
3921 case ARM::t2STMDB_UPD:
3922 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3930 if (UseCycle > *DefCycle + 1)
3931 return std::nullopt;
3933 UseCycle = *DefCycle - *UseCycle + 1;
3934 if (UseCycle > 0u) {
3940 UseCycle = *UseCycle - 1;
3942 UseClass, UseIdx)) {
3943 UseCycle = *UseCycle - 1;
3952 unsigned &DefIdx,
unsigned &Dist) {
3957 assert(
II->isInsideBundle() &&
"Empty bundle?");
3960 while (
II->isInsideBundle()) {
3961 Idx =
II->findRegisterDefOperandIdx(
Reg,
TRI,
false,
true);
3968 assert(Idx != -1 &&
"Cannot find bundled definition!");
3975 unsigned &UseIdx,
unsigned &Dist) {
3979 assert(
II->isInsideBundle() &&
"Empty bundle?");
3984 while (
II !=
E &&
II->isInsideBundle()) {
3985 Idx =
II->findRegisterUseOperandIdx(
Reg,
TRI,
false);
3988 if (
II->getOpcode() != ARM::t2IT)
4016 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4026 case ARM::t2LDRSHs: {
4028 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4029 if (ShAmt == 0 || ShAmt == 2)
4034 }
else if (Subtarget.
isSwift()) {
4041 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4046 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4057 case ARM::t2LDRSHs: {
4059 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4060 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4067 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4074 case ARM::VLD1q8wb_fixed:
4075 case ARM::VLD1q16wb_fixed:
4076 case ARM::VLD1q32wb_fixed:
4077 case ARM::VLD1q64wb_fixed:
4078 case ARM::VLD1q8wb_register:
4079 case ARM::VLD1q16wb_register:
4080 case ARM::VLD1q32wb_register:
4081 case ARM::VLD1q64wb_register:
4088 case ARM::VLD2d8wb_fixed:
4089 case ARM::VLD2d16wb_fixed:
4090 case ARM::VLD2d32wb_fixed:
4091 case ARM::VLD2q8wb_fixed:
4092 case ARM::VLD2q16wb_fixed:
4093 case ARM::VLD2q32wb_fixed:
4094 case ARM::VLD2d8wb_register:
4095 case ARM::VLD2d16wb_register:
4096 case ARM::VLD2d32wb_register:
4097 case ARM::VLD2q8wb_register:
4098 case ARM::VLD2q16wb_register:
4099 case ARM::VLD2q32wb_register:
4104 case ARM::VLD3d8_UPD:
4105 case ARM::VLD3d16_UPD:
4106 case ARM::VLD3d32_UPD:
4107 case ARM::VLD1d64Twb_fixed:
4108 case ARM::VLD1d64Twb_register:
4109 case ARM::VLD3q8_UPD:
4110 case ARM::VLD3q16_UPD:
4111 case ARM::VLD3q32_UPD:
4116 case ARM::VLD4d8_UPD:
4117 case ARM::VLD4d16_UPD:
4118 case ARM::VLD4d32_UPD:
4119 case ARM::VLD1d64Qwb_fixed:
4120 case ARM::VLD1d64Qwb_register:
4121 case ARM::VLD4q8_UPD:
4122 case ARM::VLD4q16_UPD:
4123 case ARM::VLD4q32_UPD:
4124 case ARM::VLD1DUPq8:
4125 case ARM::VLD1DUPq16:
4126 case ARM::VLD1DUPq32:
4127 case ARM::VLD1DUPq8wb_fixed:
4128 case ARM::VLD1DUPq16wb_fixed:
4129 case ARM::VLD1DUPq32wb_fixed:
4130 case ARM::VLD1DUPq8wb_register:
4131 case ARM::VLD1DUPq16wb_register:
4132 case ARM::VLD1DUPq32wb_register:
4133 case ARM::VLD2DUPd8:
4134 case ARM::VLD2DUPd16:
4135 case ARM::VLD2DUPd32:
4136 case ARM::VLD2DUPd8wb_fixed:
4137 case ARM::VLD2DUPd16wb_fixed:
4138 case ARM::VLD2DUPd32wb_fixed:
4139 case ARM::VLD2DUPd8wb_register:
4140 case ARM::VLD2DUPd16wb_register:
4141 case ARM::VLD2DUPd32wb_register:
4142 case ARM::VLD4DUPd8:
4143 case ARM::VLD4DUPd16:
4144 case ARM::VLD4DUPd32:
4145 case ARM::VLD4DUPd8_UPD:
4146 case ARM::VLD4DUPd16_UPD:
4147 case ARM::VLD4DUPd32_UPD:
4149 case ARM::VLD1LNd16:
4150 case ARM::VLD1LNd32:
4151 case ARM::VLD1LNd8_UPD:
4152 case ARM::VLD1LNd16_UPD:
4153 case ARM::VLD1LNd32_UPD:
4155 case ARM::VLD2LNd16:
4156 case ARM::VLD2LNd32:
4157 case ARM::VLD2LNq16:
4158 case ARM::VLD2LNq32:
4159 case ARM::VLD2LNd8_UPD:
4160 case ARM::VLD2LNd16_UPD:
4161 case ARM::VLD2LNd32_UPD:
4162 case ARM::VLD2LNq16_UPD:
4163 case ARM::VLD2LNq32_UPD:
4165 case ARM::VLD4LNd16:
4166 case ARM::VLD4LNd32:
4167 case ARM::VLD4LNq16:
4168 case ARM::VLD4LNq32:
4169 case ARM::VLD4LNd8_UPD:
4170 case ARM::VLD4LNd16_UPD:
4171 case ARM::VLD4LNd32_UPD:
4172 case ARM::VLD4LNq16_UPD:
4173 case ARM::VLD4LNq32_UPD:
4187 if (!ItinData || ItinData->
isEmpty())
4188 return std::nullopt;
4194 unsigned DefAdj = 0;
4195 if (
DefMI.isBundle())
4204 unsigned UseAdj = 0;
4205 if (
UseMI.isBundle()) {
4209 return std::nullopt;
4212 return getOperandLatencyImpl(
4213 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4214 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4217std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4219 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4221 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4222 if (Reg == ARM::CPSR) {
4223 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4225 return Subtarget.
isLikeA9() ? 1 : 20;
4229 if (
UseMI.isBranch())
4248 return std::nullopt;
4250 unsigned DefAlign =
DefMI.hasOneMemOperand()
4251 ? (*
DefMI.memoperands_begin())->getAlign().value()
4253 unsigned UseAlign =
UseMI.hasOneMemOperand()
4254 ? (*
UseMI.memoperands_begin())->getAlign().value()
4259 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4262 return std::nullopt;
4265 int Adj = DefAdj + UseAdj;
4269 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4276std::optional<unsigned>
4278 SDNode *DefNode,
unsigned DefIdx,
4279 SDNode *UseNode,
unsigned UseIdx)
const {
4285 if (isZeroCost(DefMCID.
Opcode))
4288 if (!ItinData || ItinData->
isEmpty())
4289 return DefMCID.
mayLoad() ? 3 : 1;
4292 std::optional<unsigned>
Latency =
4294 int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4295 int Threshold = 1 + Adj;
4301 unsigned DefAlign = !DefMN->memoperands_empty()
4302 ? (*DefMN->memoperands_begin())->getAlign().value()
4305 unsigned UseAlign = !UseMN->memoperands_empty()
4306 ? (*UseMN->memoperands_begin())->getAlign().value()
4309 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4311 return std::nullopt;
4314 (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4315 Subtarget.isCortexA7())) {
4332 case ARM::t2LDRSHs: {
4335 if (ShAmt == 0 || ShAmt == 2)
4340 }
else if (DefIdx == 0 &&
Latency > 2U && Subtarget.isSwift()) {
4350 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4367 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4374 case ARM::VLD1q8wb_register:
4375 case ARM::VLD1q16wb_register:
4376 case ARM::VLD1q32wb_register:
4377 case ARM::VLD1q64wb_register:
4378 case ARM::VLD1q8wb_fixed:
4379 case ARM::VLD1q16wb_fixed:
4380 case ARM::VLD1q32wb_fixed:
4381 case ARM::VLD1q64wb_fixed:
4385 case ARM::VLD2q8Pseudo:
4386 case ARM::VLD2q16Pseudo:
4387 case ARM::VLD2q32Pseudo:
4388 case ARM::VLD2d8wb_fixed:
4389 case ARM::VLD2d16wb_fixed:
4390 case ARM::VLD2d32wb_fixed:
4391 case ARM::VLD2q8PseudoWB_fixed:
4392 case ARM::VLD2q16PseudoWB_fixed:
4393 case ARM::VLD2q32PseudoWB_fixed:
4394 case ARM::VLD2d8wb_register:
4395 case ARM::VLD2d16wb_register:
4396 case ARM::VLD2d32wb_register:
4397 case ARM::VLD2q8PseudoWB_register:
4398 case ARM::VLD2q16PseudoWB_register:
4399 case ARM::VLD2q32PseudoWB_register:
4400 case ARM::VLD3d8Pseudo:
4401 case ARM::VLD3d16Pseudo:
4402 case ARM::VLD3d32Pseudo:
4403 case ARM::VLD1d8TPseudo:
4404 case ARM::VLD1d16TPseudo:
4405 case ARM::VLD1d32TPseudo:
4406 case ARM::VLD1d64TPseudo:
4407 case ARM::VLD1d64TPseudoWB_fixed:
4408 case ARM::VLD1d64TPseudoWB_register:
4409 case ARM::VLD3d8Pseudo_UPD:
4410 case ARM::VLD3d16Pseudo_UPD:
4411 case ARM::VLD3d32Pseudo_UPD:
4412 case ARM::VLD3q8Pseudo_UPD:
4413 case ARM::VLD3q16Pseudo_UPD:
4414 case ARM::VLD3q32Pseudo_UPD:
4415 case ARM::VLD3q8oddPseudo:
4416 case ARM::VLD3q16oddPseudo:
4417 case ARM::VLD3q32oddPseudo:
4418 case ARM::VLD3q8oddPseudo_UPD:
4419 case ARM::VLD3q16oddPseudo_UPD:
4420 case ARM::VLD3q32oddPseudo_UPD:
4421 case ARM::VLD4d8Pseudo:
4422 case ARM::VLD4d16Pseudo:
4423 case ARM::VLD4d32Pseudo:
4424 case ARM::VLD1d8QPseudo:
4425 case ARM::VLD1d16QPseudo:
4426 case ARM::VLD1d32QPseudo:
4427 case ARM::VLD1d64QPseudo:
4428 case ARM::VLD1d64QPseudoWB_fixed:
4429 case ARM::VLD1d64QPseudoWB_register:
4430 case ARM::VLD1q8HighQPseudo:
4431 case ARM::VLD1q8LowQPseudo_UPD:
4432 case ARM::VLD1q8HighTPseudo:
4433 case ARM::VLD1q8LowTPseudo_UPD:
4434 case ARM::VLD1q16HighQPseudo:
4435 case ARM::VLD1q16LowQPseudo_UPD:
4436 case ARM::VLD1q16HighTPseudo:
4437 case ARM::VLD1q16LowTPseudo_UPD:
4438 case ARM::VLD1q32HighQPseudo:
4439 case ARM::VLD1q32LowQPseudo_UPD:
4440 case ARM::VLD1q32HighTPseudo:
4441 case ARM::VLD1q32LowTPseudo_UPD:
4442 case ARM::VLD1q64HighQPseudo:
4443 case ARM::VLD1q64LowQPseudo_UPD:
4444 case ARM::VLD1q64HighTPseudo:
4445 case ARM::VLD1q64LowTPseudo_UPD:
4446 case ARM::VLD4d8Pseudo_UPD:
4447 case ARM::VLD4d16Pseudo_UPD:
4448 case ARM::VLD4d32Pseudo_UPD:
4449 case ARM::VLD4q8Pseudo_UPD:
4450 case ARM::VLD4q16Pseudo_UPD:
4451 case ARM::VLD4q32Pseudo_UPD:
4452 case ARM::VLD4q8oddPseudo:
4453 case ARM::VLD4q16oddPseudo:
4454 case ARM::VLD4q32oddPseudo:
4455 case ARM::VLD4q8oddPseudo_UPD:
4456 case ARM::VLD4q16oddPseudo_UPD:
4457 case ARM::VLD4q32oddPseudo_UPD:
4458 case ARM::VLD1DUPq8:
4459 case ARM::VLD1DUPq16:
4460 case ARM::VLD1DUPq32:
4461 case ARM::VLD1DUPq8wb_fixed:
4462 case ARM::VLD1DUPq16wb_fixed:
4463 case ARM::VLD1DUPq32wb_fixed:
4464 case ARM::VLD1DUPq8wb_register:
4465 case ARM::VLD1DUPq16wb_register:
4466 case ARM::VLD1DUPq32wb_register:
4467 case ARM::VLD2DUPd8:
4468 case ARM::VLD2DUPd16:
4469 case ARM::VLD2DUPd32:
4470 case ARM::VLD2DUPd8wb_fixed:
4471 case ARM::VLD2DUPd16wb_fixed:
4472 case ARM::VLD2DUPd32wb_fixed:
4473 case ARM::VLD2DUPd8wb_register:
4474 case ARM::VLD2DUPd16wb_register:
4475 case ARM::VLD2DUPd32wb_register:
4476 case ARM::VLD2DUPq8EvenPseudo:
4477 case ARM::VLD2DUPq8OddPseudo:
4478 case ARM::VLD2DUPq16EvenPseudo:
4479 case ARM::VLD2DUPq16OddPseudo:
4480 case ARM::VLD2DUPq32EvenPseudo:
4481 case ARM::VLD2DUPq32OddPseudo:
4482 case ARM::VLD3DUPq8EvenPseudo:
4483 case ARM::VLD3DUPq8OddPseudo:
4484 case ARM::VLD3DUPq16EvenPseudo:
4485 case ARM::VLD3DUPq16OddPseudo:
4486 case ARM::VLD3DUPq32EvenPseudo:
4487 case ARM::VLD3DUPq32OddPseudo:
4488 case ARM::VLD4DUPd8Pseudo:
4489 case ARM::VLD4DUPd16Pseudo:
4490 case ARM::VLD4DUPd32Pseudo:
4491 case ARM::VLD4DUPd8Pseudo_UPD:
4492 case ARM::VLD4DUPd16Pseudo_UPD:
4493 case ARM::VLD4DUPd32Pseudo_UPD:
4494 case ARM::VLD4DUPq8EvenPseudo:
4495 case ARM::VLD4DUPq8OddPseudo:
4496 case ARM::VLD4DUPq16EvenPseudo:
4497 case ARM::VLD4DUPq16OddPseudo:
4498 case ARM::VLD4DUPq32EvenPseudo:
4499 case ARM::VLD4DUPq32OddPseudo:
4500 case ARM::VLD1LNq8Pseudo:
4501 case ARM::VLD1LNq16Pseudo:
4502 case ARM::VLD1LNq32Pseudo:
4503 case ARM::VLD1LNq8Pseudo_UPD:
4504 case ARM::VLD1LNq16Pseudo_UPD:
4505 case ARM::VLD1LNq32Pseudo_UPD:
4506 case ARM::VLD2LNd8Pseudo:
4507 case ARM::VLD2LNd16Pseudo:
4508 case ARM::VLD2LNd32Pseudo:
4509 case ARM::VLD2LNq16Pseudo:
4510 case ARM::VLD2LNq32Pseudo:
4511 case ARM::VLD2LNd8Pseudo_UPD:
4512 case ARM::VLD2LNd16Pseudo_UPD:
4513 case ARM::VLD2LNd32Pseudo_UPD:
4514 case ARM::VLD2LNq16Pseudo_UPD:
4515 case ARM::VLD2LNq32Pseudo_UPD:
4516 case ARM::VLD4LNd8Pseudo:
4517 case ARM::VLD4LNd16Pseudo:
4518 case ARM::VLD4LNd32Pseudo:
4519 case ARM::VLD4LNq16Pseudo:
4520 case ARM::VLD4LNq32Pseudo:
4521 case ARM::VLD4LNd8Pseudo_UPD:
4522 case ARM::VLD4LNd16Pseudo_UPD:
4523 case ARM::VLD4LNd32Pseudo_UPD:
4524 case ARM::VLD4LNq16Pseudo_UPD:
4525 case ARM::VLD4LNq32Pseudo_UPD:
4535unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4536 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4545 if (
MCID.isCall() || (
MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4546 !Subtarget.cheapPredicableCPSRDef())) {
4556 unsigned *PredCost)
const {
4557 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4563 if (
MI.isBundle()) {
4567 while (++
I !=
E &&
I->isInsideBundle()) {
4568 if (
I->getOpcode() != ARM::t2IT)
4569 Latency += getInstrLatency(ItinData, *
I, PredCost);
4574 const MCInstrDesc &MCID =
MI.getDesc();
4576 !Subtarget.cheapPredicableCPSRDef()))) {
4584 return MI.mayLoad() ? 3 : 1;
4597 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->getAlign().value() : 0;
4599 if (Adj >= 0 || (
int)
Latency > -Adj) {
4607 if (!
Node->isMachineOpcode())
4610 if (!ItinData || ItinData->
isEmpty())
4613 unsigned Opcode =
Node->getMachineOpcode();
4623bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4628 unsigned UseIdx)
const {
4631 if (Subtarget.nonpipelinedVFP() &&
4646 unsigned DefIdx)
const {
4648 if (!ItinData || ItinData->
isEmpty())
4653 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4654 std::optional<unsigned> DefCycle =
4656 return DefCycle && DefCycle <= 2U;
4664 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4667 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4669 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4670 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4671 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4675 if (
MI.getOpcode() == ARM::tPUSH ||
4676 MI.getOpcode() == ARM::tPOP ||
4677 MI.getOpcode() == ARM::tPOP_RET) {
4679 if (MO.isImplicit() || !MO.isReg())
4683 if (!(
MI.getOpcode() == ARM::tPUSH &&
Reg == ARM::LR) &&
4684 !(
MI.getOpcode() == ARM::tPOP_RET &&
Reg == ARM::PC)) {
4685 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4691 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4692 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4693 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4694 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4695 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4716 for (
auto Op :
MI.operands()) {
4723 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4733 unsigned LoadImmOpc,
4734 unsigned LoadOpc)
const {
4735 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4736 "ROPI/RWPI not currently supported with stack guard");
4744 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4745 assert(!Subtarget.isReadTPSoft() &&
4746 "TLS stack protector requires hardware TLS register");
4756 Module &M = *
MBB.getParent()->getFunction().getParent();
4757 Offset = M.getStackProtectorGuardOffset();
4762 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4773 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4776 if (Subtarget.isTargetMachO()) {
4778 }
else if (Subtarget.isTargetCOFF()) {
4781 else if (IsIndirect)
4783 }
else if (IsIndirect) {
4787 if (LoadImmOpc == ARM::tMOVi32imm) {
4790 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
4826 unsigned &AddSubOpc,
4827 bool &NegAcc,
bool &HasLane)
const {
4829 if (
I == MLxEntryMap.end())
4833 MulOpc = Entry.MulOpc;
4834 AddSubOpc = Entry.AddSubOpc;
4835 NegAcc = Entry.NegAcc;
4836 HasLane = Entry.HasLane;
4860std::pair<uint16_t, uint16_t>
4864 if (Subtarget.hasNEON()) {
4873 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
4874 MI.getOpcode() == ARM::VMOVS))
4881 return std::make_pair(
ExeNEON, 0);
4886 return std::make_pair(
ExeNEON, 0);
4889 return std::make_pair(
ExeVFP, 0);
4895 unsigned SReg,
unsigned &Lane) {
4897 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4904 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4906 assert(DReg &&
"S-register with no D super-register?");
4931 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
4937 ImplicitSReg =
TRI->getSubReg(DReg,
4938 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4940 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
4955 unsigned DstReg, SrcReg;
4960 switch (
MI.getOpcode()) {
4972 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
4975 DstReg =
MI.getOperand(0).getReg();
4976 SrcReg =
MI.getOperand(1).getReg();
4978 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
4979 MI.removeOperand(i - 1);
4982 MI.setDesc(
get(ARM::VORRd));
4994 DstReg =
MI.getOperand(0).getReg();
4995 SrcReg =
MI.getOperand(1).getReg();
4997 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
4998 MI.removeOperand(i - 1);
5005 MI.setDesc(
get(ARM::VGETLNi32));
5021 DstReg =
MI.getOperand(0).getReg();
5022 SrcReg =
MI.getOperand(1).getReg();
5030 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5031 MI.removeOperand(i - 1);
5035 MI.setDesc(
get(ARM::VSETLNi32));
5054 DstReg =
MI.getOperand(0).getReg();
5055 SrcReg =
MI.getOperand(1).getReg();
5057 unsigned DstLane = 0, SrcLane = 0;
5066 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5067 MI.removeOperand(i - 1);
5072 MI.setDesc(
get(ARM::VDUPLN32d));
5106 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5107 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5110 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5111 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5116 if (SrcLane == DstLane)
5119 MI.setDesc(
get(ARM::VEXTd32));
5124 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5125 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5128 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5129 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5134 if (SrcLane != DstLane)
5140 if (ImplicitSReg != 0)
5166 auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5167 if (!PartialUpdateClearance)
5178 switch (
MI.getOpcode()) {
5184 case ARM::VMOVv4i16:
5185 case ARM::VMOVv2i32:
5186 case ARM::VMOVv2f32:
5187 case ARM::VMOVv1i64:
5188 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5192 case ARM::VLD1LNd32:
5201 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5205 if (Reg.isVirtual()) {
5207 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5209 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5212 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5213 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5219 return PartialUpdateClearance;
5226 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5231 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5232 unsigned DReg = Reg;
5235 if (ARM::SPRRegClass.
contains(Reg)) {
5236 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5237 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5240 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5241 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5254 MI.addRegisterKilled(DReg,
TRI,
true);
5258 return Subtarget.hasFeature(ARM::HasV6KOps);
5262 if (
MI->getNumOperands() < 4)
5264 unsigned ShOpVal =
MI->getOperand(3).getImm();
5268 ((ShImm == 1 || ShImm == 2) &&
5278 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5279 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5281 switch (
MI.getOpcode()) {
5293 MOReg = &
MI.getOperand(2);
5305 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5306 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5308 switch (
MI.getOpcode()) {
5319 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5328 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5329 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5331 switch (
MI.getOpcode()) {
5332 case ARM::VSETLNi32:
5333 case ARM::MVE_VMOV_to_lane_32:
5341 BaseReg.Reg = MOBaseReg.
getReg();
5344 InsertedReg.
Reg = MOInsertedReg.
getReg();
5352std::pair<unsigned, unsigned>
5355 return std::make_pair(TF & Mask, TF & ~Mask);
5360 using namespace ARMII;
5362 static const std::pair<unsigned, const char *> TargetFlags[] = {
5363 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5364 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5365 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5372 using namespace ARMII;
5374 static const std::pair<unsigned, const char *> TargetFlags[] = {
5375 {MO_COFFSTUB,
"arm-coffstub"},
5376 {MO_GOT,
"arm-got"},
5377 {MO_SBREL,
"arm-sbrel"},
5378 {MO_DLLIMPORT,
"arm-dllimport"},
5379 {MO_SECREL,
"arm-secrel"},
5380 {MO_NONLAZY,
"arm-nonlazy"}};
5384std::optional<RegImmPair>
5387 unsigned Opcode =
MI.getOpcode();
5394 return std::nullopt;
5397 if (Opcode == ARM::SUBri)
5399 else if (Opcode != ARM::ADDri)
5400 return std::nullopt;
5405 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5406 return std::nullopt;
5408 Offset =
MI.getOperand(2).getImm() * Sign;
5416 for (
auto I = From;
I != To; ++
I)
5417 if (
I->modifiesRegister(Reg,
TRI))
5430 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5432 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5438 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5440 Register Reg = CmpMI->getOperand(0).getReg();
5443 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5456 if (Subtarget->isThumb()) {
5458 return ForCodesize ? 2 : 1;
5459 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5462 return ForCodesize ? 4 : 1;
5464 return ForCodesize ? 4 : 2;
5466 return ForCodesize ? 4 : 2;
5468 return ForCodesize ? 4 : 2;
5471 return ForCodesize ? 4 : 1;
5473 return ForCodesize ? 4 : 1;
5474 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5475 return ForCodesize ? 4 : 1;
5477 return ForCodesize ? 8 : 2;
5479 return ForCodesize ? 8 : 2;
5482 return ForCodesize ? 8 : 2;
5483 return ForCodesize ? 8 : 3;
5647 MachineFunction *MF =
C.getMF();
5649 const ARMBaseRegisterInfo *ARI =
5650 static_cast<const ARMBaseRegisterInfo *
>(&
TRI);
5659 C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
5660 C.isAvailableInsideSeq(
Reg,
TRI))
5674 for (;
I !=
E; ++
I) {
5678 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5682 unsigned Opcode =
MI.getOpcode();
5683 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5684 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5685 Opcode == ARM::tBXNS_RET || Opcode == ARM::t2BXAUT_RET) {
5691 if (
MI.readsRegister(ARM::LR, &
TRI))
5697std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5700 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5701 unsigned MinRepeats)
const {
5702 unsigned SequenceSize = 0;
5703 for (
auto &
MI : RepeatedSequenceLocs[0])
5707 unsigned FlagsSetInAll = 0xF;
5712 FlagsSetInAll &=
C.Flags;
5731 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5739 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5742 if (RepeatedSequenceLocs.size() < MinRepeats)
5743 return std::nullopt;
5762 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5763 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5764 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5766 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5768 if (RepeatedSequenceLocs.size() < MinRepeats)
5769 return std::nullopt;
5779 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5780 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5781 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5783 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5785 if (RepeatedSequenceLocs.size() < MinRepeats)
5786 return std::nullopt;
5791 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5794 auto SetCandidateCallInfo =
5795 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5797 C.setCallInfo(CallID, NumBytesForCall);
5802 const auto &SomeMFI =
5805 if (SomeMFI.branchTargetEnforcement()) {
5814 if (SomeMFI.shouldSignReturnAddress(
true)) {
5824 if (RepeatedSequenceLocs[0].back().isTerminator()) {
5828 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5829 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5830 LastInstrOpcode == ARM::tBLXr ||
5831 LastInstrOpcode == ARM::tBLXr_noip ||
5832 LastInstrOpcode == ARM::tBLXi) {
5840 unsigned NumBytesNoStackCalls = 0;
5841 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5846 const auto Last =
C.getMBB()->rbegin();
5847 const bool LRIsAvailable =
5848 C.getMBB()->isReturnBlock() && !
Last->isCall()
5851 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
5852 if (LRIsAvailable) {
5856 CandidatesWithoutStackFixups.push_back(
C);
5861 else if (findRegisterToSaveLRTo(
C)) {
5865 CandidatesWithoutStackFixups.push_back(
C);
5870 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
5873 CandidatesWithoutStackFixups.push_back(
C);
5879 NumBytesNoStackCalls += SequenceSize;
5885 if (NumBytesNoStackCalls <=
5886 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
5887 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5889 if (RepeatedSequenceLocs.size() < MinRepeats)
5890 return std::nullopt;
5915 return std::make_unique<outliner::OutlinedFunction>(
5916 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
5919bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
5922 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
5947 unsigned NumOps =
MI->getDesc().getNumOperands();
5948 unsigned ImmIdx =
NumOps - 3;
5952 int64_t OffVal =
Offset.getImm();
5958 unsigned NumBits = 0;
5987 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6007 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6008 "Can't encode this offset!");
6009 OffVal +=
Fixup / Scale;
6011 unsigned Mask = (1 << NumBits) - 1;
6013 if (OffVal <= Mask) {
6015 MI->getOperand(ImmIdx).setImm(OffVal);
6023 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6027 const Function &CFn =
C.getMF()->getFunction();
6034 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6042 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6061 unsigned &Flags)
const {
6064 assert(
MBB.getParent()->getRegInfo().tracksLiveness() &&
6065 "Suitable Machine Function for outlining must track liveness");
6073 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6074 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6078 if (R12AvailableInBlock && CPSRAvailableInBlock)
6086 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6088 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6098 bool LRIsAvailable =
6099 MBB.isReturnBlock() && !
MBB.back().isCall()
6111 unsigned Flags)
const {
6117 unsigned Opc =
MI.getOpcode();
6118 if (
Opc == ARM::tPICADD ||
Opc == ARM::PICADD ||
Opc == ARM::PICSTR ||
6119 Opc == ARM::PICSTRB ||
Opc == ARM::PICSTRH ||
Opc == ARM::PICLDR ||
6120 Opc == ARM::PICLDRB ||
Opc == ARM::PICLDRH ||
Opc == ARM::PICLDRSB ||
6121 Opc == ARM::PICLDRSH ||
Opc == ARM::t2LDRpci_pic ||
6122 Opc == ARM::t2MOVi16_ga_pcrel ||
Opc == ARM::t2MOVTi16_ga_pcrel ||
6123 Opc == ARM::t2MOV_ga_pcrel)
6127 if (
Opc == ARM::t2BF_LabelPseudo ||
Opc == ARM::t2DoLoopStart ||
6128 Opc == ARM::t2DoLoopStartTP ||
Opc == ARM::t2WhileLoopStart ||
6129 Opc == ARM::t2WhileLoopStartLR ||
Opc == ARM::t2WhileLoopStartTP ||
6130 Opc == ARM::t2LoopDec ||
Opc == ARM::t2LoopEnd ||
6131 Opc == ARM::t2LoopEndDec)
6140 if (
MI.isTerminator())
6146 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6154 if (MOP.isGlobal()) {
6163 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6164 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6172 if (
Opc == ARM::BL ||
Opc == ARM::tBL ||
Opc == ARM::BLX ||
6173 Opc == ARM::BLX_noip ||
Opc == ARM::tBLXr ||
Opc == ARM::tBLXr_noip ||
6178 return UnknownCallOutlineType;
6186 return UnknownCallOutlineType;
6194 return UnknownCallOutlineType;
6202 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6206 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6219 bool MightNeedStackFixUp =
6223 if (!MightNeedStackFixUp)
6229 if (
MI.modifiesRegister(ARM::SP,
TRI))
6234 if (checkAndUpdateStackOffset(&
MI, Subtarget.getStackAlignment().value(),
6243 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6244 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6248 if (
MI.isCFIInstruction())
6263 int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6265 assert(Align >= 8 && Align <= 256);
6267 assert(Subtarget.isThumb2());
6279 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6293 CFIBuilder.buildDefCFAOffset(Align);
6298 CFIBuilder.buildOffset(ARM::LR, -LROffset);
6301 CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -Align);
6307 bool CFI,
bool Auth)
const {
6308 int Align = Subtarget.getStackAlignment().value();
6311 assert(Subtarget.isThumb2());
6323 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6327 if (!Subtarget.isThumb())
6329 MIB.
addImm(Subtarget.getStackAlignment().value())
6337 CFIBuilder.buildDefCFAOffset(0);
6338 CFIBuilder.buildRestore(ARM::LR);
6340 CFIBuilder.buildUndefined(ARM::RA_AUTH_CODE);
6354 bool isThumb = Subtarget.isThumb();
6355 unsigned FuncOp =
isThumb ? 2 : 0;
6356 unsigned Opc =
Call->getOperand(FuncOp).isReg()
6357 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6358 :
isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6362 .
add(
Call->getOperand(FuncOp));
6365 Call->eraseFromParent();
6370 return MI.isCall() && !
MI.isReturn();
6378 Et = std::prev(
MBB.end());
6383 if (!
MBB.isLiveIn(ARM::LR))
6384 MBB.addLiveIn(ARM::LR);
6388 saveLROnStack(
MBB, It,
true, Auth);
6393 "Can only fix up stack references once");
6394 fixupPostOutline(
MBB);
6397 restoreLRFromStack(
MBB, Et,
true, Auth);
6417 fixupPostOutline(
MBB);
6426 bool isThumb = Subtarget.isThumb();
6432 ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6438 It =
MBB.insert(It, MIB);
6452 It =
MBB.insert(It, CallMIB);
6459 Register Reg = findRegisterToSaveLRTo(
C);
6460 assert(Reg != 0 &&
"No callee-saved register available?");
6467 CallPt =
MBB.insert(It, CallMIB);
6475 if (!
MBB.isLiveIn(ARM::LR))
6476 MBB.addLiveIn(ARM::LR);
6479 CallPt =
MBB.insert(It, CallMIB);
6490bool ARMBaseInstrInfo::isReMaterializableImpl(
6524 static int constexpr MAX_STAGES = 30;
6525 static int constexpr LAST_IS_USE = MAX_STAGES;
6526 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6527 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6528 typedef std::map<Register, IterNeed> IterNeeds;
6531 const IterNeeds &CIN);
6543 : EndLoop(EndLoop), LoopCount(LoopCount),
6545 TII(MF->getSubtarget().getInstrInfo()) {}
6547 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6549 return MI == EndLoop ||
MI == LoopCount;
6552 bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
override {
6553 if (tooMuchRegisterPressure(SSD, SMS))
6559 std::optional<bool> createTripCountGreaterCondition(
6560 int TC, MachineBasicBlock &
MBB,
6561 SmallVectorImpl<MachineOperand> &
Cond)
override {
6570 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6573 MachineInstr *LoopDec =
nullptr;
6575 if (
I.getOpcode() == ARM::t2LoopDec)
6577 assert(LoopDec &&
"Unable to find copied LoopDec");
6583 .
addReg(ARM::NoRegister);
6591 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
6593 void adjustTripCount(
int TripCountAdjust)
override {}
6597 const IterNeeds &CIN) {
6599 for (
const auto &
N : CIN) {
6600 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6601 for (
int I = 0;
I < Cnt; ++
I)
6606 for (
const auto &
N : CIN) {
6607 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6608 for (
int I = 0;
I < Cnt; ++
I)
6616 IterNeeds CrossIterationNeeds;
6621 for (
auto &SU : SSD.
SUnits) {
6624 for (
auto &S : SU.Succs)
6628 CrossIterationNeeds[
Reg.
id()].set(0);
6629 }
else if (S.isAssignedRegDep()) {
6631 if (OStg >= 0 && OStg != Stg) {
6634 CrossIterationNeeds[
Reg.
id()] |= ((1 << (OStg - Stg)) - 1);
6643 std::vector<SUnit *> ProposedSchedule;
6647 std::deque<SUnit *> Instrs =
6649 std::sort(Instrs.begin(), Instrs.end(),
6650 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6657 for (
auto *SU : ProposedSchedule)
6661 if (!MO.isReg() || !MO.getReg())
6664 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6665 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6666 CIter->second[SEEN_AS_LIVE])
6668 if (MO.isDef() && !MO.isDead())
6669 CIter->second.set(SEEN_AS_LIVE);
6670 else if (MO.isUse())
6671 CIter->second.set(LAST_IS_USE);
6673 for (
auto &CI : CrossIterationNeeds)
6674 CI.second.reset(LAST_IS_USE);
6680 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6683 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6685 for (
auto *SU : ProposedSchedule) {
6687 RPTracker.setPos(std::next(CurInstI));
6693 if (!MO.isReg() || !MO.getReg())
6696 if (MO.isDef() && !MO.isDead()) {
6697 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6698 if (CIter != CrossIterationNeeds.end()) {
6699 CIter->second.reset(0);
6700 CIter->second.reset(SEEN_AS_LIVE);
6704 for (
auto &S : SU->Preds) {
6706 if (S.isAssignedRegDep()) {
6708 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6709 if (CIter != CrossIterationNeeds.end()) {
6711 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6712 if (Stg - Stg2 < MAX_STAGES)
6713 CIter->second.set(Stg - Stg2);
6714 CIter->second.set(SEEN_AS_LIVE);
6719 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6722 auto &
P = RPTracker.getPressure().MaxSetPressure;
6723 for (
unsigned I = 0,
E =
P.size();
I <
E; ++
I) {
6725 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6726 I == ARM::DTriple_with_qsub_0_in_QPR)
6738std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6742 if (Preheader == LoopBB)
6743 Preheader = *std::next(LoopBB->
pred_begin());
6745 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6751 for (
auto &L : LoopBB->
instrs()) {
6758 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
6772 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
6773 for (
auto &L : LoopBB->
instrs())
6778 Register LoopDecResult =
I->getOperand(0).getReg();
6781 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
6784 for (
auto &J : Preheader->
instrs())
6785 if (J.getOpcode() == ARM::t2DoLoopStart)
6789 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, RegState State) const
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
const ARMBaseRegisterInfo & getRegisterInfo() const
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, LaneBitmask UsedLanes=LaneBitmask::getAll()) const override
ARMBaseInstrInfo(const ARMSubtarget &STI, const ARMBaseRegisterInfo &TRI)
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
Helper class for creating CFI instructions and inserting them into MIR.
void buildRegister(MCRegister Reg1, MCRegister Reg2) const
void buildRestore(MCRegister Reg) const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasDLLImportStorageClass() const
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
LLVM_ABI void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getOpcode() const
Return the opcode number for this descriptor.
LLVM_ABI bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineInstrBundleIterator< MachineInstr > iterator
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
use_instr_iterator use_instr_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static use_instr_iterator use_instr_end()
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void increaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
LLVM_ABI void decreaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnMachineFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Wrapper class representing a virtual register or register unit.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static CondCodes getOppositeCondition(CondCodes CC)
ARMII - This namespace holds all of the target specific flags that instruction info tracks.
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
Define some predicates that are used for node matching.
@ C
The default llvm calling convention, compatible with C.
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
constexpr T rotr(T V, int R)
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
constexpr RegState getKillRegState(bool B)
unsigned getBLXpredOpcode(const MachineFunction &MF)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defd between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
static bool isPopOpcode(int Opc)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
FunctionAddr VTableAddr Next
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a destreg = basereg + immediate in ARM / Thumb2 code.
constexpr RegState getUndefRegState(bool B)
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.