73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
90 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
91 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
92 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
93 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
94 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
95 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
96 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
97 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
100 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
101 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
102 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
103 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
104 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
105 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
106 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
107 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
114 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
115 if (!MLxEntryMap.insert(std::make_pair(
ARM_MLxTable[i].MLxOpc, i)).second)
127 if (usePreRAHazardRecognizer()) {
129 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
149 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
165 if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
186 bool AllowModify)
const {
191 if (
I ==
MBB.instr_begin())
201 bool CantAnalyze =
false;
205 while (
I->isDebugInstr() || !
I->isTerminator() ||
207 I->getOpcode() == ARM::t2DoLoopStartTP){
208 if (
I ==
MBB.instr_begin())
219 TBB =
I->getOperand(0).getMBB();
225 assert(!FBB &&
"FBB should have been null.");
227 TBB =
I->getOperand(0).getMBB();
228 Cond.push_back(
I->getOperand(1));
229 Cond.push_back(
I->getOperand(2));
230 }
else if (
I->isReturn()) {
233 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
240 TBB =
I->getOperand(1).getMBB();
242 Cond.push_back(
I->getOperand(0));
264 while (DI !=
MBB.instr_end()) {
287 if (
I ==
MBB.instr_begin())
299 int *BytesRemoved)
const {
300 assert(!BytesRemoved &&
"code size not handled");
311 I->eraseFromParent();
315 if (
I ==
MBB.begin())
return 1;
321 I->eraseFromParent();
330 int *BytesAdded)
const {
331 assert(!BytesAdded &&
"code size not handled");
340 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
342 "ARM branch conditions have two or three components!");
352 }
else if (
Cond.size() == 2) {
363 if (
Cond.size() == 2)
368 else if (
Cond.size() == 3)
379 if (
Cond.size() == 2) {
391 while (++
I != E &&
I->isInsideBundle()) {
392 int PIdx =
I->findFirstPredOperandIdx();
393 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
399 int PIdx =
MI.findFirstPredOperandIdx();
400 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
408 std::string GenericComment =
410 if (!GenericComment.empty())
411 return GenericComment;
415 return std::string();
419 int FirstPredOp =
MI.findFirstPredOperandIdx();
420 if (FirstPredOp != (
int)
OpIdx)
421 return std::string();
423 std::string CC =
"CC::";
430 unsigned Opc =
MI.getOpcode();
439 int PIdx =
MI.findFirstPredOperandIdx();
443 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
450 "CPSR def isn't expected operand");
451 assert((
MI.getOperand(1).isDead() ||
452 MI.getOperand(1).getReg() != ARM::CPSR) &&
453 "if conversion tried to stop defining used CPSR");
454 MI.getOperand(1).setReg(ARM::NoRegister);
464 if (Pred1.
size() > 2 || Pred2.
size() > 2)
489 std::vector<MachineOperand> &Pred,
490 bool SkipDead)
const {
493 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
494 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
495 if (ClobbersCPSR || IsCPSR) {
513 for (
const auto &MO :
MI.operands())
514 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
520 switch (
MI->getOpcode()) {
521 default:
return true;
552 if (!
MI.isPredicable())
590 if (!MO.isReg() || MO.isUndef() || MO.isUse())
592 if (MO.getReg() != ARM::CPSR)
612 switch (
MI.getOpcode()) {
619 return MCID.getSize();
620 case TargetOpcode::BUNDLE:
621 return getInstBundleLength(
MI);
622 case TargetOpcode::COPY:
627 case ARM::CONSTPOOL_ENTRY:
628 case ARM::JUMPTABLE_INSTS:
629 case ARM::JUMPTABLE_ADDRS:
630 case ARM::JUMPTABLE_TBB:
631 case ARM::JUMPTABLE_TBH:
634 return MI.getOperand(2).getImm();
636 return MI.getOperand(1).getImm();
638 case ARM::INLINEASM_BR: {
640 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
648unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
652 while (++
I != E &&
I->isInsideBundle()) {
653 assert(!
I->isBundle() &&
"No nested bundle!");
663 unsigned Opc = Subtarget.isThumb()
664 ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
672 if (Subtarget.isMClass())
683 unsigned Opc = Subtarget.isThumb()
684 ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
689 if (Subtarget.isMClass())
718 unsigned Cond,
unsigned Inactive) {
728 bool RenamableSrc)
const {
729 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
730 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
732 if (GPRDest && GPRSrc) {
740 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
741 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
744 if (SPRDest && SPRSrc)
746 else if (GPRDest && SPRSrc)
748 else if (SPRDest && GPRSrc)
750 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
752 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
753 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
758 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR)
760 if (
Opc == ARM::MVE_VORR)
762 else if (
Opc != ARM::MQPRCopy)
768 unsigned BeginIdx = 0;
769 unsigned SubRegs = 0;
773 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
774 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
775 BeginIdx = ARM::qsub_0;
777 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
778 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
779 BeginIdx = ARM::qsub_0;
782 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
784 BeginIdx = ARM::dsub_0;
786 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
788 BeginIdx = ARM::dsub_0;
790 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
792 BeginIdx = ARM::dsub_0;
794 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
795 Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
796 BeginIdx = ARM::gsub_0;
798 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
800 BeginIdx = ARM::dsub_0;
803 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
805 BeginIdx = ARM::dsub_0;
808 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
810 BeginIdx = ARM::dsub_0;
813 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
814 !Subtarget.hasFP64()) {
816 BeginIdx = ARM::ssub_0;
818 }
else if (SrcReg == ARM::CPSR) {
821 }
else if (DestReg == ARM::CPSR) {
824 }
else if (DestReg == ARM::VPR) {
830 }
else if (SrcReg == ARM::VPR) {
836 }
else if (DestReg == ARM::FPSCR_NZCV) {
838 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
842 }
else if (SrcReg == ARM::FPSCR_NZCV) {
844 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
850 assert(
Opc &&
"Impossible reg-to-reg copy");
856 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
857 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
863 for (
unsigned i = 0; i != SubRegs; ++i) {
864 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
865 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
866 assert(Dst && Src &&
"Bad sub-register");
868 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
873 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR) {
877 if (
Opc == ARM::MVE_VORR)
882 if (
Opc == ARM::MOVr)
891std::optional<DestSourcePair>
900 if (!
MI.isMoveReg() ||
901 (
MI.getOpcode() == ARM::VORRq &&
902 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
907std::optional<ParamLoadedValue>
911 Register DstReg = DstSrcPair->Destination->getReg();
942 return MIB.
addReg(Reg, State);
946 return MIB.
addReg(Reg, State, SubIdx);
951 Register SrcReg,
bool isKill,
int FI,
964 switch (
TRI.getSpillSize(*RC)) {
966 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
977 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
984 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
991 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
998 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1009 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1016 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1017 if (Subtarget.hasV5TEOps()) {
1020 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1031 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1037 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1053 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1054 Subtarget.hasMVEIntegerOps()) {
1059 .addMemOperand(MMO);
1065 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1068 Subtarget.hasNEON()) {
1082 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1083 AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1089 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1090 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1091 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1093 Subtarget.hasNEON()) {
1102 }
else if (Subtarget.hasMVEIntegerOps()) {
1114 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1115 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1116 AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1122 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1123 Subtarget.hasMVEIntegerOps()) {
1128 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1134 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1135 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1136 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1137 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, {});
1138 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, {});
1139 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, {});
1140 AddDReg(MIB, SrcReg, ARM::dsub_7, {});
1150 int &FrameIndex)
const {
1151 switch (
MI.getOpcode()) {
1155 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1156 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1157 MI.getOperand(3).getImm() == 0) {
1158 FrameIndex =
MI.getOperand(1).getIndex();
1159 return MI.getOperand(0).getReg();
1168 case ARM::VSTR_P0_off:
1169 case ARM::VSTR_FPSCR_NZCVQC_off:
1170 case ARM::MVE_VSTRWU32:
1171 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1172 MI.getOperand(2).getImm() == 0) {
1173 FrameIndex =
MI.getOperand(1).getIndex();
1174 return MI.getOperand(0).getReg();
1178 case ARM::VST1d64TPseudo:
1179 case ARM::VST1d64QPseudo:
1180 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1181 FrameIndex =
MI.getOperand(0).getIndex();
1182 return MI.getOperand(2).getReg();
1186 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1187 FrameIndex =
MI.getOperand(1).getIndex();
1188 return MI.getOperand(0).getReg();
1191 case ARM::MQQPRStore:
1192 case ARM::MQQQQPRStore:
1193 if (
MI.getOperand(1).isFI()) {
1194 FrameIndex =
MI.getOperand(1).getIndex();
1195 return MI.getOperand(0).getReg();
1204 int &FrameIndex)
const {
1206 if (
MI.mayStore() && hasStoreToStackSlot(
MI,
Accesses) &&
1223 if (
I !=
MBB.end())
DL =
I->getDebugLoc();
1232 switch (
TRI.getSpillSize(*RC)) {
1234 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1244 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1250 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1256 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1262 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1272 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1278 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1281 if (Subtarget.hasV5TEOps()) {
1304 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1317 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1318 Subtarget.hasMVEIntegerOps()) {
1320 MIB.addFrameIndex(FI)
1322 .addMemOperand(MMO);
1328 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1330 Subtarget.hasNEON()) {
1351 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1352 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1353 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1355 Subtarget.hasNEON()) {
1361 }
else if (Subtarget.hasMVEIntegerOps()) {
1381 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1382 Subtarget.hasMVEIntegerOps()) {
1386 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1410 int &FrameIndex)
const {
1411 switch (
MI.getOpcode()) {
1415 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1416 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1417 MI.getOperand(3).getImm() == 0) {
1418 FrameIndex =
MI.getOperand(1).getIndex();
1419 return MI.getOperand(0).getReg();
1428 case ARM::VLDR_P0_off:
1429 case ARM::VLDR_FPSCR_NZCVQC_off:
1430 case ARM::MVE_VLDRWU32:
1431 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1432 MI.getOperand(2).getImm() == 0) {
1433 FrameIndex =
MI.getOperand(1).getIndex();
1434 return MI.getOperand(0).getReg();
1438 case ARM::VLD1d8TPseudo:
1439 case ARM::VLD1d16TPseudo:
1440 case ARM::VLD1d32TPseudo:
1441 case ARM::VLD1d64TPseudo:
1442 case ARM::VLD1d8QPseudo:
1443 case ARM::VLD1d16QPseudo:
1444 case ARM::VLD1d32QPseudo:
1445 case ARM::VLD1d64QPseudo:
1446 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1447 FrameIndex =
MI.getOperand(1).getIndex();
1448 return MI.getOperand(0).getReg();
1452 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1453 FrameIndex =
MI.getOperand(1).getIndex();
1454 return MI.getOperand(0).getReg();
1457 case ARM::MQQPRLoad:
1458 case ARM::MQQQQPRLoad:
1459 if (
MI.getOperand(1).isFI()) {
1460 FrameIndex =
MI.getOperand(1).getIndex();
1461 return MI.getOperand(0).getReg();
1470 int &FrameIndex)
const {
1472 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI,
Accesses) &&
1486 bool isThumb2 = Subtarget.
isThumb2();
1493 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1495 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1496 : isThumb1 ? ARM::tLDMIA_UPD
1500 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1503 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1504 MachineOperand STWb(
MI->getOperand(0));
1505 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1506 : isThumb1 ? ARM::tSTMIA_UPD
1510 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1513 MachineOperand LDBase(
MI->getOperand(3));
1516 MachineOperand STBase(
MI->getOperand(2));
1525 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1526 return TRI.getEncodingValue(Reg1) <
1527 TRI.getEncodingValue(Reg2);
1530 for (
const auto &
Reg : ScratchRegs) {
1539 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1540 expandLoadStackGuard(
MI);
1541 MI.getParent()->erase(
MI);
1545 if (
MI.getOpcode() == ARM::MEMCPY) {
1554 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1559 Register DstRegS =
MI.getOperand(0).getReg();
1560 Register SrcRegS =
MI.getOperand(1).getReg();
1561 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1566 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1568 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1569 if (!DstRegD || !SrcRegD)
1575 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1579 if (
MI.getOperand(0).isDead())
1588 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1589 if (ImpDefIdx != -1)
1590 MI.removeOperand(ImpDefIdx);
1593 MI.setDesc(
get(ARM::VMOVD));
1594 MI.getOperand(0).setReg(DstRegD);
1595 MI.getOperand(1).setReg(SrcRegD);
1602 MI.getOperand(1).setIsUndef();
1607 if (
MI.getOperand(1).isKill()) {
1608 MI.getOperand(1).setIsKill(
false);
1609 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1623 assert(MCPE.isMachineConstantPoolEntry() &&
1624 "Expecting a machine constantpool entry!");
1673 case ARM::tLDRpci_pic:
1674 case ARM::t2LDRpci_pic: {
1694 switch (
I->getOpcode()) {
1695 case ARM::tLDRpci_pic:
1696 case ARM::t2LDRpci_pic: {
1698 unsigned CPI =
I->getOperand(1).getIndex();
1700 I->getOperand(1).setIndex(CPI);
1701 I->getOperand(2).setImm(PCLabelId);
1705 if (!
I->isBundledWithSucc())
1716 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1717 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1718 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1719 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1720 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1721 Opcode == ARM::t2MOV_ga_pcrel) {
1732 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1733 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1734 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1735 Opcode == ARM::t2MOV_ga_pcrel)
1747 if (isARMCP0 && isARMCP1) {
1753 }
else if (!isARMCP0 && !isARMCP1) {
1757 }
else if (Opcode == ARM::PICLDR) {
1765 if (Addr0 != Addr1) {
1801 int64_t &Offset2)
const {
1803 if (Subtarget.isThumb1Only())
return false;
1808 auto IsLoadOpcode = [&](
unsigned Opcode) {
1823 case ARM::t2LDRSHi8:
1825 case ARM::t2LDRBi12:
1826 case ARM::t2LDRSHi12:
1867 int64_t Offset1, int64_t Offset2,
1868 unsigned NumLoads)
const {
1870 if (Subtarget.isThumb1Only())
return false;
1872 assert(Offset2 > Offset1);
1874 if ((Offset2 - Offset1) / 8 > 64)
1905 if (
MI.isDebugInstr())
1909 if (
MI.isTerminator() ||
MI.isPosition())
1913 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1927 while (++
I !=
MBB->end() &&
I->isDebugInstr())
1929 if (
I !=
MBB->end() &&
I->getOpcode() == ARM::t2IT)
1940 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
1948 unsigned NumCycles,
unsigned ExtraPredCycles,
1956 if (
MBB.getParent()->getFunction().hasOptSize()) {
1958 if (!Pred->empty()) {
1960 if (LastMI->
getOpcode() == ARM::t2Bcc) {
1969 MBB, 0, 0, Probability);
1974 unsigned TCycles,
unsigned TExtra,
1976 unsigned FCycles,
unsigned FExtra,
1985 if (Subtarget.isThumb2() &&
TBB.getParent()->getFunction().hasMinSize()) {
1993 const unsigned ScalingUpFactor = 1024;
1995 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1996 unsigned UnpredCost;
1997 if (!Subtarget.hasBranchPredictor()) {
2000 unsigned NotTakenBranchCost = 1;
2001 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
2002 unsigned TUnpredCycles, FUnpredCycles;
2005 TUnpredCycles = TCycles + NotTakenBranchCost;
2006 FUnpredCycles = TakenBranchCost;
2009 TUnpredCycles = TCycles + TakenBranchCost;
2010 FUnpredCycles = FCycles + NotTakenBranchCost;
2013 PredCost -= 1 * ScalingUpFactor;
2016 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2017 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2018 UnpredCost = TUnpredCost + FUnpredCost;
2021 if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2022 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2025 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2026 unsigned FUnpredCost =
2028 UnpredCost = TUnpredCost + FUnpredCost;
2029 UnpredCost += 1 * ScalingUpFactor;
2030 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2033 return PredCost <= UnpredCost;
2038 unsigned NumInsts)
const {
2042 if (!Subtarget.isThumb2())
2046 unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
2055 if (
MI.getOpcode() == ARM::t2Bcc &&
2067 if (Subtarget.isThumb2())
2078 return Subtarget.isProfitableToUnpredicate();
2086 int PIdx =
MI.findFirstPredOperandIdx();
2092 PredReg =
MI.getOperand(PIdx+1).getReg();
2101 if (
Opc == ARM::t2B)
2110 unsigned OpIdx2)
const {
2111 switch (
MI.getOpcode()) {
2113 case ARM::t2MOVCCr: {
2118 if (CC ==
ARMCC::AL || PredReg != ARM::CPSR)
2138 if (!Reg.isVirtual())
2140 if (!
MRI.hasOneNonDBGUse(Reg))
2152 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2159 if (MO.getReg().isPhysical())
2161 if (MO.isDef() && !MO.isDead())
2164 bool DontMoveAcrossStores =
true;
2165 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2172 unsigned &TrueOp,
unsigned &FalseOp,
2173 bool &Optimizable)
const {
2174 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2175 "Unknown select instruction");
2184 Cond.push_back(
MI.getOperand(3));
2185 Cond.push_back(
MI.getOperand(4));
2194 bool PreferFalse)
const {
2195 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2196 "Unknown select instruction");
2199 bool Invert = !
DefMI;
2201 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2208 Register DestReg =
MI.getOperand(0).getReg();
2211 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2213 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2224 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2227 unsigned CondCode =
MI.getOperand(3).getImm();
2232 NewMI.
add(
MI.getOperand(4));
2243 NewMI.
add(FalseReg);
2254 if (
DefMI->getParent() !=
MI.getParent())
2258 DefMI->eraseFromParent();
2274 {ARM::ADDSri, ARM::ADDri},
2275 {ARM::ADDSrr, ARM::ADDrr},
2276 {ARM::ADDSrsi, ARM::ADDrsi},
2277 {ARM::ADDSrsr, ARM::ADDrsr},
2279 {ARM::SUBSri, ARM::SUBri},
2280 {ARM::SUBSrr, ARM::SUBrr},
2281 {ARM::SUBSrsi, ARM::SUBrsi},
2282 {ARM::SUBSrsr, ARM::SUBrsr},
2284 {ARM::RSBSri, ARM::RSBri},
2285 {ARM::RSBSrsi, ARM::RSBrsi},
2286 {ARM::RSBSrsr, ARM::RSBrsr},
2288 {ARM::tADDSi3, ARM::tADDi3},
2289 {ARM::tADDSi8, ARM::tADDi8},
2290 {ARM::tADDSrr, ARM::tADDrr},
2291 {ARM::tADCS, ARM::tADC},
2293 {ARM::tSUBSi3, ARM::tSUBi3},
2294 {ARM::tSUBSi8, ARM::tSUBi8},
2295 {ARM::tSUBSrr, ARM::tSUBrr},
2296 {ARM::tSBCS, ARM::tSBC},
2297 {ARM::tRSBS, ARM::tRSB},
2298 {ARM::tLSLSri, ARM::tLSLri},
2300 {ARM::t2ADDSri, ARM::t2ADDri},
2301 {ARM::t2ADDSrr, ARM::t2ADDrr},
2302 {ARM::t2ADDSrs, ARM::t2ADDrs},
2304 {ARM::t2SUBSri, ARM::t2SUBri},
2305 {ARM::t2SUBSrr, ARM::t2SUBrr},
2306 {ARM::t2SUBSrs, ARM::t2SUBrs},
2308 {ARM::t2RSBSri, ARM::t2RSBri},
2309 {ARM::t2RSBSrs, ARM::t2RSBrs},
2314 if (OldOpc == Entry.PseudoOpc)
2315 return Entry.MachineOpc;
2326 if (NumBytes == 0 && DestReg != BaseReg) {
2335 bool isSub = NumBytes < 0;
2336 if (isSub) NumBytes = -NumBytes;
2341 assert(ThisVal &&
"Didn't extract field correctly");
2344 NumBytes &= ~ThisVal;
2349 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2362 unsigned NumBytes) {
2373 if (!IsPush && !IsPop)
2376 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2377 MI->getOpcode() == ARM::VLDMDIA_UPD;
2378 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2379 MI->getOpcode() == ARM::tPOP ||
2380 MI->getOpcode() == ARM::tPOP_RET;
2382 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2383 MI->getOperand(1).getReg() == ARM::SP)) &&
2384 "trying to fold sp update into non-sp-updating push/pop");
2389 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2394 int RegListIdx = IsT1PushPop ? 2 : 4;
2397 unsigned RegsNeeded;
2400 RegsNeeded = NumBytes / 8;
2401 RegClass = &ARM::DPRRegClass;
2403 RegsNeeded = NumBytes / 4;
2404 RegClass = &ARM::GPRRegClass;
2414 unsigned FirstRegEnc = -1;
2417 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2422 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2423 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2426 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2429 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2432 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2439 false,
false,
true));
2449 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2471 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2472 MI->removeOperand(i);
2485 unsigned Opcode =
MI.getOpcode();
2491 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2494 if (Opcode == ARM::ADDri) {
2495 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2498 MI.setDesc(
TII.get(ARM::MOVr));
2499 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2500 MI.removeOperand(FrameRegIdx+1);
2506 MI.setDesc(
TII.get(ARM::SUBri));
2512 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2513 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2528 "Bit extraction didn't work?");
2529 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2531 unsigned ImmIdx = 0;
2533 unsigned NumBits = 0;
2537 ImmIdx = FrameRegIdx + 1;
2538 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2542 ImmIdx = FrameRegIdx+2;
2549 ImmIdx = FrameRegIdx+2;
2560 ImmIdx = FrameRegIdx+1;
2568 ImmIdx = FrameRegIdx+1;
2578 ImmIdx = FrameRegIdx+1;
2579 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2588 Offset += InstrOffs * Scale;
2589 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2599 int ImmedOffset =
Offset / Scale;
2600 unsigned Mask = (1 << NumBits) - 1;
2601 if ((
unsigned)
Offset <= Mask * Scale) {
2603 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2609 ImmedOffset = -ImmedOffset;
2611 ImmedOffset |= 1 << NumBits;
2619 ImmedOffset = ImmedOffset & Mask;
2622 ImmedOffset = -ImmedOffset;
2624 ImmedOffset |= 1 << NumBits;
2640 Register &SrcReg2, int64_t &CmpMask,
2641 int64_t &CmpValue)
const {
2642 switch (
MI.getOpcode()) {
2647 SrcReg =
MI.getOperand(0).getReg();
2650 CmpValue =
MI.getOperand(1).getImm();
2655 SrcReg =
MI.getOperand(0).getReg();
2656 SrcReg2 =
MI.getOperand(1).getReg();
2662 SrcReg =
MI.getOperand(0).getReg();
2664 CmpMask =
MI.getOperand(1).getImm();
2677 int CmpMask,
bool CommonUse) {
2678 switch (
MI->getOpcode()) {
2681 if (CmpMask !=
MI->getOperand(2).getImm())
2683 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2773 switch (
MI->getOpcode()) {
2774 default:
return false;
2870 if (!
MI)
return false;
2873 if (CmpMask != ~0) {
2877 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
2879 if (UI->getParent() != CmpInstr.
getParent())
2888 if (!
MI)
return false;
2897 if (
I ==
B)
return false;
2908 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
2913 if (CmpInstr.
getOpcode() == ARM::CMPri ||
2921 bool IsThumb1 =
false;
2938 if (
MI && IsThumb1) {
2940 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
2941 bool CanReorder =
true;
2942 for (;
I != E; --
I) {
2943 if (
I->getOpcode() != ARM::tMOVi8) {
2949 MI =
MI->removeFromParent();
2960 bool SubAddIsThumb1 =
false;
2975 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
2976 Instr.readsRegister(ARM::CPSR,
TRI))
2998 IsThumb1 = SubAddIsThumb1;
3013 bool isSafe =
false;
3016 while (!isSafe && ++
I != E) {
3018 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3019 !isSafe && IO != EO; ++IO) {
3033 bool IsInstrVSel =
true;
3034 switch (Instr.getOpcode()) {
3036 IsInstrVSel =
false;
3070 bool IsSub =
Opc == ARM::SUBrr ||
Opc == ARM::t2SUBrr ||
3071 Opc == ARM::SUBri ||
Opc == ARM::t2SUBri ||
3072 Opc == ARM::tSUBrr ||
Opc == ARM::tSUBi3 ||
3074 unsigned OpI =
Opc != ARM::tSUBrr ? 1 : 2;
3086 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3120 if (Succ->isLiveIn(ARM::CPSR))
3127 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3128 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3129 MI->getOperand(CPSRRegNum).setIsDef(
true);
3137 for (
auto &[MO,
Cond] : OperandsToUpdate)
3140 MI->clearRegisterDeads(ARM::CPSR);
3154 int64_t CmpMask, CmpValue;
3156 if (
Next !=
MI.getParent()->end() &&
3167 unsigned DefOpc =
DefMI.getOpcode();
3168 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3169 DefOpc != ARM::tMOVi32imm)
3171 if (!
DefMI.getOperand(1).isImm())
3175 if (!
MRI->hasOneNonDBGUse(Reg))
3191 if (
UseMI.getOperand(
NumOps - 1).getReg() == ARM::CPSR)
3197 unsigned UseOpc =
UseMI.getOpcode();
3198 unsigned NewUseOpc = 0;
3200 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3201 bool Commute =
false;
3203 default:
return false;
3211 case ARM::t2EORrr: {
3212 Commute =
UseMI.getOperand(2).getReg() != Reg;
3217 if (UseOpc == ARM::SUBrr && Commute)
3223 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3226 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3240 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3241 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3245 case ARM::t2SUBrr: {
3246 if (UseOpc == ARM::t2SUBrr && Commute)
3251 const bool ToSP =
DefMI.getOperand(0).getReg() == ARM::SP;
3252 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3253 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3255 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3258 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3273 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3274 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3281 unsigned OpIdx = Commute ? 2 : 1;
3283 bool isKill =
UseMI.getOperand(
OpIdx).isKill();
3285 Register NewReg =
MRI->createVirtualRegister(TRC);
3293 UseMI.getOperand(1).setReg(NewReg);
3294 UseMI.getOperand(1).setIsKill();
3295 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3296 DefMI.eraseFromParent();
3303 case ARM::t2ADDspImm:
3304 case ARM::t2SUBspImm:
3307 MRI->constrainRegClass(
UseMI.getOperand(0).getReg(), TRC);
3314 switch (
MI.getOpcode()) {
3318 assert(UOps >= 0 &&
"bad # UOps");
3326 unsigned ShOpVal =
MI.getOperand(3).getImm();
3331 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3339 if (!
MI.getOperand(2).getReg())
3342 unsigned ShOpVal =
MI.getOperand(3).getImm();
3347 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3357 case ARM::LDRSB_POST:
3358 case ARM::LDRSH_POST: {
3361 return (Rt == Rm) ? 4 : 3;
3364 case ARM::LDR_PRE_REG:
3365 case ARM::LDRB_PRE_REG: {
3370 unsigned ShOpVal =
MI.getOperand(4).getImm();
3375 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3381 case ARM::STR_PRE_REG:
3382 case ARM::STRB_PRE_REG: {
3383 unsigned ShOpVal =
MI.getOperand(4).getImm();
3388 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3395 case ARM::STRH_PRE: {
3405 case ARM::LDR_POST_REG:
3406 case ARM::LDRB_POST_REG:
3407 case ARM::LDRH_POST: {
3410 return (Rt == Rm) ? 3 : 2;
3413 case ARM::LDR_PRE_IMM:
3414 case ARM::LDRB_PRE_IMM:
3415 case ARM::LDR_POST_IMM:
3416 case ARM::LDRB_POST_IMM:
3417 case ARM::STRB_POST_IMM:
3418 case ARM::STRB_POST_REG:
3419 case ARM::STRB_PRE_IMM:
3420 case ARM::STRH_POST:
3421 case ARM::STR_POST_IMM:
3422 case ARM::STR_POST_REG:
3423 case ARM::STR_PRE_IMM:
3426 case ARM::LDRSB_PRE:
3427 case ARM::LDRSH_PRE: {
3434 unsigned ShOpVal =
MI.getOperand(4).getImm();
3439 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3452 return (Rt == Rn) ? 3 : 2;
3463 case ARM::LDRD_POST:
3464 case ARM::t2LDRD_POST:
3467 case ARM::STRD_POST:
3468 case ARM::t2STRD_POST:
3471 case ARM::LDRD_PRE: {
3478 return (Rt == Rn) ? 4 : 3;
3481 case ARM::t2LDRD_PRE: {
3484 return (Rt == Rn) ? 4 : 3;
3487 case ARM::STRD_PRE: {
3495 case ARM::t2STRD_PRE:
3498 case ARM::t2LDR_POST:
3499 case ARM::t2LDRB_POST:
3500 case ARM::t2LDRB_PRE:
3501 case ARM::t2LDRSBi12:
3502 case ARM::t2LDRSBi8:
3503 case ARM::t2LDRSBpci:
3505 case ARM::t2LDRH_POST:
3506 case ARM::t2LDRH_PRE:
3508 case ARM::t2LDRSB_POST:
3509 case ARM::t2LDRSB_PRE:
3510 case ARM::t2LDRSH_POST:
3511 case ARM::t2LDRSH_PRE:
3512 case ARM::t2LDRSHi12:
3513 case ARM::t2LDRSHi8:
3514 case ARM::t2LDRSHpci:
3518 case ARM::t2LDRDi8: {
3521 return (Rt == Rn) ? 3 : 2;
3524 case ARM::t2STRB_POST:
3525 case ARM::t2STRB_PRE:
3528 case ARM::t2STRH_POST:
3529 case ARM::t2STRH_PRE:
3531 case ARM::t2STR_POST:
3532 case ARM::t2STR_PRE:
3563 E =
MI.memoperands_end();
3565 Size += (*I)->getSize().getValue();
3572 return std::min(
Size / 4, 16U);
3577 unsigned UOps = 1 + NumRegs;
3581 case ARM::VLDMDIA_UPD:
3582 case ARM::VLDMDDB_UPD:
3583 case ARM::VLDMSIA_UPD:
3584 case ARM::VLDMSDB_UPD:
3585 case ARM::VSTMDIA_UPD:
3586 case ARM::VSTMDDB_UPD:
3587 case ARM::VSTMSIA_UPD:
3588 case ARM::VSTMSDB_UPD:
3589 case ARM::LDMIA_UPD:
3590 case ARM::LDMDA_UPD:
3591 case ARM::LDMDB_UPD:
3592 case ARM::LDMIB_UPD:
3593 case ARM::STMIA_UPD:
3594 case ARM::STMDA_UPD:
3595 case ARM::STMDB_UPD:
3596 case ARM::STMIB_UPD:
3597 case ARM::tLDMIA_UPD:
3598 case ARM::tSTMIA_UPD:
3599 case ARM::t2LDMIA_UPD:
3600 case ARM::t2LDMDB_UPD:
3601 case ARM::t2STMIA_UPD:
3602 case ARM::t2STMDB_UPD:
3605 case ARM::LDMIA_RET:
3607 case ARM::t2LDMIA_RET:
3616 if (!ItinData || ItinData->
isEmpty())
3620 unsigned Class =
Desc.getSchedClass();
3622 if (ItinUOps >= 0) {
3623 if (Subtarget.isSwift() && (
Desc.mayLoad() ||
Desc.mayStore()))
3629 unsigned Opc =
MI.getOpcode();
3648 case ARM::VLDMDIA_UPD:
3649 case ARM::VLDMDDB_UPD:
3651 case ARM::VLDMSIA_UPD:
3652 case ARM::VLDMSDB_UPD:
3654 case ARM::VSTMDIA_UPD:
3655 case ARM::VSTMDDB_UPD:
3657 case ARM::VSTMSIA_UPD:
3658 case ARM::VSTMSDB_UPD: {
3659 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3660 return (NumRegs / 2) + (NumRegs % 2) + 1;
3663 case ARM::LDMIA_RET:
3668 case ARM::LDMIA_UPD:
3669 case ARM::LDMDA_UPD:
3670 case ARM::LDMDB_UPD:
3671 case ARM::LDMIB_UPD:
3676 case ARM::STMIA_UPD:
3677 case ARM::STMDA_UPD:
3678 case ARM::STMDB_UPD:
3679 case ARM::STMIB_UPD:
3681 case ARM::tLDMIA_UPD:
3682 case ARM::tSTMIA_UPD:
3686 case ARM::t2LDMIA_RET:
3689 case ARM::t2LDMIA_UPD:
3690 case ARM::t2LDMDB_UPD:
3693 case ARM::t2STMIA_UPD:
3694 case ARM::t2STMDB_UPD: {
3695 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3696 switch (Subtarget.getLdStMultipleTiming()) {
3707 unsigned UOps = (NumRegs / 2);
3713 unsigned UOps = (NumRegs / 2);
3716 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3717 (*
MI.memoperands_begin())->getAlign() <
Align(8))
3727std::optional<unsigned>
3730 unsigned DefIdx,
unsigned DefAlign)
const {
3739 DefCycle = RegNo / 2 + 1;
3744 bool isSLoad =
false;
3749 case ARM::VLDMSIA_UPD:
3750 case ARM::VLDMSDB_UPD:
3757 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3761 DefCycle = RegNo + 2;
3767std::optional<unsigned>
3770 unsigned DefIdx,
unsigned DefAlign)
const {
3777 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3780 DefCycle = RegNo / 2;
3785 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3786 DefCycle = (RegNo / 2);
3789 if ((RegNo % 2) || DefAlign < 8)
3795 DefCycle = RegNo + 2;
3801std::optional<unsigned>
3804 unsigned UseIdx,
unsigned UseAlign)
const {
3810 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3812 UseCycle = RegNo / 2 + 1;
3815 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3817 bool isSStore =
false;
3822 case ARM::VSTMSIA_UPD:
3823 case ARM::VSTMSDB_UPD:
3830 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3834 UseCycle = RegNo + 2;
3840std::optional<unsigned>
3843 unsigned UseIdx,
unsigned UseAlign)
const {
3849 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3850 UseCycle = RegNo / 2;
3855 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3856 UseCycle = (RegNo / 2);
3859 if ((RegNo % 2) || UseAlign < 8)
3870 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
3871 unsigned UseIdx,
unsigned UseAlign)
const {
3881 std::optional<unsigned> DefCycle;
3882 bool LdmBypass =
false;
3889 case ARM::VLDMDIA_UPD:
3890 case ARM::VLDMDDB_UPD:
3892 case ARM::VLDMSIA_UPD:
3893 case ARM::VLDMSDB_UPD:
3894 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3897 case ARM::LDMIA_RET:
3902 case ARM::LDMIA_UPD:
3903 case ARM::LDMDA_UPD:
3904 case ARM::LDMDB_UPD:
3905 case ARM::LDMIB_UPD:
3907 case ARM::tLDMIA_UPD:
3909 case ARM::t2LDMIA_RET:
3912 case ARM::t2LDMIA_UPD:
3913 case ARM::t2LDMDB_UPD:
3915 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3923 std::optional<unsigned> UseCycle;
3930 case ARM::VSTMDIA_UPD:
3931 case ARM::VSTMDDB_UPD:
3933 case ARM::VSTMSIA_UPD:
3934 case ARM::VSTMSDB_UPD:
3935 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3942 case ARM::STMIA_UPD:
3943 case ARM::STMDA_UPD:
3944 case ARM::STMDB_UPD:
3945 case ARM::STMIB_UPD:
3946 case ARM::tSTMIA_UPD:
3951 case ARM::t2STMIA_UPD:
3952 case ARM::t2STMDB_UPD:
3953 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3961 if (UseCycle > *DefCycle + 1)
3962 return std::nullopt;
3964 UseCycle = *DefCycle - *UseCycle + 1;
3965 if (UseCycle > 0u) {
3971 UseCycle = *UseCycle - 1;
3973 UseClass, UseIdx)) {
3974 UseCycle = *UseCycle - 1;
3983 unsigned &DefIdx,
unsigned &Dist) {
3988 assert(
II->isInsideBundle() &&
"Empty bundle?");
3991 while (
II->isInsideBundle()) {
3992 Idx =
II->findRegisterDefOperandIdx(
Reg,
TRI,
false,
true);
3999 assert(Idx != -1 &&
"Cannot find bundled definition!");
4006 unsigned &UseIdx,
unsigned &Dist) {
4010 assert(
II->isInsideBundle() &&
"Empty bundle?");
4015 while (
II !=
E &&
II->isInsideBundle()) {
4016 Idx =
II->findRegisterUseOperandIdx(
Reg,
TRI,
false);
4019 if (
II->getOpcode() != ARM::t2IT)
4047 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4057 case ARM::t2LDRSHs: {
4059 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4060 if (ShAmt == 0 || ShAmt == 2)
4065 }
else if (Subtarget.
isSwift()) {
4072 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4077 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4088 case ARM::t2LDRSHs: {
4090 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4091 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4098 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4105 case ARM::VLD1q8wb_fixed:
4106 case ARM::VLD1q16wb_fixed:
4107 case ARM::VLD1q32wb_fixed:
4108 case ARM::VLD1q64wb_fixed:
4109 case ARM::VLD1q8wb_register:
4110 case ARM::VLD1q16wb_register:
4111 case ARM::VLD1q32wb_register:
4112 case ARM::VLD1q64wb_register:
4119 case ARM::VLD2d8wb_fixed:
4120 case ARM::VLD2d16wb_fixed:
4121 case ARM::VLD2d32wb_fixed:
4122 case ARM::VLD2q8wb_fixed:
4123 case ARM::VLD2q16wb_fixed:
4124 case ARM::VLD2q32wb_fixed:
4125 case ARM::VLD2d8wb_register:
4126 case ARM::VLD2d16wb_register:
4127 case ARM::VLD2d32wb_register:
4128 case ARM::VLD2q8wb_register:
4129 case ARM::VLD2q16wb_register:
4130 case ARM::VLD2q32wb_register:
4135 case ARM::VLD3d8_UPD:
4136 case ARM::VLD3d16_UPD:
4137 case ARM::VLD3d32_UPD:
4138 case ARM::VLD1d64Twb_fixed:
4139 case ARM::VLD1d64Twb_register:
4140 case ARM::VLD3q8_UPD:
4141 case ARM::VLD3q16_UPD:
4142 case ARM::VLD3q32_UPD:
4147 case ARM::VLD4d8_UPD:
4148 case ARM::VLD4d16_UPD:
4149 case ARM::VLD4d32_UPD:
4150 case ARM::VLD1d64Qwb_fixed:
4151 case ARM::VLD1d64Qwb_register:
4152 case ARM::VLD4q8_UPD:
4153 case ARM::VLD4q16_UPD:
4154 case ARM::VLD4q32_UPD:
4155 case ARM::VLD1DUPq8:
4156 case ARM::VLD1DUPq16:
4157 case ARM::VLD1DUPq32:
4158 case ARM::VLD1DUPq8wb_fixed:
4159 case ARM::VLD1DUPq16wb_fixed:
4160 case ARM::VLD1DUPq32wb_fixed:
4161 case ARM::VLD1DUPq8wb_register:
4162 case ARM::VLD1DUPq16wb_register:
4163 case ARM::VLD1DUPq32wb_register:
4164 case ARM::VLD2DUPd8:
4165 case ARM::VLD2DUPd16:
4166 case ARM::VLD2DUPd32:
4167 case ARM::VLD2DUPd8wb_fixed:
4168 case ARM::VLD2DUPd16wb_fixed:
4169 case ARM::VLD2DUPd32wb_fixed:
4170 case ARM::VLD2DUPd8wb_register:
4171 case ARM::VLD2DUPd16wb_register:
4172 case ARM::VLD2DUPd32wb_register:
4173 case ARM::VLD4DUPd8:
4174 case ARM::VLD4DUPd16:
4175 case ARM::VLD4DUPd32:
4176 case ARM::VLD4DUPd8_UPD:
4177 case ARM::VLD4DUPd16_UPD:
4178 case ARM::VLD4DUPd32_UPD:
4180 case ARM::VLD1LNd16:
4181 case ARM::VLD1LNd32:
4182 case ARM::VLD1LNd8_UPD:
4183 case ARM::VLD1LNd16_UPD:
4184 case ARM::VLD1LNd32_UPD:
4186 case ARM::VLD2LNd16:
4187 case ARM::VLD2LNd32:
4188 case ARM::VLD2LNq16:
4189 case ARM::VLD2LNq32:
4190 case ARM::VLD2LNd8_UPD:
4191 case ARM::VLD2LNd16_UPD:
4192 case ARM::VLD2LNd32_UPD:
4193 case ARM::VLD2LNq16_UPD:
4194 case ARM::VLD2LNq32_UPD:
4196 case ARM::VLD4LNd16:
4197 case ARM::VLD4LNd32:
4198 case ARM::VLD4LNq16:
4199 case ARM::VLD4LNq32:
4200 case ARM::VLD4LNd8_UPD:
4201 case ARM::VLD4LNd16_UPD:
4202 case ARM::VLD4LNd32_UPD:
4203 case ARM::VLD4LNq16_UPD:
4204 case ARM::VLD4LNq32_UPD:
4218 if (!ItinData || ItinData->
isEmpty())
4219 return std::nullopt;
4225 unsigned DefAdj = 0;
4226 if (
DefMI.isBundle())
4235 unsigned UseAdj = 0;
4236 if (
UseMI.isBundle()) {
4240 return std::nullopt;
4243 return getOperandLatencyImpl(
4244 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4245 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4248std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4250 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4252 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4253 if (Reg == ARM::CPSR) {
4254 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4256 return Subtarget.
isLikeA9() ? 1 : 20;
4260 if (
UseMI.isBranch())
4279 return std::nullopt;
4281 unsigned DefAlign =
DefMI.hasOneMemOperand()
4282 ? (*
DefMI.memoperands_begin())->getAlign().value()
4284 unsigned UseAlign =
UseMI.hasOneMemOperand()
4285 ? (*
UseMI.memoperands_begin())->getAlign().value()
4290 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4293 return std::nullopt;
4296 int Adj = DefAdj + UseAdj;
4300 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4307std::optional<unsigned>
4309 SDNode *DefNode,
unsigned DefIdx,
4310 SDNode *UseNode,
unsigned UseIdx)
const {
4316 if (isZeroCost(DefMCID.
Opcode))
4319 if (!ItinData || ItinData->
isEmpty())
4320 return DefMCID.
mayLoad() ? 3 : 1;
4323 std::optional<unsigned>
Latency =
4325 int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4326 int Threshold = 1 + Adj;
4332 unsigned DefAlign = !DefMN->memoperands_empty()
4333 ? (*DefMN->memoperands_begin())->getAlign().value()
4336 unsigned UseAlign = !UseMN->memoperands_empty()
4337 ? (*UseMN->memoperands_begin())->getAlign().value()
4340 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4342 return std::nullopt;
4345 (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4346 Subtarget.isCortexA7())) {
4363 case ARM::t2LDRSHs: {
4366 if (ShAmt == 0 || ShAmt == 2)
4371 }
else if (DefIdx == 0 &&
Latency > 2U && Subtarget.isSwift()) {
4381 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4398 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4405 case ARM::VLD1q8wb_register:
4406 case ARM::VLD1q16wb_register:
4407 case ARM::VLD1q32wb_register:
4408 case ARM::VLD1q64wb_register:
4409 case ARM::VLD1q8wb_fixed:
4410 case ARM::VLD1q16wb_fixed:
4411 case ARM::VLD1q32wb_fixed:
4412 case ARM::VLD1q64wb_fixed:
4416 case ARM::VLD2q8Pseudo:
4417 case ARM::VLD2q16Pseudo:
4418 case ARM::VLD2q32Pseudo:
4419 case ARM::VLD2d8wb_fixed:
4420 case ARM::VLD2d16wb_fixed:
4421 case ARM::VLD2d32wb_fixed:
4422 case ARM::VLD2q8PseudoWB_fixed:
4423 case ARM::VLD2q16PseudoWB_fixed:
4424 case ARM::VLD2q32PseudoWB_fixed:
4425 case ARM::VLD2d8wb_register:
4426 case ARM::VLD2d16wb_register:
4427 case ARM::VLD2d32wb_register:
4428 case ARM::VLD2q8PseudoWB_register:
4429 case ARM::VLD2q16PseudoWB_register:
4430 case ARM::VLD2q32PseudoWB_register:
4431 case ARM::VLD3d8Pseudo:
4432 case ARM::VLD3d16Pseudo:
4433 case ARM::VLD3d32Pseudo:
4434 case ARM::VLD1d8TPseudo:
4435 case ARM::VLD1d16TPseudo:
4436 case ARM::VLD1d32TPseudo:
4437 case ARM::VLD1d64TPseudo:
4438 case ARM::VLD1d64TPseudoWB_fixed:
4439 case ARM::VLD1d64TPseudoWB_register:
4440 case ARM::VLD3d8Pseudo_UPD:
4441 case ARM::VLD3d16Pseudo_UPD:
4442 case ARM::VLD3d32Pseudo_UPD:
4443 case ARM::VLD3q8Pseudo_UPD:
4444 case ARM::VLD3q16Pseudo_UPD:
4445 case ARM::VLD3q32Pseudo_UPD:
4446 case ARM::VLD3q8oddPseudo:
4447 case ARM::VLD3q16oddPseudo:
4448 case ARM::VLD3q32oddPseudo:
4449 case ARM::VLD3q8oddPseudo_UPD:
4450 case ARM::VLD3q16oddPseudo_UPD:
4451 case ARM::VLD3q32oddPseudo_UPD:
4452 case ARM::VLD4d8Pseudo:
4453 case ARM::VLD4d16Pseudo:
4454 case ARM::VLD4d32Pseudo:
4455 case ARM::VLD1d8QPseudo:
4456 case ARM::VLD1d16QPseudo:
4457 case ARM::VLD1d32QPseudo:
4458 case ARM::VLD1d64QPseudo:
4459 case ARM::VLD1d64QPseudoWB_fixed:
4460 case ARM::VLD1d64QPseudoWB_register:
4461 case ARM::VLD1q8HighQPseudo:
4462 case ARM::VLD1q8LowQPseudo_UPD:
4463 case ARM::VLD1q8HighTPseudo:
4464 case ARM::VLD1q8LowTPseudo_UPD:
4465 case ARM::VLD1q16HighQPseudo:
4466 case ARM::VLD1q16LowQPseudo_UPD:
4467 case ARM::VLD1q16HighTPseudo:
4468 case ARM::VLD1q16LowTPseudo_UPD:
4469 case ARM::VLD1q32HighQPseudo:
4470 case ARM::VLD1q32LowQPseudo_UPD:
4471 case ARM::VLD1q32HighTPseudo:
4472 case ARM::VLD1q32LowTPseudo_UPD:
4473 case ARM::VLD1q64HighQPseudo:
4474 case ARM::VLD1q64LowQPseudo_UPD:
4475 case ARM::VLD1q64HighTPseudo:
4476 case ARM::VLD1q64LowTPseudo_UPD:
4477 case ARM::VLD4d8Pseudo_UPD:
4478 case ARM::VLD4d16Pseudo_UPD:
4479 case ARM::VLD4d32Pseudo_UPD:
4480 case ARM::VLD4q8Pseudo_UPD:
4481 case ARM::VLD4q16Pseudo_UPD:
4482 case ARM::VLD4q32Pseudo_UPD:
4483 case ARM::VLD4q8oddPseudo:
4484 case ARM::VLD4q16oddPseudo:
4485 case ARM::VLD4q32oddPseudo:
4486 case ARM::VLD4q8oddPseudo_UPD:
4487 case ARM::VLD4q16oddPseudo_UPD:
4488 case ARM::VLD4q32oddPseudo_UPD:
4489 case ARM::VLD1DUPq8:
4490 case ARM::VLD1DUPq16:
4491 case ARM::VLD1DUPq32:
4492 case ARM::VLD1DUPq8wb_fixed:
4493 case ARM::VLD1DUPq16wb_fixed:
4494 case ARM::VLD1DUPq32wb_fixed:
4495 case ARM::VLD1DUPq8wb_register:
4496 case ARM::VLD1DUPq16wb_register:
4497 case ARM::VLD1DUPq32wb_register:
4498 case ARM::VLD2DUPd8:
4499 case ARM::VLD2DUPd16:
4500 case ARM::VLD2DUPd32:
4501 case ARM::VLD2DUPd8wb_fixed:
4502 case ARM::VLD2DUPd16wb_fixed:
4503 case ARM::VLD2DUPd32wb_fixed:
4504 case ARM::VLD2DUPd8wb_register:
4505 case ARM::VLD2DUPd16wb_register:
4506 case ARM::VLD2DUPd32wb_register:
4507 case ARM::VLD2DUPq8EvenPseudo:
4508 case ARM::VLD2DUPq8OddPseudo:
4509 case ARM::VLD2DUPq16EvenPseudo:
4510 case ARM::VLD2DUPq16OddPseudo:
4511 case ARM::VLD2DUPq32EvenPseudo:
4512 case ARM::VLD2DUPq32OddPseudo:
4513 case ARM::VLD3DUPq8EvenPseudo:
4514 case ARM::VLD3DUPq8OddPseudo:
4515 case ARM::VLD3DUPq16EvenPseudo:
4516 case ARM::VLD3DUPq16OddPseudo:
4517 case ARM::VLD3DUPq32EvenPseudo:
4518 case ARM::VLD3DUPq32OddPseudo:
4519 case ARM::VLD4DUPd8Pseudo:
4520 case ARM::VLD4DUPd16Pseudo:
4521 case ARM::VLD4DUPd32Pseudo:
4522 case ARM::VLD4DUPd8Pseudo_UPD:
4523 case ARM::VLD4DUPd16Pseudo_UPD:
4524 case ARM::VLD4DUPd32Pseudo_UPD:
4525 case ARM::VLD4DUPq8EvenPseudo:
4526 case ARM::VLD4DUPq8OddPseudo:
4527 case ARM::VLD4DUPq16EvenPseudo:
4528 case ARM::VLD4DUPq16OddPseudo:
4529 case ARM::VLD4DUPq32EvenPseudo:
4530 case ARM::VLD4DUPq32OddPseudo:
4531 case ARM::VLD1LNq8Pseudo:
4532 case ARM::VLD1LNq16Pseudo:
4533 case ARM::VLD1LNq32Pseudo:
4534 case ARM::VLD1LNq8Pseudo_UPD:
4535 case ARM::VLD1LNq16Pseudo_UPD:
4536 case ARM::VLD1LNq32Pseudo_UPD:
4537 case ARM::VLD2LNd8Pseudo:
4538 case ARM::VLD2LNd16Pseudo:
4539 case ARM::VLD2LNd32Pseudo:
4540 case ARM::VLD2LNq16Pseudo:
4541 case ARM::VLD2LNq32Pseudo:
4542 case ARM::VLD2LNd8Pseudo_UPD:
4543 case ARM::VLD2LNd16Pseudo_UPD:
4544 case ARM::VLD2LNd32Pseudo_UPD:
4545 case ARM::VLD2LNq16Pseudo_UPD:
4546 case ARM::VLD2LNq32Pseudo_UPD:
4547 case ARM::VLD4LNd8Pseudo:
4548 case ARM::VLD4LNd16Pseudo:
4549 case ARM::VLD4LNd32Pseudo:
4550 case ARM::VLD4LNq16Pseudo:
4551 case ARM::VLD4LNq32Pseudo:
4552 case ARM::VLD4LNd8Pseudo_UPD:
4553 case ARM::VLD4LNd16Pseudo_UPD:
4554 case ARM::VLD4LNd32Pseudo_UPD:
4555 case ARM::VLD4LNq16Pseudo_UPD:
4556 case ARM::VLD4LNq32Pseudo_UPD:
4566unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4567 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4576 if (
MCID.isCall() || (
MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4577 !Subtarget.cheapPredicableCPSRDef())) {
4587 unsigned *PredCost)
const {
4588 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4594 if (
MI.isBundle()) {
4598 while (++
I !=
E &&
I->isInsideBundle()) {
4599 if (
I->getOpcode() != ARM::t2IT)
4600 Latency += getInstrLatency(ItinData, *
I, PredCost);
4605 const MCInstrDesc &MCID =
MI.getDesc();
4607 !Subtarget.cheapPredicableCPSRDef()))) {
4615 return MI.mayLoad() ? 3 : 1;
4628 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->getAlign().value() : 0;
4630 if (Adj >= 0 || (
int)
Latency > -Adj) {
4638 if (!
Node->isMachineOpcode())
4641 if (!ItinData || ItinData->
isEmpty())
4644 unsigned Opcode =
Node->getMachineOpcode();
4654bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4659 unsigned UseIdx)
const {
4662 if (Subtarget.nonpipelinedVFP() &&
4677 unsigned DefIdx)
const {
4679 if (!ItinData || ItinData->
isEmpty())
4684 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4685 std::optional<unsigned> DefCycle =
4687 return DefCycle && DefCycle <= 2U;
4695 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4698 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4700 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4701 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4702 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4706 if (
MI.getOpcode() == ARM::tPUSH ||
4707 MI.getOpcode() == ARM::tPOP ||
4708 MI.getOpcode() == ARM::tPOP_RET) {
4710 if (MO.isImplicit() || !MO.isReg())
4714 if (!(
MI.getOpcode() == ARM::tPUSH &&
Reg == ARM::LR) &&
4715 !(
MI.getOpcode() == ARM::tPOP_RET &&
Reg == ARM::PC)) {
4716 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4722 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4723 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4724 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4725 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4726 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4747 for (
auto Op :
MI.operands()) {
4754 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4764 unsigned LoadImmOpc,
4765 unsigned LoadOpc)
const {
4766 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4767 "ROPI/RWPI not currently supported with stack guard");
4775 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4776 assert(!Subtarget.isReadTPSoft() &&
4777 "TLS stack protector requires hardware TLS register");
4787 Module &M = *
MBB.getParent()->getFunction().getParent();
4788 Offset = M.getStackProtectorGuardOffset();
4793 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4804 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4807 if (Subtarget.isTargetMachO()) {
4809 }
else if (Subtarget.isTargetCOFF()) {
4812 else if (IsIndirect)
4814 }
else if (IsIndirect) {
4818 if (LoadImmOpc == ARM::tMOVi32imm) {
4821 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
4857 unsigned &AddSubOpc,
4858 bool &NegAcc,
bool &HasLane)
const {
4860 if (
I == MLxEntryMap.end())
4864 MulOpc = Entry.MulOpc;
4865 AddSubOpc = Entry.AddSubOpc;
4866 NegAcc = Entry.NegAcc;
4867 HasLane = Entry.HasLane;
4891std::pair<uint16_t, uint16_t>
4895 if (Subtarget.hasNEON()) {
4904 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
4905 MI.getOpcode() == ARM::VMOVS))
4912 return std::make_pair(
ExeNEON, 0);
4917 return std::make_pair(
ExeNEON, 0);
4920 return std::make_pair(
ExeVFP, 0);
4926 unsigned SReg,
unsigned &Lane) {
4928 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4935 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4937 assert(DReg &&
"S-register with no D super-register?");
4962 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
4968 ImplicitSReg =
TRI->getSubReg(DReg,
4969 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4971 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
4986 unsigned DstReg, SrcReg;
4991 switch (
MI.getOpcode()) {
5003 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
5006 DstReg =
MI.getOperand(0).getReg();
5007 SrcReg =
MI.getOperand(1).getReg();
5009 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5010 MI.removeOperand(i - 1);
5013 MI.setDesc(
get(ARM::VORRd));
5025 DstReg =
MI.getOperand(0).getReg();
5026 SrcReg =
MI.getOperand(1).getReg();
5028 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5029 MI.removeOperand(i - 1);
5036 MI.setDesc(
get(ARM::VGETLNi32));
5052 DstReg =
MI.getOperand(0).getReg();
5053 SrcReg =
MI.getOperand(1).getReg();
5061 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5062 MI.removeOperand(i - 1);
5066 MI.setDesc(
get(ARM::VSETLNi32));
5085 DstReg =
MI.getOperand(0).getReg();
5086 SrcReg =
MI.getOperand(1).getReg();
5088 unsigned DstLane = 0, SrcLane = 0;
5097 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5098 MI.removeOperand(i - 1);
5103 MI.setDesc(
get(ARM::VDUPLN32d));
5137 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5138 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5141 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5142 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5147 if (SrcLane == DstLane)
5150 MI.setDesc(
get(ARM::VEXTd32));
5155 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5156 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5159 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5160 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5165 if (SrcLane != DstLane)
5171 if (ImplicitSReg != 0)
5197 auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5198 if (!PartialUpdateClearance)
5209 switch (
MI.getOpcode()) {
5215 case ARM::VMOVv4i16:
5216 case ARM::VMOVv2i32:
5217 case ARM::VMOVv2f32:
5218 case ARM::VMOVv1i64:
5219 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5223 case ARM::VLD1LNd32:
5232 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5236 if (Reg.isVirtual()) {
5238 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5240 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5243 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5244 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5250 return PartialUpdateClearance;
5257 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5262 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5263 unsigned DReg = Reg;
5266 if (ARM::SPRRegClass.
contains(Reg)) {
5267 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5268 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5271 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5272 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5285 MI.addRegisterKilled(DReg,
TRI,
true);
5289 return Subtarget.hasFeature(ARM::HasV6KOps);
5293 if (
MI->getNumOperands() < 4)
5295 unsigned ShOpVal =
MI->getOperand(3).getImm();
5299 ((ShImm == 1 || ShImm == 2) &&
5309 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5310 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5312 switch (
MI.getOpcode()) {
5324 MOReg = &
MI.getOperand(2);
5336 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5337 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5339 switch (
MI.getOpcode()) {
5350 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5359 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5360 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5362 switch (
MI.getOpcode()) {
5363 case ARM::VSETLNi32:
5364 case ARM::MVE_VMOV_to_lane_32:
5372 BaseReg.Reg = MOBaseReg.
getReg();
5375 InsertedReg.
Reg = MOInsertedReg.
getReg();
5383std::pair<unsigned, unsigned>
5386 return std::make_pair(TF & Mask, TF & ~Mask);
5391 using namespace ARMII;
5393 static const std::pair<unsigned, const char *> TargetFlags[] = {
5394 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5395 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5396 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5403 using namespace ARMII;
5405 static const std::pair<unsigned, const char *> TargetFlags[] = {
5406 {MO_COFFSTUB,
"arm-coffstub"},
5407 {MO_GOT,
"arm-got"},
5408 {MO_SBREL,
"arm-sbrel"},
5409 {MO_DLLIMPORT,
"arm-dllimport"},
5410 {MO_SECREL,
"arm-secrel"},
5411 {MO_NONLAZY,
"arm-nonlazy"}};
5415std::optional<RegImmPair>
5418 unsigned Opcode =
MI.getOpcode();
5425 return std::nullopt;
5428 if (Opcode == ARM::SUBri)
5430 else if (Opcode != ARM::ADDri)
5431 return std::nullopt;
5436 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5437 return std::nullopt;
5439 Offset =
MI.getOperand(2).getImm() * Sign;
5447 for (
auto I = From;
I != To; ++
I)
5448 if (
I->modifiesRegister(Reg,
TRI))
5461 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5463 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5469 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5471 Register Reg = CmpMI->getOperand(0).getReg();
5474 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5487 if (Subtarget->isThumb()) {
5489 return ForCodesize ? 2 : 1;
5490 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5493 return ForCodesize ? 4 : 1;
5495 return ForCodesize ? 4 : 2;
5497 return ForCodesize ? 4 : 2;
5499 return ForCodesize ? 4 : 2;
5502 return ForCodesize ? 4 : 1;
5504 return ForCodesize ? 4 : 1;
5505 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5506 return ForCodesize ? 4 : 1;
5508 return ForCodesize ? 8 : 2;
5510 return ForCodesize ? 8 : 2;
5513 return ForCodesize ? 8 : 2;
5514 return ForCodesize ? 8 : 3;
5678 MachineFunction *MF =
C.getMF();
5680 const ARMBaseRegisterInfo *ARI =
5681 static_cast<const ARMBaseRegisterInfo *
>(&
TRI);
5690 C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
5691 C.isAvailableInsideSeq(
Reg,
TRI))
5705 for (;
I !=
E; ++
I) {
5709 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5713 unsigned Opcode =
MI.getOpcode();
5714 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5715 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5716 Opcode == ARM::tBXNS_RET) {
5722 if (
MI.readsRegister(ARM::LR, &
TRI))
5728std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5731 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5732 unsigned MinRepeats)
const {
5733 unsigned SequenceSize = 0;
5734 for (
auto &
MI : RepeatedSequenceLocs[0])
5738 unsigned FlagsSetInAll = 0xF;
5743 FlagsSetInAll &=
C.Flags;
5762 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5770 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5773 if (RepeatedSequenceLocs.size() < MinRepeats)
5774 return std::nullopt;
5793 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5794 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5795 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5797 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5799 if (RepeatedSequenceLocs.size() < MinRepeats)
5800 return std::nullopt;
5810 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5811 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5812 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5814 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5816 if (RepeatedSequenceLocs.size() < MinRepeats)
5817 return std::nullopt;
5822 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5825 auto SetCandidateCallInfo =
5826 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5828 C.setCallInfo(CallID, NumBytesForCall);
5833 const auto &SomeMFI =
5836 if (SomeMFI.branchTargetEnforcement()) {
5845 if (SomeMFI.shouldSignReturnAddress(
true)) {
5855 if (RepeatedSequenceLocs[0].back().isTerminator()) {
5859 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5860 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5861 LastInstrOpcode == ARM::tBLXr ||
5862 LastInstrOpcode == ARM::tBLXr_noip ||
5863 LastInstrOpcode == ARM::tBLXi) {
5871 unsigned NumBytesNoStackCalls = 0;
5872 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5877 const auto Last =
C.getMBB()->rbegin();
5878 const bool LRIsAvailable =
5879 C.getMBB()->isReturnBlock() && !
Last->isCall()
5882 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
5883 if (LRIsAvailable) {
5887 CandidatesWithoutStackFixups.push_back(
C);
5892 else if (findRegisterToSaveLRTo(
C)) {
5896 CandidatesWithoutStackFixups.push_back(
C);
5901 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
5904 CandidatesWithoutStackFixups.push_back(
C);
5910 NumBytesNoStackCalls += SequenceSize;
5916 if (NumBytesNoStackCalls <=
5917 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
5918 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5920 if (RepeatedSequenceLocs.size() < MinRepeats)
5921 return std::nullopt;
5946 return std::make_unique<outliner::OutlinedFunction>(
5947 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
5950bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
5953 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
5978 unsigned NumOps =
MI->getDesc().getNumOperands();
5979 unsigned ImmIdx =
NumOps - 3;
5983 int64_t OffVal =
Offset.getImm();
5989 unsigned NumBits = 0;
6018 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6038 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6039 "Can't encode this offset!");
6040 OffVal +=
Fixup / Scale;
6042 unsigned Mask = (1 << NumBits) - 1;
6044 if (OffVal <= Mask) {
6046 MI->getOperand(ImmIdx).setImm(OffVal);
6054 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6058 const Function &CFn =
C.getMF()->getFunction();
6065 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6073 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6092 unsigned &Flags)
const {
6095 assert(
MBB.getParent()->getRegInfo().tracksLiveness() &&
6096 "Suitable Machine Function for outlining must track liveness");
6104 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6105 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6109 if (R12AvailableInBlock && CPSRAvailableInBlock)
6117 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6119 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6129 bool LRIsAvailable =
6130 MBB.isReturnBlock() && !
MBB.back().isCall()
6142 unsigned Flags)
const {
6148 unsigned Opc =
MI.getOpcode();
6149 if (
Opc == ARM::tPICADD ||
Opc == ARM::PICADD ||
Opc == ARM::PICSTR ||
6150 Opc == ARM::PICSTRB ||
Opc == ARM::PICSTRH ||
Opc == ARM::PICLDR ||
6151 Opc == ARM::PICLDRB ||
Opc == ARM::PICLDRH ||
Opc == ARM::PICLDRSB ||
6152 Opc == ARM::PICLDRSH ||
Opc == ARM::t2LDRpci_pic ||
6153 Opc == ARM::t2MOVi16_ga_pcrel ||
Opc == ARM::t2MOVTi16_ga_pcrel ||
6154 Opc == ARM::t2MOV_ga_pcrel)
6158 if (
Opc == ARM::t2BF_LabelPseudo ||
Opc == ARM::t2DoLoopStart ||
6159 Opc == ARM::t2DoLoopStartTP ||
Opc == ARM::t2WhileLoopStart ||
6160 Opc == ARM::t2WhileLoopStartLR ||
Opc == ARM::t2WhileLoopStartTP ||
6161 Opc == ARM::t2LoopDec ||
Opc == ARM::t2LoopEnd ||
6162 Opc == ARM::t2LoopEndDec)
6171 if (
MI.isTerminator())
6177 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6185 if (MOP.isGlobal()) {
6194 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6195 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6203 if (
Opc == ARM::BL ||
Opc == ARM::tBL ||
Opc == ARM::BLX ||
6204 Opc == ARM::BLX_noip ||
Opc == ARM::tBLXr ||
Opc == ARM::tBLXr_noip ||
6209 return UnknownCallOutlineType;
6217 return UnknownCallOutlineType;
6225 return UnknownCallOutlineType;
6233 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6237 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6250 bool MightNeedStackFixUp =
6254 if (!MightNeedStackFixUp)
6260 if (
MI.modifiesRegister(ARM::SP,
TRI))
6265 if (checkAndUpdateStackOffset(&
MI, Subtarget.getStackAlignment().value(),
6274 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6275 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6279 if (
MI.isCFIInstruction())
6294 int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6296 assert(Align >= 8 && Align <= 256);
6298 assert(Subtarget.isThumb2());
6310 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6324 CFIBuilder.buildDefCFAOffset(Align);
6329 CFIBuilder.buildOffset(ARM::LR, -LROffset);
6332 CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -Align);
6338 bool CFI,
bool Auth)
const {
6339 int Align = Subtarget.getStackAlignment().value();
6342 assert(Subtarget.isThumb2());
6354 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6358 if (!Subtarget.isThumb())
6360 MIB.
addImm(Subtarget.getStackAlignment().value())
6368 CFIBuilder.buildDefCFAOffset(0);
6369 CFIBuilder.buildRestore(ARM::LR);
6371 CFIBuilder.buildUndefined(ARM::RA_AUTH_CODE);
6385 bool isThumb = Subtarget.isThumb();
6386 unsigned FuncOp =
isThumb ? 2 : 0;
6387 unsigned Opc =
Call->getOperand(FuncOp).isReg()
6388 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6389 :
isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6393 .
add(
Call->getOperand(FuncOp));
6396 Call->eraseFromParent();
6401 return MI.isCall() && !
MI.isReturn();
6409 Et = std::prev(
MBB.end());
6414 if (!
MBB.isLiveIn(ARM::LR))
6415 MBB.addLiveIn(ARM::LR);
6419 saveLROnStack(
MBB, It,
true, Auth);
6424 "Can only fix up stack references once");
6425 fixupPostOutline(
MBB);
6428 restoreLRFromStack(
MBB, Et,
true, Auth);
6448 fixupPostOutline(
MBB);
6457 bool isThumb = Subtarget.isThumb();
6463 ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6469 It =
MBB.insert(It, MIB);
6483 It =
MBB.insert(It, CallMIB);
6490 Register Reg = findRegisterToSaveLRTo(
C);
6491 assert(Reg != 0 &&
"No callee-saved register available?");
6498 CallPt =
MBB.insert(It, CallMIB);
6506 if (!
MBB.isLiveIn(ARM::LR))
6507 MBB.addLiveIn(ARM::LR);
6510 CallPt =
MBB.insert(It, CallMIB);
6521bool ARMBaseInstrInfo::isReMaterializableImpl(
6555 static int constexpr MAX_STAGES = 30;
6556 static int constexpr LAST_IS_USE = MAX_STAGES;
6557 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6558 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6559 typedef std::map<Register, IterNeed> IterNeeds;
6562 const IterNeeds &CIN);
6574 : EndLoop(EndLoop), LoopCount(LoopCount),
6576 TII(MF->getSubtarget().getInstrInfo()) {}
6578 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6580 return MI == EndLoop ||
MI == LoopCount;
6583 bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
override {
6584 if (tooMuchRegisterPressure(SSD, SMS))
6590 std::optional<bool> createTripCountGreaterCondition(
6591 int TC, MachineBasicBlock &
MBB,
6592 SmallVectorImpl<MachineOperand> &
Cond)
override {
6601 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6604 MachineInstr *LoopDec =
nullptr;
6606 if (
I.getOpcode() == ARM::t2LoopDec)
6608 assert(LoopDec &&
"Unable to find copied LoopDec");
6614 .
addReg(ARM::NoRegister);
6622 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
6624 void adjustTripCount(
int TripCountAdjust)
override {}
6628 const IterNeeds &CIN) {
6630 for (
const auto &
N : CIN) {
6631 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6632 for (
int I = 0;
I < Cnt; ++
I)
6637 for (
const auto &
N : CIN) {
6638 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6639 for (
int I = 0;
I < Cnt; ++
I)
6647 IterNeeds CrossIterationNeeds;
6652 for (
auto &SU : SSD.
SUnits) {
6655 for (
auto &S : SU.Succs)
6659 CrossIterationNeeds[
Reg.
id()].set(0);
6660 }
else if (S.isAssignedRegDep()) {
6662 if (OStg >= 0 && OStg != Stg) {
6665 CrossIterationNeeds[
Reg.
id()] |= ((1 << (OStg - Stg)) - 1);
6674 std::vector<SUnit *> ProposedSchedule;
6678 std::deque<SUnit *> Instrs =
6680 std::sort(Instrs.begin(), Instrs.end(),
6681 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6688 for (
auto *SU : ProposedSchedule)
6692 if (!MO.isReg() || !MO.getReg())
6695 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6696 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6697 CIter->second[SEEN_AS_LIVE])
6699 if (MO.isDef() && !MO.isDead())
6700 CIter->second.set(SEEN_AS_LIVE);
6701 else if (MO.isUse())
6702 CIter->second.set(LAST_IS_USE);
6704 for (
auto &CI : CrossIterationNeeds)
6705 CI.second.reset(LAST_IS_USE);
6711 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6714 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6716 for (
auto *SU : ProposedSchedule) {
6718 RPTracker.setPos(std::next(CurInstI));
6724 if (!MO.isReg() || !MO.getReg())
6727 if (MO.isDef() && !MO.isDead()) {
6728 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6729 if (CIter != CrossIterationNeeds.end()) {
6730 CIter->second.reset(0);
6731 CIter->second.reset(SEEN_AS_LIVE);
6735 for (
auto &S : SU->Preds) {
6737 if (S.isAssignedRegDep()) {
6739 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6740 if (CIter != CrossIterationNeeds.end()) {
6742 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6743 if (Stg - Stg2 < MAX_STAGES)
6744 CIter->second.set(Stg - Stg2);
6745 CIter->second.set(SEEN_AS_LIVE);
6750 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6753 auto &
P = RPTracker.getPressure().MaxSetPressure;
6754 for (
unsigned I = 0,
E =
P.size();
I <
E; ++
I) {
6756 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6757 I == ARM::DTriple_with_qsub_0_in_QPR)
6769std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6773 if (Preheader == LoopBB)
6774 Preheader = *std::next(LoopBB->
pred_begin());
6776 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6782 for (
auto &L : LoopBB->
instrs()) {
6789 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
6803 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
6804 for (
auto &L : LoopBB->
instrs())
6809 Register LoopDecResult =
I->getOperand(0).getReg();
6812 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
6815 for (
auto &J : Preheader->
instrs())
6816 if (J.getOpcode() == ARM::t2DoLoopStart)
6820 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, RegState State) const
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
const ARMBaseRegisterInfo & getRegisterInfo() const
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ARMBaseInstrInfo(const ARMSubtarget &STI, const ARMBaseRegisterInfo &TRI)
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
Helper class for creating CFI instructions and inserting them into MIR.
void buildRegister(MCRegister Reg1, MCRegister Reg2) const
void buildRestore(MCRegister Reg) const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasDLLImportStorageClass() const
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
LLVM_ABI void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getOpcode() const
Return the opcode number for this descriptor.
LLVM_ABI bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineInstrBundleIterator< MachineInstr > iterator
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void increaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
LLVM_ABI void decreaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Wrapper class representing a virtual register or register unit.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static CondCodes getOppositeCondition(CondCodes CC)
ARMII - This namespace holds all of the target specific flags that instruction info tracks.
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
Define some predicates that are used for node matching.
@ C
The default llvm calling convention, compatible with C.
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
constexpr T rotr(T V, int R)
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
constexpr RegState getKillRegState(bool B)
unsigned getBLXpredOpcode(const MachineFunction &MF)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
FunctionAddr VTableAddr Next
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
constexpr RegState getUndefRegState(bool B)
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.