73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
90 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
91 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
92 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
93 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
94 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
95 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
96 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
97 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
100 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
101 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
102 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
103 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
104 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
105 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
106 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
107 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
114 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
115 if (!MLxEntryMap.insert(std::make_pair(
ARM_MLxTable[i].MLxOpc, i)).second)
127 if (usePreRAHazardRecognizer()) {
129 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
149 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
165 if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
186 bool AllowModify)
const {
191 if (
I ==
MBB.instr_begin())
201 bool CantAnalyze =
false;
205 while (
I->isDebugInstr() || !
I->isTerminator() ||
207 I->getOpcode() == ARM::t2DoLoopStartTP){
208 if (
I ==
MBB.instr_begin())
219 TBB =
I->getOperand(0).getMBB();
225 assert(!FBB &&
"FBB should have been null.");
227 TBB =
I->getOperand(0).getMBB();
228 Cond.push_back(
I->getOperand(1));
229 Cond.push_back(
I->getOperand(2));
230 }
else if (
I->isReturn()) {
233 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
240 TBB =
I->getOperand(1).getMBB();
242 Cond.push_back(
I->getOperand(0));
264 while (DI !=
MBB.instr_end()) {
287 if (
I ==
MBB.instr_begin())
299 int *BytesRemoved)
const {
300 assert(!BytesRemoved &&
"code size not handled");
311 I->eraseFromParent();
315 if (
I ==
MBB.begin())
return 1;
321 I->eraseFromParent();
330 int *BytesAdded)
const {
331 assert(!BytesAdded &&
"code size not handled");
340 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
342 "ARM branch conditions have two or three components!");
352 }
else if (
Cond.size() == 2) {
363 if (
Cond.size() == 2)
368 else if (
Cond.size() == 3)
379 if (
Cond.size() == 2) {
391 while (++
I != E &&
I->isInsideBundle()) {
392 int PIdx =
I->findFirstPredOperandIdx();
393 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
399 int PIdx =
MI.findFirstPredOperandIdx();
400 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
408 std::string GenericComment =
410 if (!GenericComment.empty())
411 return GenericComment;
415 return std::string();
419 int FirstPredOp =
MI.findFirstPredOperandIdx();
420 if (FirstPredOp != (
int)
OpIdx)
421 return std::string();
423 std::string CC =
"CC::";
430 unsigned Opc =
MI.getOpcode();
439 int PIdx =
MI.findFirstPredOperandIdx();
443 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
450 "CPSR def isn't expected operand");
451 assert((
MI.getOperand(1).isDead() ||
452 MI.getOperand(1).getReg() != ARM::CPSR) &&
453 "if conversion tried to stop defining used CPSR");
454 MI.getOperand(1).setReg(ARM::NoRegister);
464 if (Pred1.
size() > 2 || Pred2.
size() > 2)
489 std::vector<MachineOperand> &Pred,
490 bool SkipDead)
const {
493 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
494 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
495 if (ClobbersCPSR || IsCPSR) {
513 for (
const auto &MO :
MI.operands())
514 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
520 switch (
MI->getOpcode()) {
521 default:
return true;
552 if (!
MI.isPredicable())
590 if (!MO.isReg() || MO.isUndef() || MO.isUse())
592 if (MO.getReg() != ARM::CPSR)
612 switch (
MI.getOpcode()) {
619 return MCID.getSize();
620 case TargetOpcode::BUNDLE:
621 return getInstBundleLength(
MI);
622 case ARM::CONSTPOOL_ENTRY:
623 case ARM::JUMPTABLE_INSTS:
624 case ARM::JUMPTABLE_ADDRS:
625 case ARM::JUMPTABLE_TBB:
626 case ARM::JUMPTABLE_TBH:
629 return MI.getOperand(2).getImm();
631 return MI.getOperand(1).getImm();
633 case ARM::INLINEASM_BR: {
635 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
643unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
647 while (++
I != E &&
I->isInsideBundle()) {
648 assert(!
I->isBundle() &&
"No nested bundle!");
658 unsigned Opc = Subtarget.isThumb()
659 ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
667 if (Subtarget.isMClass())
678 unsigned Opc = Subtarget.isThumb()
679 ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
684 if (Subtarget.isMClass())
713 unsigned Cond,
unsigned Inactive) {
723 bool RenamableSrc)
const {
724 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
725 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
727 if (GPRDest && GPRSrc) {
735 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
736 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
739 if (SPRDest && SPRSrc)
741 else if (GPRDest && SPRSrc)
743 else if (SPRDest && GPRSrc)
745 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
747 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
748 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
753 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR)
755 if (
Opc == ARM::MVE_VORR)
757 else if (
Opc != ARM::MQPRCopy)
763 unsigned BeginIdx = 0;
764 unsigned SubRegs = 0;
768 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
769 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
770 BeginIdx = ARM::qsub_0;
772 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
773 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
774 BeginIdx = ARM::qsub_0;
777 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
779 BeginIdx = ARM::dsub_0;
781 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
783 BeginIdx = ARM::dsub_0;
785 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
787 BeginIdx = ARM::dsub_0;
789 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
790 Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
791 BeginIdx = ARM::gsub_0;
793 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
795 BeginIdx = ARM::dsub_0;
798 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
800 BeginIdx = ARM::dsub_0;
803 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
805 BeginIdx = ARM::dsub_0;
808 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
809 !Subtarget.hasFP64()) {
811 BeginIdx = ARM::ssub_0;
813 }
else if (SrcReg == ARM::CPSR) {
816 }
else if (DestReg == ARM::CPSR) {
819 }
else if (DestReg == ARM::VPR) {
825 }
else if (SrcReg == ARM::VPR) {
831 }
else if (DestReg == ARM::FPSCR_NZCV) {
833 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
837 }
else if (SrcReg == ARM::FPSCR_NZCV) {
839 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
845 assert(
Opc &&
"Impossible reg-to-reg copy");
851 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
852 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
858 for (
unsigned i = 0; i != SubRegs; ++i) {
859 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
860 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
861 assert(Dst && Src &&
"Bad sub-register");
863 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
868 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR) {
872 if (
Opc == ARM::MVE_VORR)
877 if (
Opc == ARM::MOVr)
886std::optional<DestSourcePair>
895 if (!
MI.isMoveReg() ||
896 (
MI.getOpcode() == ARM::VORRq &&
897 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
902std::optional<ParamLoadedValue>
906 Register DstReg = DstSrcPair->Destination->getReg();
935 unsigned State)
const {
937 return MIB.
addReg(Reg, State);
941 return MIB.
addReg(Reg, State, SubIdx);
946 Register SrcReg,
bool isKill,
int FI,
959 switch (
TRI.getSpillSize(*RC)) {
961 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
972 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
979 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
986 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
993 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1004 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1011 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1012 if (Subtarget.hasV5TEOps()) {
1015 AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
1026 AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
1032 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1048 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1049 Subtarget.hasMVEIntegerOps()) {
1054 .addMemOperand(MMO);
1060 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1063 Subtarget.hasNEON()) {
1077 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
1078 AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
1084 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1085 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1086 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1088 Subtarget.hasNEON()) {
1097 }
else if (Subtarget.hasMVEIntegerOps()) {
1109 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
1110 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
1111 AddDReg(MIB, SrcReg, ARM::dsub_3, 0);
1117 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1118 Subtarget.hasMVEIntegerOps()) {
1123 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1129 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
1130 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
1131 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, 0);
1132 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, 0);
1133 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, 0);
1134 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, 0);
1135 AddDReg(MIB, SrcReg, ARM::dsub_7, 0);
1145 int &FrameIndex)
const {
1146 switch (
MI.getOpcode()) {
1150 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1151 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1152 MI.getOperand(3).getImm() == 0) {
1153 FrameIndex =
MI.getOperand(1).getIndex();
1154 return MI.getOperand(0).getReg();
1163 case ARM::VSTR_P0_off:
1164 case ARM::VSTR_FPSCR_NZCVQC_off:
1165 case ARM::MVE_VSTRWU32:
1166 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1167 MI.getOperand(2).getImm() == 0) {
1168 FrameIndex =
MI.getOperand(1).getIndex();
1169 return MI.getOperand(0).getReg();
1173 case ARM::VST1d64TPseudo:
1174 case ARM::VST1d64QPseudo:
1175 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1176 FrameIndex =
MI.getOperand(0).getIndex();
1177 return MI.getOperand(2).getReg();
1181 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1182 FrameIndex =
MI.getOperand(1).getIndex();
1183 return MI.getOperand(0).getReg();
1186 case ARM::MQQPRStore:
1187 case ARM::MQQQQPRStore:
1188 if (
MI.getOperand(1).isFI()) {
1189 FrameIndex =
MI.getOperand(1).getIndex();
1190 return MI.getOperand(0).getReg();
1199 int &FrameIndex)
const {
1201 if (
MI.mayStore() && hasStoreToStackSlot(
MI,
Accesses) &&
1218 if (
I !=
MBB.end())
DL =
I->getDebugLoc();
1227 switch (
TRI.getSpillSize(*RC)) {
1229 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1239 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1245 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1251 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1257 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1267 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1273 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1276 if (Subtarget.hasV5TEOps()) {
1299 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1312 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1313 Subtarget.hasMVEIntegerOps()) {
1315 MIB.addFrameIndex(FI)
1317 .addMemOperand(MMO);
1323 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1325 Subtarget.hasNEON()) {
1346 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1347 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1348 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1350 Subtarget.hasNEON()) {
1356 }
else if (Subtarget.hasMVEIntegerOps()) {
1376 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1377 Subtarget.hasMVEIntegerOps()) {
1381 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1405 int &FrameIndex)
const {
1406 switch (
MI.getOpcode()) {
1410 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1411 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1412 MI.getOperand(3).getImm() == 0) {
1413 FrameIndex =
MI.getOperand(1).getIndex();
1414 return MI.getOperand(0).getReg();
1423 case ARM::VLDR_P0_off:
1424 case ARM::VLDR_FPSCR_NZCVQC_off:
1425 case ARM::MVE_VLDRWU32:
1426 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1427 MI.getOperand(2).getImm() == 0) {
1428 FrameIndex =
MI.getOperand(1).getIndex();
1429 return MI.getOperand(0).getReg();
1433 case ARM::VLD1d8TPseudo:
1434 case ARM::VLD1d16TPseudo:
1435 case ARM::VLD1d32TPseudo:
1436 case ARM::VLD1d64TPseudo:
1437 case ARM::VLD1d8QPseudo:
1438 case ARM::VLD1d16QPseudo:
1439 case ARM::VLD1d32QPseudo:
1440 case ARM::VLD1d64QPseudo:
1441 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1442 FrameIndex =
MI.getOperand(1).getIndex();
1443 return MI.getOperand(0).getReg();
1447 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1448 FrameIndex =
MI.getOperand(1).getIndex();
1449 return MI.getOperand(0).getReg();
1452 case ARM::MQQPRLoad:
1453 case ARM::MQQQQPRLoad:
1454 if (
MI.getOperand(1).isFI()) {
1455 FrameIndex =
MI.getOperand(1).getIndex();
1456 return MI.getOperand(0).getReg();
1465 int &FrameIndex)
const {
1467 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI,
Accesses) &&
1481 bool isThumb2 = Subtarget.
isThumb2();
1488 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1490 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1491 : isThumb1 ? ARM::tLDMIA_UPD
1495 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1498 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1499 MachineOperand STWb(
MI->getOperand(0));
1500 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1501 : isThumb1 ? ARM::tSTMIA_UPD
1505 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1508 MachineOperand LDBase(
MI->getOperand(3));
1511 MachineOperand STBase(
MI->getOperand(2));
1520 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1521 return TRI.getEncodingValue(Reg1) <
1522 TRI.getEncodingValue(Reg2);
1525 for (
const auto &
Reg : ScratchRegs) {
1534 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1535 expandLoadStackGuard(
MI);
1536 MI.getParent()->erase(
MI);
1540 if (
MI.getOpcode() == ARM::MEMCPY) {
1549 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1554 Register DstRegS =
MI.getOperand(0).getReg();
1555 Register SrcRegS =
MI.getOperand(1).getReg();
1556 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1561 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1563 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1564 if (!DstRegD || !SrcRegD)
1570 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1574 if (
MI.getOperand(0).isDead())
1583 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1584 if (ImpDefIdx != -1)
1585 MI.removeOperand(ImpDefIdx);
1588 MI.setDesc(
get(ARM::VMOVD));
1589 MI.getOperand(0).setReg(DstRegD);
1590 MI.getOperand(1).setReg(SrcRegD);
1597 MI.getOperand(1).setIsUndef();
1602 if (
MI.getOperand(1).isKill()) {
1603 MI.getOperand(1).setIsKill(
false);
1604 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1618 assert(MCPE.isMachineConstantPoolEntry() &&
1619 "Expecting a machine constantpool entry!");
1668 case ARM::tLDRpci_pic:
1669 case ARM::t2LDRpci_pic: {
1689 switch (
I->getOpcode()) {
1690 case ARM::tLDRpci_pic:
1691 case ARM::t2LDRpci_pic: {
1693 unsigned CPI =
I->getOperand(1).getIndex();
1695 I->getOperand(1).setIndex(CPI);
1696 I->getOperand(2).setImm(PCLabelId);
1700 if (!
I->isBundledWithSucc())
1711 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1712 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1713 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1714 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1715 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1716 Opcode == ARM::t2MOV_ga_pcrel) {
1727 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1728 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1729 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1730 Opcode == ARM::t2MOV_ga_pcrel)
1742 if (isARMCP0 && isARMCP1) {
1748 }
else if (!isARMCP0 && !isARMCP1) {
1752 }
else if (Opcode == ARM::PICLDR) {
1760 if (Addr0 != Addr1) {
1796 int64_t &Offset2)
const {
1798 if (Subtarget.isThumb1Only())
return false;
1803 auto IsLoadOpcode = [&](
unsigned Opcode) {
1818 case ARM::t2LDRSHi8:
1820 case ARM::t2LDRBi12:
1821 case ARM::t2LDRSHi12:
1862 int64_t Offset1, int64_t Offset2,
1863 unsigned NumLoads)
const {
1865 if (Subtarget.isThumb1Only())
return false;
1867 assert(Offset2 > Offset1);
1869 if ((Offset2 - Offset1) / 8 > 64)
1900 if (
MI.isDebugInstr())
1904 if (
MI.isTerminator() ||
MI.isPosition())
1908 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1922 while (++
I !=
MBB->end() &&
I->isDebugInstr())
1924 if (
I !=
MBB->end() &&
I->getOpcode() == ARM::t2IT)
1935 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
1943 unsigned NumCycles,
unsigned ExtraPredCycles,
1951 if (
MBB.getParent()->getFunction().hasOptSize()) {
1953 if (!Pred->empty()) {
1955 if (LastMI->
getOpcode() == ARM::t2Bcc) {
1964 MBB, 0, 0, Probability);
1969 unsigned TCycles,
unsigned TExtra,
1971 unsigned FCycles,
unsigned FExtra,
1980 if (Subtarget.isThumb2() &&
TBB.getParent()->getFunction().hasMinSize()) {
1988 const unsigned ScalingUpFactor = 1024;
1990 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1991 unsigned UnpredCost;
1992 if (!Subtarget.hasBranchPredictor()) {
1995 unsigned NotTakenBranchCost = 1;
1996 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
1997 unsigned TUnpredCycles, FUnpredCycles;
2000 TUnpredCycles = TCycles + NotTakenBranchCost;
2001 FUnpredCycles = TakenBranchCost;
2004 TUnpredCycles = TCycles + TakenBranchCost;
2005 FUnpredCycles = FCycles + NotTakenBranchCost;
2008 PredCost -= 1 * ScalingUpFactor;
2011 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2012 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2013 UnpredCost = TUnpredCost + FUnpredCost;
2016 if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2017 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2020 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2021 unsigned FUnpredCost =
2023 UnpredCost = TUnpredCost + FUnpredCost;
2024 UnpredCost += 1 * ScalingUpFactor;
2025 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2028 return PredCost <= UnpredCost;
2033 unsigned NumInsts)
const {
2037 if (!Subtarget.isThumb2())
2041 unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
2050 if (
MI.getOpcode() == ARM::t2Bcc &&
2062 if (Subtarget.isThumb2())
2073 return Subtarget.isProfitableToUnpredicate();
2081 int PIdx =
MI.findFirstPredOperandIdx();
2087 PredReg =
MI.getOperand(PIdx+1).getReg();
2096 if (
Opc == ARM::t2B)
2105 unsigned OpIdx2)
const {
2106 switch (
MI.getOpcode()) {
2108 case ARM::t2MOVCCr: {
2113 if (CC ==
ARMCC::AL || PredReg != ARM::CPSR)
2133 if (!Reg.isVirtual())
2135 if (!
MRI.hasOneNonDBGUse(Reg))
2147 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2154 if (MO.getReg().isPhysical())
2156 if (MO.isDef() && !MO.isDead())
2159 bool DontMoveAcrossStores =
true;
2160 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2167 unsigned &TrueOp,
unsigned &FalseOp,
2168 bool &Optimizable)
const {
2169 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2170 "Unknown select instruction");
2179 Cond.push_back(
MI.getOperand(3));
2180 Cond.push_back(
MI.getOperand(4));
2189 bool PreferFalse)
const {
2190 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2191 "Unknown select instruction");
2194 bool Invert = !
DefMI;
2196 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2203 Register DestReg =
MI.getOperand(0).getReg();
2206 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2208 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2219 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2222 unsigned CondCode =
MI.getOperand(3).getImm();
2227 NewMI.
add(
MI.getOperand(4));
2238 NewMI.
add(FalseReg);
2249 if (
DefMI->getParent() !=
MI.getParent())
2253 DefMI->eraseFromParent();
2269 {ARM::ADDSri, ARM::ADDri},
2270 {ARM::ADDSrr, ARM::ADDrr},
2271 {ARM::ADDSrsi, ARM::ADDrsi},
2272 {ARM::ADDSrsr, ARM::ADDrsr},
2274 {ARM::SUBSri, ARM::SUBri},
2275 {ARM::SUBSrr, ARM::SUBrr},
2276 {ARM::SUBSrsi, ARM::SUBrsi},
2277 {ARM::SUBSrsr, ARM::SUBrsr},
2279 {ARM::RSBSri, ARM::RSBri},
2280 {ARM::RSBSrsi, ARM::RSBrsi},
2281 {ARM::RSBSrsr, ARM::RSBrsr},
2283 {ARM::tADDSi3, ARM::tADDi3},
2284 {ARM::tADDSi8, ARM::tADDi8},
2285 {ARM::tADDSrr, ARM::tADDrr},
2286 {ARM::tADCS, ARM::tADC},
2288 {ARM::tSUBSi3, ARM::tSUBi3},
2289 {ARM::tSUBSi8, ARM::tSUBi8},
2290 {ARM::tSUBSrr, ARM::tSUBrr},
2291 {ARM::tSBCS, ARM::tSBC},
2292 {ARM::tRSBS, ARM::tRSB},
2293 {ARM::tLSLSri, ARM::tLSLri},
2295 {ARM::t2ADDSri, ARM::t2ADDri},
2296 {ARM::t2ADDSrr, ARM::t2ADDrr},
2297 {ARM::t2ADDSrs, ARM::t2ADDrs},
2299 {ARM::t2SUBSri, ARM::t2SUBri},
2300 {ARM::t2SUBSrr, ARM::t2SUBrr},
2301 {ARM::t2SUBSrs, ARM::t2SUBrs},
2303 {ARM::t2RSBSri, ARM::t2RSBri},
2304 {ARM::t2RSBSrs, ARM::t2RSBrs},
2309 if (OldOpc == Entry.PseudoOpc)
2310 return Entry.MachineOpc;
2321 if (NumBytes == 0 && DestReg != BaseReg) {
2330 bool isSub = NumBytes < 0;
2331 if (isSub) NumBytes = -NumBytes;
2336 assert(ThisVal &&
"Didn't extract field correctly");
2339 NumBytes &= ~ThisVal;
2344 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2357 unsigned NumBytes) {
2368 if (!IsPush && !IsPop)
2371 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2372 MI->getOpcode() == ARM::VLDMDIA_UPD;
2373 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2374 MI->getOpcode() == ARM::tPOP ||
2375 MI->getOpcode() == ARM::tPOP_RET;
2377 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2378 MI->getOperand(1).getReg() == ARM::SP)) &&
2379 "trying to fold sp update into non-sp-updating push/pop");
2384 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2389 int RegListIdx = IsT1PushPop ? 2 : 4;
2392 unsigned RegsNeeded;
2395 RegsNeeded = NumBytes / 8;
2396 RegClass = &ARM::DPRRegClass;
2398 RegsNeeded = NumBytes / 4;
2399 RegClass = &ARM::GPRRegClass;
2409 unsigned FirstRegEnc = -1;
2412 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2417 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2418 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2421 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2424 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2427 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2434 false,
false,
true));
2444 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2466 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2467 MI->removeOperand(i);
2480 unsigned Opcode =
MI.getOpcode();
2486 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2489 if (Opcode == ARM::ADDri) {
2490 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2493 MI.setDesc(
TII.get(ARM::MOVr));
2494 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2495 MI.removeOperand(FrameRegIdx+1);
2501 MI.setDesc(
TII.get(ARM::SUBri));
2507 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2508 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2523 "Bit extraction didn't work?");
2524 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2526 unsigned ImmIdx = 0;
2528 unsigned NumBits = 0;
2532 ImmIdx = FrameRegIdx + 1;
2533 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2537 ImmIdx = FrameRegIdx+2;
2544 ImmIdx = FrameRegIdx+2;
2555 ImmIdx = FrameRegIdx+1;
2563 ImmIdx = FrameRegIdx+1;
2573 ImmIdx = FrameRegIdx+1;
2574 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2583 Offset += InstrOffs * Scale;
2584 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2594 int ImmedOffset =
Offset / Scale;
2595 unsigned Mask = (1 << NumBits) - 1;
2596 if ((
unsigned)
Offset <= Mask * Scale) {
2598 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2604 ImmedOffset = -ImmedOffset;
2606 ImmedOffset |= 1 << NumBits;
2614 ImmedOffset = ImmedOffset & Mask;
2617 ImmedOffset = -ImmedOffset;
2619 ImmedOffset |= 1 << NumBits;
2635 Register &SrcReg2, int64_t &CmpMask,
2636 int64_t &CmpValue)
const {
2637 switch (
MI.getOpcode()) {
2642 SrcReg =
MI.getOperand(0).getReg();
2645 CmpValue =
MI.getOperand(1).getImm();
2650 SrcReg =
MI.getOperand(0).getReg();
2651 SrcReg2 =
MI.getOperand(1).getReg();
2657 SrcReg =
MI.getOperand(0).getReg();
2659 CmpMask =
MI.getOperand(1).getImm();
2672 int CmpMask,
bool CommonUse) {
2673 switch (
MI->getOpcode()) {
2676 if (CmpMask !=
MI->getOperand(2).getImm())
2678 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2768 switch (
MI->getOpcode()) {
2769 default:
return false;
2865 if (!
MI)
return false;
2868 if (CmpMask != ~0) {
2872 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
2874 if (UI->getParent() != CmpInstr.
getParent())
2883 if (!
MI)
return false;
2892 if (
I ==
B)
return false;
2903 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
2908 if (CmpInstr.
getOpcode() == ARM::CMPri ||
2916 bool IsThumb1 =
false;
2933 if (
MI && IsThumb1) {
2935 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
2936 bool CanReorder =
true;
2937 for (;
I != E; --
I) {
2938 if (
I->getOpcode() != ARM::tMOVi8) {
2944 MI =
MI->removeFromParent();
2955 bool SubAddIsThumb1 =
false;
2970 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
2971 Instr.readsRegister(ARM::CPSR,
TRI))
2993 IsThumb1 = SubAddIsThumb1;
3008 bool isSafe =
false;
3011 while (!isSafe && ++
I != E) {
3013 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3014 !isSafe && IO != EO; ++IO) {
3028 bool IsInstrVSel =
true;
3029 switch (Instr.getOpcode()) {
3031 IsInstrVSel =
false;
3065 bool IsSub =
Opc == ARM::SUBrr ||
Opc == ARM::t2SUBrr ||
3066 Opc == ARM::SUBri ||
Opc == ARM::t2SUBri ||
3067 Opc == ARM::tSUBrr ||
Opc == ARM::tSUBi3 ||
3069 unsigned OpI =
Opc != ARM::tSUBrr ? 1 : 2;
3081 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3115 if (Succ->isLiveIn(ARM::CPSR))
3122 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3123 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3124 MI->getOperand(CPSRRegNum).setIsDef(
true);
3132 for (
auto &[MO,
Cond] : OperandsToUpdate)
3135 MI->clearRegisterDeads(ARM::CPSR);
3149 int64_t CmpMask, CmpValue;
3151 if (
Next !=
MI.getParent()->end() &&
3162 unsigned DefOpc =
DefMI.getOpcode();
3163 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3164 DefOpc != ARM::tMOVi32imm)
3166 if (!
DefMI.getOperand(1).isImm())
3170 if (!
MRI->hasOneNonDBGUse(Reg))
3186 if (
UseMI.getOperand(
NumOps - 1).getReg() == ARM::CPSR)
3192 unsigned UseOpc =
UseMI.getOpcode();
3193 unsigned NewUseOpc = 0;
3195 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3196 bool Commute =
false;
3198 default:
return false;
3206 case ARM::t2EORrr: {
3207 Commute =
UseMI.getOperand(2).getReg() != Reg;
3212 if (UseOpc == ARM::SUBrr && Commute)
3218 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3221 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3235 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3236 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3240 case ARM::t2SUBrr: {
3241 if (UseOpc == ARM::t2SUBrr && Commute)
3246 const bool ToSP =
DefMI.getOperand(0).getReg() == ARM::SP;
3247 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3248 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3250 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3253 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3268 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3269 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3276 unsigned OpIdx = Commute ? 2 : 1;
3278 bool isKill =
UseMI.getOperand(
OpIdx).isKill();
3280 Register NewReg =
MRI->createVirtualRegister(TRC);
3288 UseMI.getOperand(1).setReg(NewReg);
3289 UseMI.getOperand(1).setIsKill();
3290 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3291 DefMI.eraseFromParent();
3298 case ARM::t2ADDspImm:
3299 case ARM::t2SUBspImm:
3302 MRI->constrainRegClass(
UseMI.getOperand(0).getReg(), TRC);
3309 switch (
MI.getOpcode()) {
3313 assert(UOps >= 0 &&
"bad # UOps");
3321 unsigned ShOpVal =
MI.getOperand(3).getImm();
3326 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3334 if (!
MI.getOperand(2).getReg())
3337 unsigned ShOpVal =
MI.getOperand(3).getImm();
3342 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3352 case ARM::LDRSB_POST:
3353 case ARM::LDRSH_POST: {
3356 return (Rt == Rm) ? 4 : 3;
3359 case ARM::LDR_PRE_REG:
3360 case ARM::LDRB_PRE_REG: {
3365 unsigned ShOpVal =
MI.getOperand(4).getImm();
3370 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3376 case ARM::STR_PRE_REG:
3377 case ARM::STRB_PRE_REG: {
3378 unsigned ShOpVal =
MI.getOperand(4).getImm();
3383 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3390 case ARM::STRH_PRE: {
3400 case ARM::LDR_POST_REG:
3401 case ARM::LDRB_POST_REG:
3402 case ARM::LDRH_POST: {
3405 return (Rt == Rm) ? 3 : 2;
3408 case ARM::LDR_PRE_IMM:
3409 case ARM::LDRB_PRE_IMM:
3410 case ARM::LDR_POST_IMM:
3411 case ARM::LDRB_POST_IMM:
3412 case ARM::STRB_POST_IMM:
3413 case ARM::STRB_POST_REG:
3414 case ARM::STRB_PRE_IMM:
3415 case ARM::STRH_POST:
3416 case ARM::STR_POST_IMM:
3417 case ARM::STR_POST_REG:
3418 case ARM::STR_PRE_IMM:
3421 case ARM::LDRSB_PRE:
3422 case ARM::LDRSH_PRE: {
3429 unsigned ShOpVal =
MI.getOperand(4).getImm();
3434 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3447 return (Rt == Rn) ? 3 : 2;
3458 case ARM::LDRD_POST:
3459 case ARM::t2LDRD_POST:
3462 case ARM::STRD_POST:
3463 case ARM::t2STRD_POST:
3466 case ARM::LDRD_PRE: {
3473 return (Rt == Rn) ? 4 : 3;
3476 case ARM::t2LDRD_PRE: {
3479 return (Rt == Rn) ? 4 : 3;
3482 case ARM::STRD_PRE: {
3490 case ARM::t2STRD_PRE:
3493 case ARM::t2LDR_POST:
3494 case ARM::t2LDRB_POST:
3495 case ARM::t2LDRB_PRE:
3496 case ARM::t2LDRSBi12:
3497 case ARM::t2LDRSBi8:
3498 case ARM::t2LDRSBpci:
3500 case ARM::t2LDRH_POST:
3501 case ARM::t2LDRH_PRE:
3503 case ARM::t2LDRSB_POST:
3504 case ARM::t2LDRSB_PRE:
3505 case ARM::t2LDRSH_POST:
3506 case ARM::t2LDRSH_PRE:
3507 case ARM::t2LDRSHi12:
3508 case ARM::t2LDRSHi8:
3509 case ARM::t2LDRSHpci:
3513 case ARM::t2LDRDi8: {
3516 return (Rt == Rn) ? 3 : 2;
3519 case ARM::t2STRB_POST:
3520 case ARM::t2STRB_PRE:
3523 case ARM::t2STRH_POST:
3524 case ARM::t2STRH_PRE:
3526 case ARM::t2STR_POST:
3527 case ARM::t2STR_PRE:
3558 E =
MI.memoperands_end();
3560 Size += (*I)->getSize().getValue();
3567 return std::min(
Size / 4, 16U);
3572 unsigned UOps = 1 + NumRegs;
3576 case ARM::VLDMDIA_UPD:
3577 case ARM::VLDMDDB_UPD:
3578 case ARM::VLDMSIA_UPD:
3579 case ARM::VLDMSDB_UPD:
3580 case ARM::VSTMDIA_UPD:
3581 case ARM::VSTMDDB_UPD:
3582 case ARM::VSTMSIA_UPD:
3583 case ARM::VSTMSDB_UPD:
3584 case ARM::LDMIA_UPD:
3585 case ARM::LDMDA_UPD:
3586 case ARM::LDMDB_UPD:
3587 case ARM::LDMIB_UPD:
3588 case ARM::STMIA_UPD:
3589 case ARM::STMDA_UPD:
3590 case ARM::STMDB_UPD:
3591 case ARM::STMIB_UPD:
3592 case ARM::tLDMIA_UPD:
3593 case ARM::tSTMIA_UPD:
3594 case ARM::t2LDMIA_UPD:
3595 case ARM::t2LDMDB_UPD:
3596 case ARM::t2STMIA_UPD:
3597 case ARM::t2STMDB_UPD:
3600 case ARM::LDMIA_RET:
3602 case ARM::t2LDMIA_RET:
3611 if (!ItinData || ItinData->
isEmpty())
3615 unsigned Class =
Desc.getSchedClass();
3617 if (ItinUOps >= 0) {
3618 if (Subtarget.isSwift() && (
Desc.mayLoad() ||
Desc.mayStore()))
3624 unsigned Opc =
MI.getOpcode();
3643 case ARM::VLDMDIA_UPD:
3644 case ARM::VLDMDDB_UPD:
3646 case ARM::VLDMSIA_UPD:
3647 case ARM::VLDMSDB_UPD:
3649 case ARM::VSTMDIA_UPD:
3650 case ARM::VSTMDDB_UPD:
3652 case ARM::VSTMSIA_UPD:
3653 case ARM::VSTMSDB_UPD: {
3654 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3655 return (NumRegs / 2) + (NumRegs % 2) + 1;
3658 case ARM::LDMIA_RET:
3663 case ARM::LDMIA_UPD:
3664 case ARM::LDMDA_UPD:
3665 case ARM::LDMDB_UPD:
3666 case ARM::LDMIB_UPD:
3671 case ARM::STMIA_UPD:
3672 case ARM::STMDA_UPD:
3673 case ARM::STMDB_UPD:
3674 case ARM::STMIB_UPD:
3676 case ARM::tLDMIA_UPD:
3677 case ARM::tSTMIA_UPD:
3681 case ARM::t2LDMIA_RET:
3684 case ARM::t2LDMIA_UPD:
3685 case ARM::t2LDMDB_UPD:
3688 case ARM::t2STMIA_UPD:
3689 case ARM::t2STMDB_UPD: {
3690 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3691 switch (Subtarget.getLdStMultipleTiming()) {
3702 unsigned UOps = (NumRegs / 2);
3708 unsigned UOps = (NumRegs / 2);
3711 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3712 (*
MI.memoperands_begin())->getAlign() <
Align(8))
3722std::optional<unsigned>
3725 unsigned DefIdx,
unsigned DefAlign)
const {
3734 DefCycle = RegNo / 2 + 1;
3739 bool isSLoad =
false;
3744 case ARM::VLDMSIA_UPD:
3745 case ARM::VLDMSDB_UPD:
3752 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3756 DefCycle = RegNo + 2;
3762std::optional<unsigned>
3765 unsigned DefIdx,
unsigned DefAlign)
const {
3772 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3775 DefCycle = RegNo / 2;
3780 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3781 DefCycle = (RegNo / 2);
3784 if ((RegNo % 2) || DefAlign < 8)
3790 DefCycle = RegNo + 2;
3796std::optional<unsigned>
3799 unsigned UseIdx,
unsigned UseAlign)
const {
3805 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3807 UseCycle = RegNo / 2 + 1;
3810 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3812 bool isSStore =
false;
3817 case ARM::VSTMSIA_UPD:
3818 case ARM::VSTMSDB_UPD:
3825 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3829 UseCycle = RegNo + 2;
3835std::optional<unsigned>
3838 unsigned UseIdx,
unsigned UseAlign)
const {
3844 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3845 UseCycle = RegNo / 2;
3850 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3851 UseCycle = (RegNo / 2);
3854 if ((RegNo % 2) || UseAlign < 8)
3865 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
3866 unsigned UseIdx,
unsigned UseAlign)
const {
3876 std::optional<unsigned> DefCycle;
3877 bool LdmBypass =
false;
3884 case ARM::VLDMDIA_UPD:
3885 case ARM::VLDMDDB_UPD:
3887 case ARM::VLDMSIA_UPD:
3888 case ARM::VLDMSDB_UPD:
3889 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3892 case ARM::LDMIA_RET:
3897 case ARM::LDMIA_UPD:
3898 case ARM::LDMDA_UPD:
3899 case ARM::LDMDB_UPD:
3900 case ARM::LDMIB_UPD:
3902 case ARM::tLDMIA_UPD:
3904 case ARM::t2LDMIA_RET:
3907 case ARM::t2LDMIA_UPD:
3908 case ARM::t2LDMDB_UPD:
3910 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3918 std::optional<unsigned> UseCycle;
3925 case ARM::VSTMDIA_UPD:
3926 case ARM::VSTMDDB_UPD:
3928 case ARM::VSTMSIA_UPD:
3929 case ARM::VSTMSDB_UPD:
3930 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3937 case ARM::STMIA_UPD:
3938 case ARM::STMDA_UPD:
3939 case ARM::STMDB_UPD:
3940 case ARM::STMIB_UPD:
3941 case ARM::tSTMIA_UPD:
3946 case ARM::t2STMIA_UPD:
3947 case ARM::t2STMDB_UPD:
3948 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3956 if (UseCycle > *DefCycle + 1)
3957 return std::nullopt;
3959 UseCycle = *DefCycle - *UseCycle + 1;
3960 if (UseCycle > 0u) {
3966 UseCycle = *UseCycle - 1;
3968 UseClass, UseIdx)) {
3969 UseCycle = *UseCycle - 1;
3978 unsigned &DefIdx,
unsigned &Dist) {
3983 assert(
II->isInsideBundle() &&
"Empty bundle?");
3986 while (
II->isInsideBundle()) {
3987 Idx =
II->findRegisterDefOperandIdx(
Reg,
TRI,
false,
true);
3994 assert(Idx != -1 &&
"Cannot find bundled definition!");
4001 unsigned &UseIdx,
unsigned &Dist) {
4005 assert(
II->isInsideBundle() &&
"Empty bundle?");
4010 while (
II !=
E &&
II->isInsideBundle()) {
4011 Idx =
II->findRegisterUseOperandIdx(
Reg,
TRI,
false);
4014 if (
II->getOpcode() != ARM::t2IT)
4042 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4052 case ARM::t2LDRSHs: {
4054 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4055 if (ShAmt == 0 || ShAmt == 2)
4060 }
else if (Subtarget.
isSwift()) {
4067 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4072 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4083 case ARM::t2LDRSHs: {
4085 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4086 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4093 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4100 case ARM::VLD1q8wb_fixed:
4101 case ARM::VLD1q16wb_fixed:
4102 case ARM::VLD1q32wb_fixed:
4103 case ARM::VLD1q64wb_fixed:
4104 case ARM::VLD1q8wb_register:
4105 case ARM::VLD1q16wb_register:
4106 case ARM::VLD1q32wb_register:
4107 case ARM::VLD1q64wb_register:
4114 case ARM::VLD2d8wb_fixed:
4115 case ARM::VLD2d16wb_fixed:
4116 case ARM::VLD2d32wb_fixed:
4117 case ARM::VLD2q8wb_fixed:
4118 case ARM::VLD2q16wb_fixed:
4119 case ARM::VLD2q32wb_fixed:
4120 case ARM::VLD2d8wb_register:
4121 case ARM::VLD2d16wb_register:
4122 case ARM::VLD2d32wb_register:
4123 case ARM::VLD2q8wb_register:
4124 case ARM::VLD2q16wb_register:
4125 case ARM::VLD2q32wb_register:
4130 case ARM::VLD3d8_UPD:
4131 case ARM::VLD3d16_UPD:
4132 case ARM::VLD3d32_UPD:
4133 case ARM::VLD1d64Twb_fixed:
4134 case ARM::VLD1d64Twb_register:
4135 case ARM::VLD3q8_UPD:
4136 case ARM::VLD3q16_UPD:
4137 case ARM::VLD3q32_UPD:
4142 case ARM::VLD4d8_UPD:
4143 case ARM::VLD4d16_UPD:
4144 case ARM::VLD4d32_UPD:
4145 case ARM::VLD1d64Qwb_fixed:
4146 case ARM::VLD1d64Qwb_register:
4147 case ARM::VLD4q8_UPD:
4148 case ARM::VLD4q16_UPD:
4149 case ARM::VLD4q32_UPD:
4150 case ARM::VLD1DUPq8:
4151 case ARM::VLD1DUPq16:
4152 case ARM::VLD1DUPq32:
4153 case ARM::VLD1DUPq8wb_fixed:
4154 case ARM::VLD1DUPq16wb_fixed:
4155 case ARM::VLD1DUPq32wb_fixed:
4156 case ARM::VLD1DUPq8wb_register:
4157 case ARM::VLD1DUPq16wb_register:
4158 case ARM::VLD1DUPq32wb_register:
4159 case ARM::VLD2DUPd8:
4160 case ARM::VLD2DUPd16:
4161 case ARM::VLD2DUPd32:
4162 case ARM::VLD2DUPd8wb_fixed:
4163 case ARM::VLD2DUPd16wb_fixed:
4164 case ARM::VLD2DUPd32wb_fixed:
4165 case ARM::VLD2DUPd8wb_register:
4166 case ARM::VLD2DUPd16wb_register:
4167 case ARM::VLD2DUPd32wb_register:
4168 case ARM::VLD4DUPd8:
4169 case ARM::VLD4DUPd16:
4170 case ARM::VLD4DUPd32:
4171 case ARM::VLD4DUPd8_UPD:
4172 case ARM::VLD4DUPd16_UPD:
4173 case ARM::VLD4DUPd32_UPD:
4175 case ARM::VLD1LNd16:
4176 case ARM::VLD1LNd32:
4177 case ARM::VLD1LNd8_UPD:
4178 case ARM::VLD1LNd16_UPD:
4179 case ARM::VLD1LNd32_UPD:
4181 case ARM::VLD2LNd16:
4182 case ARM::VLD2LNd32:
4183 case ARM::VLD2LNq16:
4184 case ARM::VLD2LNq32:
4185 case ARM::VLD2LNd8_UPD:
4186 case ARM::VLD2LNd16_UPD:
4187 case ARM::VLD2LNd32_UPD:
4188 case ARM::VLD2LNq16_UPD:
4189 case ARM::VLD2LNq32_UPD:
4191 case ARM::VLD4LNd16:
4192 case ARM::VLD4LNd32:
4193 case ARM::VLD4LNq16:
4194 case ARM::VLD4LNq32:
4195 case ARM::VLD4LNd8_UPD:
4196 case ARM::VLD4LNd16_UPD:
4197 case ARM::VLD4LNd32_UPD:
4198 case ARM::VLD4LNq16_UPD:
4199 case ARM::VLD4LNq32_UPD:
4213 if (!ItinData || ItinData->
isEmpty())
4214 return std::nullopt;
4220 unsigned DefAdj = 0;
4221 if (
DefMI.isBundle())
4230 unsigned UseAdj = 0;
4231 if (
UseMI.isBundle()) {
4235 return std::nullopt;
4238 return getOperandLatencyImpl(
4239 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4240 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4243std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4245 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4247 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4248 if (Reg == ARM::CPSR) {
4249 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4251 return Subtarget.
isLikeA9() ? 1 : 20;
4255 if (
UseMI.isBranch())
4274 return std::nullopt;
4276 unsigned DefAlign =
DefMI.hasOneMemOperand()
4277 ? (*
DefMI.memoperands_begin())->getAlign().value()
4279 unsigned UseAlign =
UseMI.hasOneMemOperand()
4280 ? (*
UseMI.memoperands_begin())->getAlign().value()
4285 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4288 return std::nullopt;
4291 int Adj = DefAdj + UseAdj;
4295 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4302std::optional<unsigned>
4304 SDNode *DefNode,
unsigned DefIdx,
4305 SDNode *UseNode,
unsigned UseIdx)
const {
4311 if (isZeroCost(DefMCID.
Opcode))
4314 if (!ItinData || ItinData->
isEmpty())
4315 return DefMCID.
mayLoad() ? 3 : 1;
4318 std::optional<unsigned>
Latency =
4320 int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4321 int Threshold = 1 + Adj;
4327 unsigned DefAlign = !DefMN->memoperands_empty()
4328 ? (*DefMN->memoperands_begin())->getAlign().value()
4331 unsigned UseAlign = !UseMN->memoperands_empty()
4332 ? (*UseMN->memoperands_begin())->getAlign().value()
4335 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4337 return std::nullopt;
4340 (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4341 Subtarget.isCortexA7())) {
4358 case ARM::t2LDRSHs: {
4361 if (ShAmt == 0 || ShAmt == 2)
4366 }
else if (DefIdx == 0 &&
Latency > 2U && Subtarget.isSwift()) {
4376 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4393 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4400 case ARM::VLD1q8wb_register:
4401 case ARM::VLD1q16wb_register:
4402 case ARM::VLD1q32wb_register:
4403 case ARM::VLD1q64wb_register:
4404 case ARM::VLD1q8wb_fixed:
4405 case ARM::VLD1q16wb_fixed:
4406 case ARM::VLD1q32wb_fixed:
4407 case ARM::VLD1q64wb_fixed:
4411 case ARM::VLD2q8Pseudo:
4412 case ARM::VLD2q16Pseudo:
4413 case ARM::VLD2q32Pseudo:
4414 case ARM::VLD2d8wb_fixed:
4415 case ARM::VLD2d16wb_fixed:
4416 case ARM::VLD2d32wb_fixed:
4417 case ARM::VLD2q8PseudoWB_fixed:
4418 case ARM::VLD2q16PseudoWB_fixed:
4419 case ARM::VLD2q32PseudoWB_fixed:
4420 case ARM::VLD2d8wb_register:
4421 case ARM::VLD2d16wb_register:
4422 case ARM::VLD2d32wb_register:
4423 case ARM::VLD2q8PseudoWB_register:
4424 case ARM::VLD2q16PseudoWB_register:
4425 case ARM::VLD2q32PseudoWB_register:
4426 case ARM::VLD3d8Pseudo:
4427 case ARM::VLD3d16Pseudo:
4428 case ARM::VLD3d32Pseudo:
4429 case ARM::VLD1d8TPseudo:
4430 case ARM::VLD1d16TPseudo:
4431 case ARM::VLD1d32TPseudo:
4432 case ARM::VLD1d64TPseudo:
4433 case ARM::VLD1d64TPseudoWB_fixed:
4434 case ARM::VLD1d64TPseudoWB_register:
4435 case ARM::VLD3d8Pseudo_UPD:
4436 case ARM::VLD3d16Pseudo_UPD:
4437 case ARM::VLD3d32Pseudo_UPD:
4438 case ARM::VLD3q8Pseudo_UPD:
4439 case ARM::VLD3q16Pseudo_UPD:
4440 case ARM::VLD3q32Pseudo_UPD:
4441 case ARM::VLD3q8oddPseudo:
4442 case ARM::VLD3q16oddPseudo:
4443 case ARM::VLD3q32oddPseudo:
4444 case ARM::VLD3q8oddPseudo_UPD:
4445 case ARM::VLD3q16oddPseudo_UPD:
4446 case ARM::VLD3q32oddPseudo_UPD:
4447 case ARM::VLD4d8Pseudo:
4448 case ARM::VLD4d16Pseudo:
4449 case ARM::VLD4d32Pseudo:
4450 case ARM::VLD1d8QPseudo:
4451 case ARM::VLD1d16QPseudo:
4452 case ARM::VLD1d32QPseudo:
4453 case ARM::VLD1d64QPseudo:
4454 case ARM::VLD1d64QPseudoWB_fixed:
4455 case ARM::VLD1d64QPseudoWB_register:
4456 case ARM::VLD1q8HighQPseudo:
4457 case ARM::VLD1q8LowQPseudo_UPD:
4458 case ARM::VLD1q8HighTPseudo:
4459 case ARM::VLD1q8LowTPseudo_UPD:
4460 case ARM::VLD1q16HighQPseudo:
4461 case ARM::VLD1q16LowQPseudo_UPD:
4462 case ARM::VLD1q16HighTPseudo:
4463 case ARM::VLD1q16LowTPseudo_UPD:
4464 case ARM::VLD1q32HighQPseudo:
4465 case ARM::VLD1q32LowQPseudo_UPD:
4466 case ARM::VLD1q32HighTPseudo:
4467 case ARM::VLD1q32LowTPseudo_UPD:
4468 case ARM::VLD1q64HighQPseudo:
4469 case ARM::VLD1q64LowQPseudo_UPD:
4470 case ARM::VLD1q64HighTPseudo:
4471 case ARM::VLD1q64LowTPseudo_UPD:
4472 case ARM::VLD4d8Pseudo_UPD:
4473 case ARM::VLD4d16Pseudo_UPD:
4474 case ARM::VLD4d32Pseudo_UPD:
4475 case ARM::VLD4q8Pseudo_UPD:
4476 case ARM::VLD4q16Pseudo_UPD:
4477 case ARM::VLD4q32Pseudo_UPD:
4478 case ARM::VLD4q8oddPseudo:
4479 case ARM::VLD4q16oddPseudo:
4480 case ARM::VLD4q32oddPseudo:
4481 case ARM::VLD4q8oddPseudo_UPD:
4482 case ARM::VLD4q16oddPseudo_UPD:
4483 case ARM::VLD4q32oddPseudo_UPD:
4484 case ARM::VLD1DUPq8:
4485 case ARM::VLD1DUPq16:
4486 case ARM::VLD1DUPq32:
4487 case ARM::VLD1DUPq8wb_fixed:
4488 case ARM::VLD1DUPq16wb_fixed:
4489 case ARM::VLD1DUPq32wb_fixed:
4490 case ARM::VLD1DUPq8wb_register:
4491 case ARM::VLD1DUPq16wb_register:
4492 case ARM::VLD1DUPq32wb_register:
4493 case ARM::VLD2DUPd8:
4494 case ARM::VLD2DUPd16:
4495 case ARM::VLD2DUPd32:
4496 case ARM::VLD2DUPd8wb_fixed:
4497 case ARM::VLD2DUPd16wb_fixed:
4498 case ARM::VLD2DUPd32wb_fixed:
4499 case ARM::VLD2DUPd8wb_register:
4500 case ARM::VLD2DUPd16wb_register:
4501 case ARM::VLD2DUPd32wb_register:
4502 case ARM::VLD2DUPq8EvenPseudo:
4503 case ARM::VLD2DUPq8OddPseudo:
4504 case ARM::VLD2DUPq16EvenPseudo:
4505 case ARM::VLD2DUPq16OddPseudo:
4506 case ARM::VLD2DUPq32EvenPseudo:
4507 case ARM::VLD2DUPq32OddPseudo:
4508 case ARM::VLD3DUPq8EvenPseudo:
4509 case ARM::VLD3DUPq8OddPseudo:
4510 case ARM::VLD3DUPq16EvenPseudo:
4511 case ARM::VLD3DUPq16OddPseudo:
4512 case ARM::VLD3DUPq32EvenPseudo:
4513 case ARM::VLD3DUPq32OddPseudo:
4514 case ARM::VLD4DUPd8Pseudo:
4515 case ARM::VLD4DUPd16Pseudo:
4516 case ARM::VLD4DUPd32Pseudo:
4517 case ARM::VLD4DUPd8Pseudo_UPD:
4518 case ARM::VLD4DUPd16Pseudo_UPD:
4519 case ARM::VLD4DUPd32Pseudo_UPD:
4520 case ARM::VLD4DUPq8EvenPseudo:
4521 case ARM::VLD4DUPq8OddPseudo:
4522 case ARM::VLD4DUPq16EvenPseudo:
4523 case ARM::VLD4DUPq16OddPseudo:
4524 case ARM::VLD4DUPq32EvenPseudo:
4525 case ARM::VLD4DUPq32OddPseudo:
4526 case ARM::VLD1LNq8Pseudo:
4527 case ARM::VLD1LNq16Pseudo:
4528 case ARM::VLD1LNq32Pseudo:
4529 case ARM::VLD1LNq8Pseudo_UPD:
4530 case ARM::VLD1LNq16Pseudo_UPD:
4531 case ARM::VLD1LNq32Pseudo_UPD:
4532 case ARM::VLD2LNd8Pseudo:
4533 case ARM::VLD2LNd16Pseudo:
4534 case ARM::VLD2LNd32Pseudo:
4535 case ARM::VLD2LNq16Pseudo:
4536 case ARM::VLD2LNq32Pseudo:
4537 case ARM::VLD2LNd8Pseudo_UPD:
4538 case ARM::VLD2LNd16Pseudo_UPD:
4539 case ARM::VLD2LNd32Pseudo_UPD:
4540 case ARM::VLD2LNq16Pseudo_UPD:
4541 case ARM::VLD2LNq32Pseudo_UPD:
4542 case ARM::VLD4LNd8Pseudo:
4543 case ARM::VLD4LNd16Pseudo:
4544 case ARM::VLD4LNd32Pseudo:
4545 case ARM::VLD4LNq16Pseudo:
4546 case ARM::VLD4LNq32Pseudo:
4547 case ARM::VLD4LNd8Pseudo_UPD:
4548 case ARM::VLD4LNd16Pseudo_UPD:
4549 case ARM::VLD4LNd32Pseudo_UPD:
4550 case ARM::VLD4LNq16Pseudo_UPD:
4551 case ARM::VLD4LNq32Pseudo_UPD:
4561unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4562 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4571 if (
MCID.isCall() || (
MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4572 !Subtarget.cheapPredicableCPSRDef())) {
4582 unsigned *PredCost)
const {
4583 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4589 if (
MI.isBundle()) {
4593 while (++
I !=
E &&
I->isInsideBundle()) {
4594 if (
I->getOpcode() != ARM::t2IT)
4595 Latency += getInstrLatency(ItinData, *
I, PredCost);
4600 const MCInstrDesc &MCID =
MI.getDesc();
4602 !Subtarget.cheapPredicableCPSRDef()))) {
4610 return MI.mayLoad() ? 3 : 1;
4623 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->getAlign().value() : 0;
4625 if (Adj >= 0 || (
int)
Latency > -Adj) {
4633 if (!
Node->isMachineOpcode())
4636 if (!ItinData || ItinData->
isEmpty())
4639 unsigned Opcode =
Node->getMachineOpcode();
4649bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4654 unsigned UseIdx)
const {
4657 if (Subtarget.nonpipelinedVFP() &&
4672 unsigned DefIdx)
const {
4674 if (!ItinData || ItinData->
isEmpty())
4679 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4680 std::optional<unsigned> DefCycle =
4682 return DefCycle && DefCycle <= 2U;
4690 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4693 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4695 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4696 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4697 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4701 if (
MI.getOpcode() == ARM::tPUSH ||
4702 MI.getOpcode() == ARM::tPOP ||
4703 MI.getOpcode() == ARM::tPOP_RET) {
4705 if (MO.isImplicit() || !MO.isReg())
4709 if (!(
MI.getOpcode() == ARM::tPUSH &&
Reg == ARM::LR) &&
4710 !(
MI.getOpcode() == ARM::tPOP_RET &&
Reg == ARM::PC)) {
4711 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4717 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4718 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4719 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4720 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4721 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4742 for (
auto Op :
MI.operands()) {
4749 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4759 unsigned LoadImmOpc,
4760 unsigned LoadOpc)
const {
4761 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4762 "ROPI/RWPI not currently supported with stack guard");
4770 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4771 assert(!Subtarget.isReadTPSoft() &&
4772 "TLS stack protector requires hardware TLS register");
4782 Module &M = *
MBB.getParent()->getFunction().getParent();
4783 Offset = M.getStackProtectorGuardOffset();
4788 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4799 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4802 if (Subtarget.isTargetMachO()) {
4804 }
else if (Subtarget.isTargetCOFF()) {
4807 else if (IsIndirect)
4809 }
else if (IsIndirect) {
4813 if (LoadImmOpc == ARM::tMOVi32imm) {
4816 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
4852 unsigned &AddSubOpc,
4853 bool &NegAcc,
bool &HasLane)
const {
4855 if (
I == MLxEntryMap.end())
4859 MulOpc = Entry.MulOpc;
4860 AddSubOpc = Entry.AddSubOpc;
4861 NegAcc = Entry.NegAcc;
4862 HasLane = Entry.HasLane;
4886std::pair<uint16_t, uint16_t>
4890 if (Subtarget.hasNEON()) {
4899 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
4900 MI.getOpcode() == ARM::VMOVS))
4907 return std::make_pair(
ExeNEON, 0);
4912 return std::make_pair(
ExeNEON, 0);
4915 return std::make_pair(
ExeVFP, 0);
4921 unsigned SReg,
unsigned &Lane) {
4923 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4930 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4932 assert(DReg &&
"S-register with no D super-register?");
4957 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
4963 ImplicitSReg =
TRI->getSubReg(DReg,
4964 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4966 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
4981 unsigned DstReg, SrcReg;
4986 switch (
MI.getOpcode()) {
4998 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
5001 DstReg =
MI.getOperand(0).getReg();
5002 SrcReg =
MI.getOperand(1).getReg();
5004 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5005 MI.removeOperand(i - 1);
5008 MI.setDesc(
get(ARM::VORRd));
5020 DstReg =
MI.getOperand(0).getReg();
5021 SrcReg =
MI.getOperand(1).getReg();
5023 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5024 MI.removeOperand(i - 1);
5031 MI.setDesc(
get(ARM::VGETLNi32));
5047 DstReg =
MI.getOperand(0).getReg();
5048 SrcReg =
MI.getOperand(1).getReg();
5056 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5057 MI.removeOperand(i - 1);
5061 MI.setDesc(
get(ARM::VSETLNi32));
5080 DstReg =
MI.getOperand(0).getReg();
5081 SrcReg =
MI.getOperand(1).getReg();
5083 unsigned DstLane = 0, SrcLane = 0;
5092 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5093 MI.removeOperand(i - 1);
5098 MI.setDesc(
get(ARM::VDUPLN32d));
5132 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5133 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5136 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5137 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5142 if (SrcLane == DstLane)
5145 MI.setDesc(
get(ARM::VEXTd32));
5150 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5151 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5154 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5155 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5160 if (SrcLane != DstLane)
5166 if (ImplicitSReg != 0)
5192 auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5193 if (!PartialUpdateClearance)
5204 switch (
MI.getOpcode()) {
5210 case ARM::VMOVv4i16:
5211 case ARM::VMOVv2i32:
5212 case ARM::VMOVv2f32:
5213 case ARM::VMOVv1i64:
5214 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5218 case ARM::VLD1LNd32:
5227 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5231 if (Reg.isVirtual()) {
5233 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5235 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5238 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5239 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5245 return PartialUpdateClearance;
5252 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5257 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5258 unsigned DReg = Reg;
5261 if (ARM::SPRRegClass.
contains(Reg)) {
5262 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5263 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5266 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5267 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5280 MI.addRegisterKilled(DReg,
TRI,
true);
5284 return Subtarget.hasFeature(ARM::HasV6KOps);
5288 if (
MI->getNumOperands() < 4)
5290 unsigned ShOpVal =
MI->getOperand(3).getImm();
5294 ((ShImm == 1 || ShImm == 2) &&
5304 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5305 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5307 switch (
MI.getOpcode()) {
5319 MOReg = &
MI.getOperand(2);
5331 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5332 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5334 switch (
MI.getOpcode()) {
5345 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5354 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5355 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5357 switch (
MI.getOpcode()) {
5358 case ARM::VSETLNi32:
5359 case ARM::MVE_VMOV_to_lane_32:
5367 BaseReg.Reg = MOBaseReg.
getReg();
5370 InsertedReg.
Reg = MOInsertedReg.
getReg();
5378std::pair<unsigned, unsigned>
5381 return std::make_pair(TF & Mask, TF & ~Mask);
5386 using namespace ARMII;
5388 static const std::pair<unsigned, const char *> TargetFlags[] = {
5389 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5390 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5391 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5398 using namespace ARMII;
5400 static const std::pair<unsigned, const char *> TargetFlags[] = {
5401 {MO_COFFSTUB,
"arm-coffstub"},
5402 {MO_GOT,
"arm-got"},
5403 {MO_SBREL,
"arm-sbrel"},
5404 {MO_DLLIMPORT,
"arm-dllimport"},
5405 {MO_SECREL,
"arm-secrel"},
5406 {MO_NONLAZY,
"arm-nonlazy"}};
5410std::optional<RegImmPair>
5413 unsigned Opcode =
MI.getOpcode();
5420 return std::nullopt;
5423 if (Opcode == ARM::SUBri)
5425 else if (Opcode != ARM::ADDri)
5426 return std::nullopt;
5431 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5432 return std::nullopt;
5434 Offset =
MI.getOperand(2).getImm() * Sign;
5442 for (
auto I = From;
I != To; ++
I)
5443 if (
I->modifiesRegister(Reg,
TRI))
5456 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5458 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5464 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5466 Register Reg = CmpMI->getOperand(0).getReg();
5469 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5482 if (Subtarget->isThumb()) {
5484 return ForCodesize ? 2 : 1;
5485 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5488 return ForCodesize ? 4 : 1;
5490 return ForCodesize ? 4 : 2;
5492 return ForCodesize ? 4 : 2;
5494 return ForCodesize ? 4 : 2;
5497 return ForCodesize ? 4 : 1;
5499 return ForCodesize ? 4 : 1;
5500 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5501 return ForCodesize ? 4 : 1;
5503 return ForCodesize ? 8 : 2;
5505 return ForCodesize ? 8 : 2;
5508 return ForCodesize ? 8 : 2;
5509 return ForCodesize ? 8 : 3;
5673 MachineFunction *MF =
C.getMF();
5675 const ARMBaseRegisterInfo *ARI =
5676 static_cast<const ARMBaseRegisterInfo *
>(&
TRI);
5685 C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
5686 C.isAvailableInsideSeq(
Reg,
TRI))
5700 for (;
I !=
E; ++
I) {
5704 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5708 unsigned Opcode =
MI.getOpcode();
5709 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5710 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5711 Opcode == ARM::tBXNS_RET) {
5717 if (
MI.readsRegister(ARM::LR, &
TRI))
5723std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5726 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5727 unsigned MinRepeats)
const {
5728 unsigned SequenceSize = 0;
5729 for (
auto &
MI : RepeatedSequenceLocs[0])
5733 unsigned FlagsSetInAll = 0xF;
5738 FlagsSetInAll &=
C.Flags;
5757 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5765 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5768 if (RepeatedSequenceLocs.size() < MinRepeats)
5769 return std::nullopt;
5788 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5789 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5790 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5792 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5794 if (RepeatedSequenceLocs.size() < MinRepeats)
5795 return std::nullopt;
5805 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5806 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5807 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5809 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5811 if (RepeatedSequenceLocs.size() < MinRepeats)
5812 return std::nullopt;
5817 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5820 auto SetCandidateCallInfo =
5821 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5823 C.setCallInfo(CallID, NumBytesForCall);
5828 const auto &SomeMFI =
5831 if (SomeMFI.branchTargetEnforcement()) {
5840 if (SomeMFI.shouldSignReturnAddress(
true)) {
5850 if (RepeatedSequenceLocs[0].back().isTerminator()) {
5854 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5855 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5856 LastInstrOpcode == ARM::tBLXr ||
5857 LastInstrOpcode == ARM::tBLXr_noip ||
5858 LastInstrOpcode == ARM::tBLXi) {
5866 unsigned NumBytesNoStackCalls = 0;
5867 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5872 const auto Last =
C.getMBB()->rbegin();
5873 const bool LRIsAvailable =
5874 C.getMBB()->isReturnBlock() && !
Last->isCall()
5877 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
5878 if (LRIsAvailable) {
5882 CandidatesWithoutStackFixups.push_back(
C);
5887 else if (findRegisterToSaveLRTo(
C)) {
5891 CandidatesWithoutStackFixups.push_back(
C);
5896 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
5899 CandidatesWithoutStackFixups.push_back(
C);
5905 NumBytesNoStackCalls += SequenceSize;
5911 if (NumBytesNoStackCalls <=
5912 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
5913 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5915 if (RepeatedSequenceLocs.size() < MinRepeats)
5916 return std::nullopt;
5941 return std::make_unique<outliner::OutlinedFunction>(
5942 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
5945bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
5948 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
5973 unsigned NumOps =
MI->getDesc().getNumOperands();
5974 unsigned ImmIdx =
NumOps - 3;
5978 int64_t OffVal =
Offset.getImm();
5984 unsigned NumBits = 0;
6013 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6033 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6034 "Can't encode this offset!");
6035 OffVal +=
Fixup / Scale;
6037 unsigned Mask = (1 << NumBits) - 1;
6039 if (OffVal <= Mask) {
6041 MI->getOperand(ImmIdx).setImm(OffVal);
6049 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6053 const Function &CFn =
C.getMF()->getFunction();
6060 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6068 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6087 unsigned &Flags)
const {
6090 assert(
MBB.getParent()->getRegInfo().tracksLiveness() &&
6091 "Suitable Machine Function for outlining must track liveness");
6099 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6100 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6104 if (R12AvailableInBlock && CPSRAvailableInBlock)
6112 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6114 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6124 bool LRIsAvailable =
6125 MBB.isReturnBlock() && !
MBB.back().isCall()
6137 unsigned Flags)
const {
6143 unsigned Opc =
MI.getOpcode();
6144 if (
Opc == ARM::tPICADD ||
Opc == ARM::PICADD ||
Opc == ARM::PICSTR ||
6145 Opc == ARM::PICSTRB ||
Opc == ARM::PICSTRH ||
Opc == ARM::PICLDR ||
6146 Opc == ARM::PICLDRB ||
Opc == ARM::PICLDRH ||
Opc == ARM::PICLDRSB ||
6147 Opc == ARM::PICLDRSH ||
Opc == ARM::t2LDRpci_pic ||
6148 Opc == ARM::t2MOVi16_ga_pcrel ||
Opc == ARM::t2MOVTi16_ga_pcrel ||
6149 Opc == ARM::t2MOV_ga_pcrel)
6153 if (
Opc == ARM::t2BF_LabelPseudo ||
Opc == ARM::t2DoLoopStart ||
6154 Opc == ARM::t2DoLoopStartTP ||
Opc == ARM::t2WhileLoopStart ||
6155 Opc == ARM::t2WhileLoopStartLR ||
Opc == ARM::t2WhileLoopStartTP ||
6156 Opc == ARM::t2LoopDec ||
Opc == ARM::t2LoopEnd ||
6157 Opc == ARM::t2LoopEndDec)
6166 if (
MI.isTerminator())
6172 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6180 if (MOP.isGlobal()) {
6189 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6190 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6198 if (
Opc == ARM::BL ||
Opc == ARM::tBL ||
Opc == ARM::BLX ||
6199 Opc == ARM::BLX_noip ||
Opc == ARM::tBLXr ||
Opc == ARM::tBLXr_noip ||
6204 return UnknownCallOutlineType;
6212 return UnknownCallOutlineType;
6220 return UnknownCallOutlineType;
6228 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6232 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6245 bool MightNeedStackFixUp =
6249 if (!MightNeedStackFixUp)
6255 if (
MI.modifiesRegister(ARM::SP,
TRI))
6260 if (checkAndUpdateStackOffset(&
MI, Subtarget.getStackAlignment().value(),
6269 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6270 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6274 if (
MI.isCFIInstruction())
6289 int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6291 assert(Align >= 8 && Align <= 256);
6293 assert(Subtarget.isThumb2());
6305 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6319 CFIBuilder.buildDefCFAOffset(Align);
6324 CFIBuilder.buildOffset(ARM::LR, -LROffset);
6327 CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -Align);
6333 bool CFI,
bool Auth)
const {
6334 int Align = Subtarget.getStackAlignment().value();
6337 assert(Subtarget.isThumb2());
6349 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6353 if (!Subtarget.isThumb())
6355 MIB.
addImm(Subtarget.getStackAlignment().value())
6363 CFIBuilder.buildDefCFAOffset(0);
6364 CFIBuilder.buildRestore(ARM::LR);
6366 CFIBuilder.buildUndefined(ARM::RA_AUTH_CODE);
6380 bool isThumb = Subtarget.isThumb();
6381 unsigned FuncOp =
isThumb ? 2 : 0;
6382 unsigned Opc =
Call->getOperand(FuncOp).isReg()
6383 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6384 :
isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6388 .
add(
Call->getOperand(FuncOp));
6391 Call->eraseFromParent();
6396 return MI.isCall() && !
MI.isReturn();
6404 Et = std::prev(
MBB.end());
6409 if (!
MBB.isLiveIn(ARM::LR))
6410 MBB.addLiveIn(ARM::LR);
6414 saveLROnStack(
MBB, It,
true, Auth);
6419 "Can only fix up stack references once");
6420 fixupPostOutline(
MBB);
6423 restoreLRFromStack(
MBB, Et,
true, Auth);
6443 fixupPostOutline(
MBB);
6452 bool isThumb = Subtarget.isThumb();
6458 ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6464 It =
MBB.insert(It, MIB);
6478 It =
MBB.insert(It, CallMIB);
6485 Register Reg = findRegisterToSaveLRTo(
C);
6486 assert(Reg != 0 &&
"No callee-saved register available?");
6493 CallPt =
MBB.insert(It, CallMIB);
6501 if (!
MBB.isLiveIn(ARM::LR))
6502 MBB.addLiveIn(ARM::LR);
6505 CallPt =
MBB.insert(It, CallMIB);
6516bool ARMBaseInstrInfo::isReMaterializableImpl(
6550 static int constexpr MAX_STAGES = 30;
6551 static int constexpr LAST_IS_USE = MAX_STAGES;
6552 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6553 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6554 typedef std::map<Register, IterNeed> IterNeeds;
6557 const IterNeeds &CIN);
6569 : EndLoop(EndLoop), LoopCount(LoopCount),
6571 TII(MF->getSubtarget().getInstrInfo()) {}
6573 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6575 return MI == EndLoop ||
MI == LoopCount;
6578 bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
override {
6579 if (tooMuchRegisterPressure(SSD, SMS))
6585 std::optional<bool> createTripCountGreaterCondition(
6586 int TC, MachineBasicBlock &
MBB,
6587 SmallVectorImpl<MachineOperand> &
Cond)
override {
6596 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6599 MachineInstr *LoopDec =
nullptr;
6601 if (
I.getOpcode() == ARM::t2LoopDec)
6603 assert(LoopDec &&
"Unable to find copied LoopDec");
6609 .
addReg(ARM::NoRegister);
6617 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
6619 void adjustTripCount(
int TripCountAdjust)
override {}
6623 const IterNeeds &CIN) {
6625 for (
const auto &
N : CIN) {
6626 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6627 for (
int I = 0;
I < Cnt; ++
I)
6632 for (
const auto &
N : CIN) {
6633 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6634 for (
int I = 0;
I < Cnt; ++
I)
6642 IterNeeds CrossIterationNeeds;
6647 for (
auto &SU : SSD.
SUnits) {
6650 for (
auto &S : SU.Succs)
6654 CrossIterationNeeds[
Reg.
id()].set(0);
6655 }
else if (S.isAssignedRegDep()) {
6657 if (OStg >= 0 && OStg != Stg) {
6660 CrossIterationNeeds[
Reg.
id()] |= ((1 << (OStg - Stg)) - 1);
6669 std::vector<SUnit *> ProposedSchedule;
6673 std::deque<SUnit *> Instrs =
6675 std::sort(Instrs.begin(), Instrs.end(),
6676 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6683 for (
auto *SU : ProposedSchedule)
6687 if (!MO.isReg() || !MO.getReg())
6690 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6691 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6692 CIter->second[SEEN_AS_LIVE])
6694 if (MO.isDef() && !MO.isDead())
6695 CIter->second.set(SEEN_AS_LIVE);
6696 else if (MO.isUse())
6697 CIter->second.set(LAST_IS_USE);
6699 for (
auto &CI : CrossIterationNeeds)
6700 CI.second.reset(LAST_IS_USE);
6706 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6709 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6711 for (
auto *SU : ProposedSchedule) {
6713 RPTracker.setPos(std::next(CurInstI));
6719 if (!MO.isReg() || !MO.getReg())
6722 if (MO.isDef() && !MO.isDead()) {
6723 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6724 if (CIter != CrossIterationNeeds.end()) {
6725 CIter->second.reset(0);
6726 CIter->second.reset(SEEN_AS_LIVE);
6730 for (
auto &S : SU->Preds) {
6732 if (S.isAssignedRegDep()) {
6734 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6735 if (CIter != CrossIterationNeeds.end()) {
6737 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6738 if (Stg - Stg2 < MAX_STAGES)
6739 CIter->second.set(Stg - Stg2);
6740 CIter->second.set(SEEN_AS_LIVE);
6745 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6748 auto &
P = RPTracker.getPressure().MaxSetPressure;
6749 for (
unsigned I = 0,
E =
P.size();
I <
E; ++
I) {
6751 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6752 I == ARM::DTriple_with_qsub_0_in_QPR)
6764std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6768 if (Preheader == LoopBB)
6769 Preheader = *std::next(LoopBB->
pred_begin());
6771 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6777 for (
auto &L : LoopBB->
instrs()) {
6784 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
6798 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
6799 for (
auto &L : LoopBB->
instrs())
6804 Register LoopDecResult =
I->getOperand(0).getReg();
6807 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
6810 for (
auto &J : Preheader->
instrs())
6811 if (J.getOpcode() == ARM::t2DoLoopStart)
6815 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State) const
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
const ARMBaseRegisterInfo & getRegisterInfo() const
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ARMBaseInstrInfo(const ARMSubtarget &STI, const ARMBaseRegisterInfo &TRI)
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
Helper class for creating CFI instructions and inserting them into MIR.
void buildRegister(MCRegister Reg1, MCRegister Reg2) const
void buildRestore(MCRegister Reg) const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasDLLImportStorageClass() const
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
LLVM_ABI void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getOpcode() const
Return the opcode number for this descriptor.
LLVM_ABI bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineInstrBundleIterator< MachineInstr > iterator
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void increaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
LLVM_ABI void decreaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Wrapper class representing a virtual register or register unit.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static CondCodes getOppositeCondition(CondCodes CC)
ARMII - This namespace holds all of the target specific flags that instruction info tracks.
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting an 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
Define some predicates that are used for node matching.
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
constexpr T rotr(T V, int R)
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
unsigned getBLXpredOpcode(const MachineFunction &MF)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
unsigned getUndefRegState(bool B)
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
FunctionAddr VTableAddr Next
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.