#define DEBUG_TYPE "gi-combiner"

using namespace MIPatternMatch;
static cl::opt<bool>
    ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));
      MDT(MDT), LI(LI), RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");

  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
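// The first assert guards the little-endian byte index helper (an identity
// mapping); the return above converts a little-endian byte position I into
// the corresponding big-endian position, e.g. byte 0 of a 4-byte value maps
// to byte 3.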
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
      return None;
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    LittleEndian &= Idx == littleEndianByteIdx(Width, MemOffset);
    BigEndian &= Idx == bigEndianByteIdx(Width, MemOffset);
    if (!BigEndian && !LittleEndian)
      return None;
  }

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  assert(LI && "Must have LegalizerInfo to query isLegal!");

  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
                                        unsigned ToOpcode) const {

  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;

  MI.eraseFromParent();

  bool IsUndef = false;
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");

    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
        Ops.push_back(BuildVecMO.getReg());
    case TargetOpcode::G_IMPLICIT_DEF: {
             "All undefs should have the same type");
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");

  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
    return false;

  if (DstNumElts % SrcNumElts != 0)
    return false;

  unsigned NumConcat = DstNumElts / SrcNumElts;

  for (unsigned i = 0; i != DstNumElts; ++i) {
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
      return false;
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }

  for (auto Src : ConcatSrcs) {
      Ops.push_back(UndefReg);

  MI.eraseFromParent();
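  // A shuffle whose mask simply tiles whole copies of the source vectors
  // (each group of SrcNumElts mask entries reads one source in order) is
  // rewritten as a G_CONCAT_VECTORS of the selected sources, with an undef
  // operand for groups whose mask entries are all negative.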
                                          const LLT TyForCandidate,
                                          unsigned OpcodeForCandidate,

    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer anything over an anyext.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  if (CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
static void InsertInsnsWithoutSideEffectsBeforeUse(

      InsertBB = PredBB->getMBB();

  if (InsertBB == DefMI.getParent()) {
    Inserter(InsertBB, std::next(InsertPt), UseMO);
  unsigned PreferredOpcode =
      isa<GLoad>(&MI)
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};

    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
      const auto &MMO = LoadMI->getMMO();
      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
        continue;

      if (LI->getAction({LoadMI->getOpcode(), {UseTy, SrcTy}, {MMDesc}})

      Preferred = ChoosePreferredUse(Preferred,

  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
  if (PreviouslyEmitted) {

  EmittedInsns[InsertIntoBB] = NewMI;

                                   ? TargetOpcode::G_SEXTLOAD
                                   : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
                                         ? TargetOpcode::G_ZEXTLOAD
                                         : TargetOpcode::G_LOAD));

  auto &LoadValue = MI.getOperand(0);
    Uses.push_back(&UseMO);

  for (auto *UseMO : Uses) {
    if (UseDstReg != ChosenDstReg) {
      if (Preferred.Ty == UseDstTy) {
        InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,

        InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
                                               InsertTruncAt);

  MI.getOperand(0).setReg(ChosenDstReg);
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  APInt MaskVal = MaybeMask->Value;

  GAnyLoad *LoadMI = getOpcodeDef<GAnyLoad>(SrcReg, MRI);

  if (MaskSizeBits > LoadSizeBits)
    return false;

          {TargetOpcode::G_ZEXTLOAD, {LoadTy, MRI.getType(PtrReg)}, {MemDesc}}))

    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();
    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MaskSizeBits / 8);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
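  // and(load, 2^N - 1): when the mask is a run of trailing ones no wider than
  // the loaded value, the G_AND and the load are folded into a single
  // zero-extending load of MaskSizeBits / 8 bytes, provided that load is
  // legal for the target.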
746 "shouldn't consider debug uses");
754 if (DefOrUse ==
MBB.
end())
756 return &*DefOrUse == &
DefMI;
762 "shouldn't consider debug uses");
765 else if (
DefMI.getParent() !=
UseMI.getParent())
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  uint64_t SizeInBits = MI.getOperand(2).getImm();
  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    auto LoadSizeBits = LoadMI->getMemSizeInBits();
    if (LoadSizeBits == SizeInBits)

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
      !LoadDef->isSimple())
                     LoadDef->getMemSizeInBits());

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;

  auto &MMO = LoadDef->getMMO();
  auto PtrInfo = MMO.getPointerInfo();

  MI.eraseFromParent();
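  // sext_inreg(load) is folded into a sign-extending load of the narrower
  // width recorded in MatchInfo; the match above only fires for simple
  // (non-atomic, non-volatile) loads.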
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);

  Base = MI.getOperand(1).getReg();
  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
    return false;

  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);

    if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
      continue;

    Offset = Use.getOperand(2).getReg();
        !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre=*/false, MRI)) {
      LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "

      LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "

    bool MemOpDominatesAddrUses = true;
    for (auto &PtrAddUse :
        MemOpDominatesAddrUses = false;

    if (!MemOpDominatesAddrUses) {
      LLVM_DEBUG(
          dbgs() << " Ignoring candidate as memop does not dominate uses: "

    Addr = Use.getOperand(0).getReg();
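  // Post-index candidate: a G_PTR_ADD user of the base pointer whose offset
  // is legal for the target's post-incremented addressing mode and whose
  // result uses are all dominated by the memory operation, so the pointer add
  // can be folded into an indexed load/store.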
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);

  Addr = MI.getOperand(1).getReg();
      !TLI.isIndexingLegal(MI, Base, Offset, true, MRI)) {

  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");

  if (MI.getOpcode() == TargetOpcode::G_STORE) {
    if (Base == MI.getOperand(0).getReg()) {
      LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");

  if (MI.getOperand(0).getReg() == Addr) {
    LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");

      LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
  unsigned Opcode = MI.getOpcode();
  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
      Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
    return false;

  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,

  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;
  case TargetOpcode::G_LOAD:
    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
    break;
  case TargetOpcode::G_SEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
    break;
  case TargetOpcode::G_STORE:
    NewOpcode = TargetOpcode::G_INDEXED_STORE;
    break;

    MIB.addUse(MI.getOperand(0).getReg());

    MIB.addDef(MI.getOperand(0).getReg());

  MI.eraseFromParent();
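  // The matched load/store is replaced with the corresponding G_INDEXED_*
  // instruction: stores keep the stored value as a use, loads keep the loaded
  // value as a def, and the pointer add producing the address is folded in as
  // the base/offset pair (pre- or post-indexed per MatchInfo.IsPre).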
  unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;

  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsDiv = true;
    IsSigned = Opcode == TargetOpcode::G_SDIV;
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsDiv = false;
    IsSigned = Opcode == TargetOpcode::G_SREM;

  unsigned DivOpcode, RemOpcode, DivremOpcode;
  if (IsSigned) {
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;
  } else {
    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;
  }

    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&

  unsigned Opcode = MI.getOpcode();
  assert(OtherMI && "OtherMI shouldn't be empty.");

  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();
    DestRemReg = MI.getOperand(0).getReg();

      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;

                          : TargetOpcode::G_UDIVREM,
                   {DestDivReg, DestRemReg},
                   {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
  MI.eraseFromParent();
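  // A G_SDIV/G_UDIV and a matching G_SREM/G_UREM on the same operands in the
  // same block are combined into a single G_SDIVREM/G_UDIVREM that defines
  // both the quotient and the remainder.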
  assert(MI.getOpcode() == TargetOpcode::G_BR);

  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  return BrCondTarget != MI.getOperand(0).getMBB() &&

  MI.getOperand(0).setMBB(FallthroughBB);

  return Helper.lowerMemcpyInline(MI) ==
         LegalizerHelper::LegalizeResult::Legalized;

         LegalizerHelper::LegalizeResult::Legalized;
  case TargetOpcode::G_FNEG: {
  case TargetOpcode::G_FABS: {
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FSQRT: {
  case TargetOpcode::G_FLOG2: {

  assert(Cst && "Optional is unexpectedly empty!");

  MI.eraseFromParent();
  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();
    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
                                 MF.getFunction().getContext());

  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
  AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
      !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
    return false;

  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");

  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
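  // ptr_add(ptr_add(X, C1), C2) is folded to ptr_add(X, C1 + C2), but only
  // when the combined offset is still a legal addressing mode for the
  // load/store users (checked with isLegalAddressingMode above).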
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

      (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();

  if (Opcode == TargetOpcode::G_USHLSAT &&

  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  auto Imm = MatchInfo.Imm;
  if (Imm >= ScalarSizeInBits) {
    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
      MI.eraseFromParent();

    Imm = ScalarSizeInBits - 1;

  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
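  // Two shifts by immediates are merged into one shift by the summed amount;
  // when the sum reaches the scalar width, logical shifts are replaced by a
  // constant zero (the erase above) and the remaining cases are clamped to
  // ScalarSizeInBits - 1.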
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  Register LogicDest = MI.getOperand(1).getReg();

  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)

  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

    if (MI->getOpcode() != ShiftOpcode ||

    ShiftVal = MaybeImmVal->Value.getSExtValue();

  if (matchFirstShift(LogicMIOp1, C0Val)) {
    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {
    MatchInfo.Shift2 = LogicMIOp2;

  MatchInfo.ValSum = C0Val + C1Val;

  MatchInfo.Logic = LogicMI;

  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  Register Shift2Const = MI.getOperand(2).getReg();

  MI.eraseFromParent();
                                        unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);

                                        unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
  MI.getOperand(2).setReg(ShiftCst.getReg(0));
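  // mul X, (1 << N) is strength-reduced to shl X, N: exactLogBase2() returns
  // -1 unless the constant is a power of two, and the apply step retags the
  // G_MUL as G_SHL with the log2 value as the new shift amount.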
  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);

  if (!MaybeShiftAmtVal)
    return false;

  int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;

  return MinLeadingZeros >= ShiftAmt;

  int64_t ShiftAmtVal = MatchData.Imm;

  MI.eraseFromParent();
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)

  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
    return false;

  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))
      return false;

  MatchInfo = Unmerge->getSourceReg();
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  auto &Unmerge = cast<GUnmerge>(MI);

  auto *SrcInstr = getOpcodeDef<GMergeLikeOp>(SrcReg, MRI);

  if (SrcMergeTy != Dst0Ty && !SameSize)

  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
    Operands.push_back(SrcInstr->getSourceReg(Idx));

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  bool CanReuseInputDirectly = DstTy == SrcTy;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    if (CanReuseInputDirectly)

  MI.eraseFromParent();
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)

  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
    Val = Val.lshr(ShiftAmt);

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  assert((MI.getNumOperands() - 1 == Csts.size()) &&
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();

  MI.eraseFromParent();

  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    B.buildUndef(DstReg);
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();

  Register Dst0Reg = MI.getOperand(0).getReg();

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  Register Dst0Reg = MI.getOperand(0).getReg();
         "Expecting a G_ZEXT");

         "ZExt src doesn't fit in destination");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

  MI.eraseFromParent();
                                              unsigned TargetShiftSize,
                                              unsigned &ShiftVal) {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) &&
         "Expected a shift");

  if (Size <= TargetShiftSize)

  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;

                                              const unsigned &ShiftVal) {
  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);

  unsigned NarrowShiftAmt = ShiftVal - HalfSize;

  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);
    if (NarrowShiftAmt != 0) {
  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);
    if (NarrowShiftAmt != 0) {

    assert(MI.getOpcode() == TargetOpcode::G_ASHR);
                                    HalfTy, Unmerge.getReg(1),
    if (ShiftVal == HalfSize) {
    } else if (ShiftVal == Size - 1) {
                                      HalfTy, Unmerge.getReg(1),

  MI.eraseFromParent();
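  // A wide shift whose amount is at least half the bit width is rewritten as
  // a G_UNMERGE_VALUES into two halves plus a narrow shift: G_LSHR keeps the
  // high half, G_SHL keeps the low half, and G_ASHR re-uses the high half
  // (or just its sign bits when the amount is Size - 1).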
                                             unsigned TargetShiftAmount) {

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");

  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_ADD);

  PtrReg.second = false;

  PtrReg.second = true;

  const bool DoCommute = PtrReg.second;

  MI.eraseFromParent();

  auto &PtrAdd = cast<GPtrAdd>(MI);

  NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());

  auto &PtrAdd = cast<GPtrAdd>(MI);

  PtrAdd.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");

  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");

  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");

  unsigned Opc = MI.getOpcode();
  if (Opc == SrcOpc ||
      (Opc == TargetOpcode::G_ANYEXT &&
       (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
      (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {

  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");

  unsigned SrcExtOp = std::get<1>(MatchInfo);

  if (MI.getOpcode() == SrcExtOp) {
    MI.getOperand(1).setReg(Reg);

  if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
      (MI.getOpcode() == TargetOpcode::G_SEXT &&
       SrcExtOp == TargetOpcode::G_ZEXT)) {

  MI.eraseFromParent();
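  // ext(ext(X)) folds when the outer extension cannot change the result:
  // identical opcodes collapse into one, G_ANYEXT absorbs an inner sext/zext,
  // and sext(zext(X)) is just zext(X) because the inner zero-extension
  // already clears the sign bit.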
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");

  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
  Src = MI.getOperand(1).getReg();

  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");

  MI.getOperand(1).setReg(NegSrc);
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
      SrcOpc == TargetOpcode::G_ZEXT) {

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  unsigned SrcExtOp = MatchInfo.second;

  if (SrcTy == DstTy) {
    MI.eraseFromParent();

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

              {TargetOpcode::G_SHL,

  MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  Register ShiftSrc = MatchInfo.first;
  Register ShiftAmt = MatchInfo.second;

  MI.eraseFromParent();
  return MO.isReg() &&
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  return !MO.isReg() ||
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  return all_of(Mask, [](int Elt) { return Elt < 0; });

  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),

  OpIdx = Cst->isZero() ? 3 : 2;

  MI.eraseFromParent();
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))

  return MO.isReg() && MO.getReg().isPhysical();

  return I1->isIdenticalTo(*I2);

  return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==

  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  Register Replacement = MI.getOperand(OpIdx).getReg();

  MI.eraseFromParent();

  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);

  return MO.isReg() &&

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();
  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);

  NewLHS = MaybeNewLHS;

  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&

             TargetOpcode::G_INSERT_VECTOR_ELT)

  MatchInfo.resize(NumElts);

  if (IntImm >= NumElts)
  if (!MatchInfo[IntImm])
    MatchInfo[IntImm] = TmpReg;

  if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
  if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
  return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;

  auto GetUndef = [&]() {

  for (unsigned I = 0; I < MatchInfo.size(); ++I) {
    MatchInfo[I] = GetUndef();

  MI.eraseFromParent();

  std::tie(SubLHS, SubRHS) = MatchInfo;

  MI.eraseFromParent();
  unsigned LogicOpcode = MI.getOpcode();
  assert(LogicOpcode == TargetOpcode::G_AND ||
         LogicOpcode == TargetOpcode::G_OR ||
         LogicOpcode == TargetOpcode::G_XOR);

  if (!LeftHandInst || !RightHandInst)

  unsigned HandOpcode = LeftHandInst->getOpcode();
  if (HandOpcode != RightHandInst->getOpcode())

  switch (HandOpcode) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT: {
  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {

    ExtraHandOpSrcReg = ZOp.getReg();

  if (ExtraHandOpSrcReg.isValid())
    HandBuildSteps.push_back(

         "Expected at least one instr to build?");

  assert(InstrToBuild.Opcode && "Expected a valid opcode?");
  assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");

  for (auto &OperandFn : InstrToBuild.OperandFns)

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);

  int64_t ShlCst, AshrCst;

  if (ShlCst != AshrCst)

          {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))

  MatchInfo = std::make_tuple(Src, ShlCst);

  assert(MI.getOpcode() == TargetOpcode::G_ASHR);

  std::tie(Src, ShiftAmt) = MatchInfo;

  MI.eraseFromParent();
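  // ashr(shl(X, C), C) sign-extends the low (BitWidth - C) bits of X, so the
  // pair is replaced with a single G_SEXT_INREG of that width when the target
  // supports it.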
  assert(MI.getOpcode() == TargetOpcode::G_AND);

        B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));

        auto Zero = B.buildConstant(Ty, 0);

  assert(MI.getOpcode() == TargetOpcode::G_AND);

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

  assert(MI.getOpcode() == TargetOpcode::G_OR);

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
  unsigned ExtBits = MI.getOperand(2).getImm();

                               int64_t Cst, bool IsVector, bool IsFP) {
  return (ScalarSizeBits == 1 && Cst == -1) ||

  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  RegsToNegate.push_back(XorSrc);

  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
    switch (Def->getOpcode()) {
    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP:
    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:
      RegsToNegate.push_back(Def->getOperand(1).getReg());
      RegsToNegate.push_back(Def->getOperand(2).getReg());

    switch (Def->getOpcode()) {
    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP: {
    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  Register SharedReg = MI.getOperand(2).getReg();

    return Y == SharedReg;

  std::tie(X, Y) = MatchInfo;

  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
  MI.getOperand(2).setReg(Y);
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register DstReg = PtrAdd.getReg(0);

  return ConstVal && *ConstVal == 0;

  auto &PtrAdd = cast<GPtrAdd>(MI);

  PtrAdd.eraseFromParent();

  Register Pow2Src1 = MI.getOperand(2).getReg();

  MI.eraseFromParent();
                                          unsigned &SelectOpNo) {

  if (Select->getOpcode() != TargetOpcode::G_SELECT ||

    OtherOperandReg = LHS;

  if (Select->getOpcode() != TargetOpcode::G_SELECT ||

  unsigned BinOpcode = MI.getOpcode();

  bool CanFoldNonConst =
      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&

  if (CanFoldNonConst)

                                          const unsigned &SelectOperand) {

  unsigned BinOpcode = MI.getOpcode();

  if (SelectOperand == 1) {

  Select->eraseFromParent();
  MI.eraseFromParent();
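  // A binary operation whose other operand is constant and whose remaining
  // operand is a G_SELECT between two constants is folded by applying the
  // operation to each select arm, leaving a select between the two folded
  // values; G_AND/G_OR can additionally fold the non-constant case when one
  // arm makes the result trivially known.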
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");

  const unsigned MaxIter =
  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {

      RegsToVisit.push_back(OrLHS);

      RegsToVisit.push_back(OrRHS);

  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)

         "Expected Reg to only have one non-debug use?");

  if (Shift % MemSizeInBits != 0)

  auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);

  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)

  return std::make_pair(Load, Shift / MemSizeInBits);
CombinerHelper::findLoadOffsetsForLoadOrCombine(

  for (auto Reg : RegsToVisit) {

    std::tie(Load, DstPos) = *LoadAndPos;

    auto &LoadMMO = Load->getMMO();

    LoadPtr = Load->getOperand(1).getReg();

    if (!SeenIdx.insert(Idx).second)

    if (BasePtr != LoadPtr)

    if (Idx < LowestIdx) {
      LowestIdxLoad = Load;

    if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)

      EarliestLoad = Load;

         "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad && LatestLoad &&
         "Expected at least two loads?");

  const unsigned MaxIter = 20;

    if (MI.isLoadFoldBarrier())

    if (Iter++ == MaxIter)

  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)

  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);

  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
  if (NarrowMemSizeInBits % 8 != 0)

  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
      MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);

  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;

  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;

  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
  const unsigned ZeroByteOffset =
  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
      ZeroOffsetIdx->second != LowestIdx)

          {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))

    MIB.setInstrAndDebugLoc(*LatestLoad);

    MIB.buildLoad(LoadDst, Ptr, *NewMMO);

      MIB.buildBSwap(Dst, LoadDst);
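  // Load-or combine: an OR tree whose leaves are zero-extended narrow loads
  // from consecutive offsets of the same base pointer, each shifted into its
  // byte position, is replaced by a single wide load (inserted at the latest
  // load), followed by a G_BSWAP when the byte pattern is the reverse of the
  // target's endianness.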
  if (!SrcVal.isValid() || TruncVal == SrcVal) {

  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
  if (ShiftAmt % NarrowBits != 0)
  const unsigned Offset = ShiftAmt / NarrowBits;

  if (SrcVal.isValid() && FoundSrcVal != SrcVal)

  SrcVal = FoundSrcVal;

  auto &StoreMI = cast<GStore>(MI);
  LLT MemTy = StoreMI.getMMO().getMemoryType();

  if (!StoreMI.isSimple())

  auto &LastStore = StoreMI;

  BaseReg = LastStore.getPointerReg();

  GStore *LowestIdxStore = &LastStore;
  int64_t LowestIdxOffset = LastOffset;

  if (!LowestShiftAmt)

  const unsigned NumStoresRequired =

  OffsetMap[*LowestShiftAmt] = LastOffset;

  const int MaxInstsToCheck = 10;
  int NumInstsChecked = 0;
  for (auto II = ++LastStore.getReverseIterator();
       II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;

    if ((NewStore = dyn_cast<GStore>(&*II))) {
    } else if (II->isLoadFoldBarrier() || II->mayLoad()) {

    if (BaseReg != NewBaseReg)

    if (!ShiftByteOffset)

    if (MemOffset < LowestIdxOffset) {
      LowestIdxOffset = MemOffset;
      LowestIdxStore = NewStore;

    if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
        OffsetMap[*ShiftByteOffset] != INT64_MAX)

    OffsetMap[*ShiftByteOffset] = MemOffset;

    NumInstsChecked = 0;
    if (FoundStores.size() == NumStoresRequired)

  if (FoundStores.size() != NumStoresRequired) {
  const auto &DL = LastStore.getMF()->getDataLayout();
  auto &C = LastStore.getMF()->getFunction().getContext();

      C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);

  auto checkOffsets = [&](bool MatchLittleEndian) {
    if (MatchLittleEndian) {
      for (unsigned i = 0; i != NumStoresRequired; ++i)
        if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)

      for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired;
        if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)

  bool NeedBswap = false;
  bool NeedRotate = false;
  if (!checkOffsets(DL.isLittleEndian())) {
    if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
    else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian()))

         "Unexpected type for rotate");

  ST->eraseFromParent();
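  // Truncating-store merge: adjacent narrow stores of successive shifted
  // pieces of one wide value are combined into a single wide store; if the
  // stored byte order is the reverse of the data layout's, the value is
  // byte-swapped first (or rotated by half the width when only two stores
  // are involved).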
  assert(MI.getOpcode() == TargetOpcode::G_PHI);

  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:

  for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_CONSTANT:
      if (InSrcs.size() > 2)

  assert(MI.getOpcode() == TargetOpcode::G_PHI);

  for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
    if (!SrcMIs.insert(SrcMI))

    if (InsertPt != MBB->end() && InsertPt->isPHI())

                                SrcMI->getOperand(0).getReg());
    OldToNewSrcMap[SrcMI] = NewExt;

      NewPhi.addMBB(MO.getMBB());

    NewPhi.addUse(NewSrc->getOperand(0).getReg());
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

          {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}}))

  unsigned VecIdx = Cst->Value.getZExtValue();

  BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR_TRUNC, SrcVec, MRI);

            {TargetOpcode::G_BUILD_VECTOR_TRUNC, {SrcTy, ScalarTy}}))

  if (ScalarTy != DstTy) {

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);

    if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)

    unsigned Idx = Cst->getZExtValue();

    ExtractedElts.set(Idx);
    SrcDstPairs.emplace_back(
        std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));

  return ExtractedElts.all();

  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  for (auto &Pair : SrcDstPairs) {
    auto *ExtMI = Pair.second;

    ExtMI->eraseFromParent();

  MI.eraseFromParent();
  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_OR);

  Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
  unsigned FshOpc = 0;

  int64_t CstShlAmt, CstLShrAmt;
      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;

    FshOpc = TargetOpcode::G_FSHL;

    FshOpc = TargetOpcode::G_FSHR;

    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);

  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
                                  : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
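  // or(shl(X, C), lshr(Y, BitWidth - C)) matches a funnel shift (G_FSHL or
  // G_FSHR depending on which amount is the matched constant); a funnel shift
  // whose two data operands are the same register is further simplified to a
  // rotate, dropping the duplicated operand.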
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);

  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      OutOfRange |= CI->getValue().uge(Bitsize);

  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);

  MI.getOperand(2).setReg(Amt);
                                              int64_t &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)

  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;

  assert(MI.getOpcode() == TargetOpcode::G_AND);

  int64_t AndMaskBits;

  if (AndMaskBits & OrMaskBits)

  if (MI.getOperand(1).getReg() == AndMaskReg)
    MI.getOperand(2).setReg(AndMaskReg);
  MI.getOperand(1).setReg(Src);
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  int64_t Width = MI.getOperand(2).getImm();

  if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())

    auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
    auto Cst2 = B.buildConstant(ExtractTy, Width);
    B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);

bool CombinerHelper::matchBitfieldExtractFromAnd(
  assert(MI.getOpcode() == TargetOpcode::G_AND);

          TargetOpcode::G_UBFX, Ty, ExtractTy))

  int64_t AndImm, LSBImm;

  auto MaybeMask = static_cast<uint64_t>(AndImm);
  if (MaybeMask & (MaybeMask + 1))

  if (static_cast<uint64_t>(LSBImm) >= Size)

    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
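  // and(lshr(X, LSB), Mask) is a bitfield extract when Mask is a run of
  // trailing ones: the `MaybeMask & (MaybeMask + 1)` test rejects any other
  // mask, and the G_UBFX built above extracts the masked bits starting at
  // bit LSB.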
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);

  const Register Dst = MI.getOperand(0).getReg();

  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
                                  ? TargetOpcode::G_SBFX
                                  : TargetOpcode::G_UBFX;

  const unsigned Size = Ty.getScalarSizeInBits();

  if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)

  if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)

  const int64_t Pos = ShrAmt - ShlAmt;
  const int64_t Width = Size - ShrAmt;

    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);

  const Register Dst = MI.getOperand(0).getReg();

          TargetOpcode::G_UBFX, Ty, ExtractTy))

  if (ShrAmt < 0 || ShrAmt >= Size)

  if (0 == (SMask >> ShrAmt)) {
      B.buildConstant(Dst, 0);

  UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
  UMask &= maskTrailingOnes<uint64_t>(Size);

  const int64_t Pos = ShrAmt;

  if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)

    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
bool CombinerHelper::reassociationCanBreakAddressingModePattern(

  const APInt &C1APIntVal = *C1;
  const APInt &C2APIntVal = *C2;
  const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue();

    unsigned ConvUseOpc = ConvUseMI->getOpcode();
    while (ConvUseOpc == TargetOpcode::G_INTTOPTR ||
           ConvUseOpc == TargetOpcode::G_PTRTOINT) {

    auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD ||
                     ConvUseOpc == TargetOpcode::G_STORE;
  Register Src1Reg = MI.getOperand(1).getReg();
  if (RHS->getOpcode() != TargetOpcode::G_ADD)

    MI.getOperand(1).setReg(NewBase.getReg(0));
    MI.getOperand(2).setReg(RHS->getOperand(2).getReg());

  return !reassociationCanBreakAddressingModePattern(MI);

  auto *LHSPtrAdd = cast<GPtrAdd>(LHS);

    LHSPtrAdd->moveBefore(&MI);

    MI.getOperand(2).setReg(LHSCstOff->VReg);

    LHSPtrAdd->getOperand(2).setReg(RHSReg);

  return !reassociationCanBreakAddressingModePattern(MI);

  auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);

  Register Src2Reg = MI.getOperand(2).getReg();
  Register LHSSrc1 = LHSPtrAdd->getBaseReg();
  Register LHSSrc2 = LHSPtrAdd->getOffsetReg();

    auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);

    MI.getOperand(1).setReg(LHSSrc1);
    MI.getOperand(2).setReg(NewCst.getReg(0));

  return !reassociationCanBreakAddressingModePattern(MI);
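  // Three reassociation patterns for G_PTR_ADD are tried in turn:
  // ptradd(X, add(Y, C)) -> ptradd(ptradd(X, Y), C),
  // ptradd(ptradd(X, C1), Y) -> ptradd(ptradd(X, Y), C1), and
  // ptradd(ptradd(X, C1), C2) -> ptradd(X, C1 + C2); each is rejected when
  // reassociationCanBreakAddressingModePattern reports that it would pull a
  // foldable offset out of a load/store addressing mode.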
  auto &PtrAdd = cast<GPtrAdd>(MI);

  MatchInfo = *MaybeCst;

  assert(MI.getOpcode() == TargetOpcode::G_AND);

  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR:

  auto Mask = Cst->Value;

  unsigned NarrowWidth = Mask.countTrailingOnes();

  auto &MF = *MI.getMF();

  auto &DL = MF.getDataLayout();
  if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) ||
      !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx))

  MI.getOperand(1).setReg(Ext.getReg(0));