#define DEBUG_TYPE "gi-combiner"

using namespace MIPatternMatch;

    cl::desc("Force all indexed operations to be "
             "legal for the GlobalISel combiner"));

    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
      MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");

  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
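// Given a map from byte offsets in the combined value to the indices of the
// narrow loads that produced them, decide whether the pattern as a whole reads
// as big-endian, little-endian, or neither.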
static std::optional<bool>

  unsigned Width = MemOffset2Idx.size();

  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())

    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");

    if (!BigEndian && !LittleEndian)

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  assert(LI && "Must have LegalizerInfo to query isLegal!");

  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
                                     unsigned ToOpcode) const {

  if (MI.getOpcode() != TargetOpcode::COPY)

  MI.eraseFromParent();

  bool IsUndef = false;

  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");

  assert(Def && "Operand not defined");
  switch (Def->getOpcode()) {
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_IMPLICIT_DEF: {
           "All undefs should have the same type");
         EltIdx != EltEnd; ++EltIdx)
      Ops.push_back(Undef->getOperand(0).getReg());

  MI.eraseFromParent();
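// Turn a G_SHUFFLE_VECTOR whose mask simply strings its source vectors
// together into a G_CONCAT_VECTORS.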
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");

  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)

  if (DstNumElts % SrcNumElts != 0)

  unsigned NumConcat = DstNumElts / SrcNumElts;

  for (unsigned i = 0; i != DstNumElts; ++i) {

    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))

    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;

  for (auto Src : ConcatSrcs) {

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");

  return Mask.size() == 1;

  int I = MI.getOperand(3).getShuffleMask()[0];

  if (I >= Src1NumElts) {
    SrcReg = MI.getOperand(2).getReg();

  MI.eraseFromParent();
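// Pick the preferred (type, extend opcode) pair among the extend users of a
// load, so that the extension can be folded into the load itself.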
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,

    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&

  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
        OpcodeForCandidate == TargetOpcode::G_ZEXT)

    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
static void InsertInsnsWithoutSideEffectsBeforeUse(

    InsertBB = PredBB->getMBB();

  if (InsertBB == DefMI.getParent()) {

  Inserter(InsertBB, std::next(InsertPt), UseMO);

  unsigned CandidateLoadOpc;
  case TargetOpcode::G_ANYEXT:
    CandidateLoadOpc = TargetOpcode::G_LOAD;
  case TargetOpcode::G_SEXT:
    CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;
  case TargetOpcode::G_ZEXT:
    CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;
  return CandidateLoadOpc;
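// Scan every extend user of a load and, when legal, pick the widest extend to
// fold into the load, turning it into an extending load.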
  if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))

  unsigned PreferredOpcode =
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};

    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
      const auto &MMO = LoadMI->getMMO();

      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)

      if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})

      Preferred = ChoosePreferredUse(MI, Preferred,

  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
  if (PreviouslyEmitted) {

  EmittedInsns[InsertIntoBB] = NewMI;

  auto &LoadValue = MI.getOperand(0);
    Uses.push_back(&UseMO);

  for (auto *UseMO : Uses) {

    if (UseDstReg != ChosenDstReg) {
      if (Preferred.Ty == UseDstTy) {

        InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,

    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);

  MI.getOperand(0).setReg(ChosenDstReg);
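// Fold (and (load X), Mask) into a zero-extending load when the mask keeps
// only bits that the load already provides.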
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  APInt MaskVal = MaybeMask->Value;

  if (MaskSizeBits > LoadSizeBits)

  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)

          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))

    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();

    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
814 "shouldn't consider debug uses");
822 if (DefOrUse ==
MBB.
end())
824 return &*DefOrUse == &
DefMI;
830 "shouldn't consider debug uses");
833 else if (
DefMI.getParent() !=
UseMI.getParent())
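// A G_SEXT_INREG whose source is a sign-extending load of exactly the same
// width is redundant and can be replaced by the load's value.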
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  uint64_t SizeInBits = MI.getOperand(2).getImm();

  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    auto LoadSizeBits = LoadMI->getMemSizeInBits();

    if (LoadSizeBits == SizeInBits)

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);

  uint64_t MemBits = LoadDef->getMemSizeInBits();

  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);

  if (LoadDef->isSimple())
  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;

  auto &MMO = LoadDef->getMMO();

  auto PtrInfo = MMO.getPointerInfo();

  MI.eraseFromParent();
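// Check whether (base + constant offset) is a legal addressing mode for this
// load or store on the target.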
  auto *MF = MI->getMF();
  auto *Addr = getOpcodeDef<GPtrAdd>(MI->getPointerReg(), MRI);

  AM.BaseOffs = CstOff->getSExtValue();

      MF->getDataLayout(), AM,
      MF->getFunction().getContext()),
      MI->getMMO().getAddrSpace());

  case TargetOpcode::G_LOAD:
    return TargetOpcode::G_INDEXED_LOAD;
  case TargetOpcode::G_STORE:
    return TargetOpcode::G_INDEXED_STORE;
  case TargetOpcode::G_ZEXTLOAD:
    return TargetOpcode::G_INDEXED_ZEXTLOAD;
  case TargetOpcode::G_SEXTLOAD:
    return TargetOpcode::G_INDEXED_SEXTLOAD;
bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const {

  if (IndexedOpc == TargetOpcode::G_INDEXED_STORE)
    OpTys = {PtrTy, Ty, Ty};
  else
    OpTys = {Ty, PtrTy};

    cl::desc("Number of uses of a base pointer to check before it is no longer "
             "considered for post-indexing."));
                                           bool &RematOffset) {

  if (!isIndexedLoadStoreLegal(LdSt))

  unsigned NumUsesChecked = 0;

    auto *PtrAdd = dyn_cast<GPtrAdd>(&Use);

    if (StoredValDef == &Use)

    Offset = PtrAdd->getOffsetReg();
        !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset,

      RematOffset = false;

      if (OffsetDef->getOpcode() != TargetOpcode::G_CONSTANT)

      if (&BasePtrUse == PtrDef)

      auto *BasePtrLdSt = dyn_cast<GLoadStore>(&BasePtrUse);
      if (BasePtrLdSt && BasePtrLdSt != &LdSt &&
          isIndexedLoadStoreLegal(*BasePtrLdSt))

      if (auto *BasePtrUseDef = dyn_cast<GPtrAdd>(&BasePtrUse)) {
        Register PtrAddDefReg = BasePtrUseDef->getReg(0);

          if (BaseUseUse.getParent() != LdSt.getParent())

          if (auto *UseUseLdSt = dyn_cast<GLoadStore>(&BaseUseUse))

    Addr = PtrAdd->getReg(0);
    Base = PtrAdd->getBaseReg();
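// Look for a pre-index candidate: the access address is itself a G_PTR_ADD
// whose base and offset can be written back before the access.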
  if (!isIndexedLoadStoreLegal(LdSt))

  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)

  if (auto *St = dyn_cast<GStore>(&LdSt)) {
    if (Base == St->getValueReg())

    if (St->getValueReg() == Addr)

    if (AddrUse.getParent() != LdSt.getParent())

  bool RealUse = false;

    if (auto *UseLdSt = dyn_cast<GLoadStore>(&AddrUse)) {
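// Narrow a G_EXTRACT_VECTOR_ELT of a loaded vector into a scalar load of just
// the selected element.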
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

  auto *LoadMI = getOpcodeDef<GLoad>(MI.getOperand(1).getReg(), MRI);

  if (!LoadMI->isSimple())

  int Elt = CVal->getZExtValue();

  Register VecPtr = LoadMI->getPointerReg();

  LegalityQuery Q = {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}};

  B.buildLoad(Result, finalPtr, PtrInfo, Alignment);
  auto &LdSt = cast<GLoadStore>(MI);

  MatchInfo.IsPre = findPreIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,

  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,

  bool IsStore = Opcode == TargetOpcode::G_STORE;

                                  *OldCst->getOperand(1).getCImm());
    MatchInfo.Offset = NewCst.getReg(0);

    MIB.addUse(MI.getOperand(0).getReg());

    MIB.addDef(MI.getOperand(0).getReg());

  MI.eraseFromParent();
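// Combine a G_[SU]DIV and a G_[SU]REM of the same operands in the same block
// into a single G_[SU]DIVREM.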
  bool IsDiv, IsSigned;

  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsSigned = Opcode == TargetOpcode::G_SDIV;

  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsSigned = Opcode == TargetOpcode::G_SREM;

  unsigned DivOpcode, RemOpcode, DivremOpcode;
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;

    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;

    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&

  assert(OtherMI && "OtherMI shouldn't be empty.");

  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();

    DestRemReg = MI.getOperand(0).getReg();

      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;

    FirstInst = OtherMI;

                     : TargetOpcode::G_UDIVREM,
                 {DestDivReg, DestRemReg},

  MI.eraseFromParent();
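// If a G_BRCOND is followed by a G_BR to the fallthrough block, invert the
// condition and retarget the branches so the unconditional branch disappears.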
  assert(MI.getOpcode() == TargetOpcode::G_BR);

  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)

  return BrCondTarget != MI.getOperand(0).getMBB() &&

  MI.getOperand(0).setMBB(FallthroughBB);
  return Helper.lowerMemcpyInline(MI) ==

  switch (MI.getOpcode()) {

  case TargetOpcode::G_FNEG: {
    Result.changeSign();

  case TargetOpcode::G_FABS: {

  case TargetOpcode::G_FPTRUNC: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  case TargetOpcode::G_FSQRT: {
    Result = APFloat(sqrt(Result.convertToDouble()));

  case TargetOpcode::G_FLOG2: {

  MI.eraseFromParent();
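// Fold (ptr_add (ptr_add X, C1), C2) into (ptr_add X, C1 + C2), but only if
// the combined offset stays a legal addressing mode for the memory users.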
  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)

  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)

  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();

    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
                               MF.getFunction().getContext());

  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;

  AMOld.BaseOffs = MaybeImmVal->Value.getSExtValue();

  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
      !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))

  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");

  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
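// Merge two consecutive shifts by constants of the same opcode into a single
// shift by the summed (and clamped) amount.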
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

      (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue();

  if (Opcode == TargetOpcode::G_USHLSAT &&

          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  auto Imm = MatchInfo.Imm;

  if (Imm >= ScalarSizeInBits) {
    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
      MI.eraseFromParent();

    Imm = ScalarSizeInBits - 1;

  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
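// Match (shift (logic (shift X, C0), Y), C1): push the outer shift through the
// bitwise logic op so the two constant shifts can be combined.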
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  Register LogicDest = MI.getOperand(1).getReg();

  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)

  const Register C1 = MI.getOperand(2).getReg();
  if (!MaybeImmVal || MaybeImmVal->Value == 0)

  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

    if (MI->getOpcode() != ShiftOpcode ||

    ShiftVal = MaybeImmVal->Value.getSExtValue();

  if (matchFirstShift(LogicMIOp1, C0Val)) {
    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {
    MatchInfo.Shift2 = LogicMIOp2;

  MatchInfo.ValSum = C0Val + C1Val;

  MatchInfo.Logic = LogicMI;

          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  Register Shift2Const = MI.getOperand(2).getReg();

  MI.eraseFromParent();
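// Distribute a left shift over an add/or with a constant operand:
// (shl (add/or X, C1), S) -> (add/or (shl X, S), (shl C1, S)).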
  assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");

  auto &Shl = cast<GenericMachineInstr>(MI);

  assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
          SrcDef->getOpcode() == TargetOpcode::G_OR) &&
         "Unexpected op");

  auto S1 = B.buildShl(SrcTy, X, ShiftReg);
  auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
  B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
                                        unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);

                                        unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
  MI.getOperand(2).setReg(ShiftCst.getReg(0));

  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);

  if (!MaybeShiftAmtVal)

  int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;

  return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;

  int64_t ShiftAmtVal = MatchData.Imm;

  MI.eraseFromParent();
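// Fold a merge whose sources all come straight out of a matching unmerge back
// into the unmerge's original source register.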
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)

  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())

  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))

  MatchInfo = Unmerge->getSourceReg();
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  auto &Unmerge = cast<GUnmerge>(MI);

  auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);

  if (SrcMergeTy != Dst0Ty && !SameSize)

  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  bool CanReuseInputDirectly = DstTy == SrcTy;

  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {

    if (CanReuseInputDirectly)

  MI.eraseFromParent();
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)

  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {

    Val = Val.lshr(ShiftAmt);

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {

  MI.eraseFromParent();

  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {

    B.buildUndef(DstReg);
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();

  Register Dst0Reg = MI.getOperand(0).getReg();

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  Register Dst0Reg = MI.getOperand(0).getReg();

         "Expecting a G_ZEXT");

         "ZExt src doesn't fit in destination");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

  MI.eraseFromParent();
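// Narrow a shift by at least half the bit width: unmerge the value and shift
// only the half that still carries meaningful bits.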
                                              unsigned TargetShiftSize,
                                              unsigned &ShiftVal) {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) &&
         "Expected a shift");

  if (Size <= TargetShiftSize)

  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;

                                             const unsigned &ShiftVal) {

  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);

  unsigned NarrowShiftAmt = ShiftVal - HalfSize;

  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);

    if (NarrowShiftAmt != 0) {

  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);

    if (NarrowShiftAmt != 0) {

    assert(MI.getOpcode() == TargetOpcode::G_ASHR);

        HalfTy, Unmerge.getReg(1),

    if (ShiftVal == HalfSize) {

    } else if (ShiftVal == Size - 1) {

          HalfTy, Unmerge.getReg(1),

  MI.eraseFromParent();
                                            unsigned TargetShiftAmount) {

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_ADD);

    PtrReg.second = false;

    PtrReg.second = true;

  const bool DoCommute = PtrReg.second;

  MI.eraseFromParent();

  auto &PtrAdd = cast<GPtrAdd>(MI);

  NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());

  auto &PtrAdd = cast<GPtrAdd>(MI);

  PtrAdd.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");

  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");

  unsigned Opc = MI.getOpcode();

  if (Opc == SrcOpc ||
      (Opc == TargetOpcode::G_ANYEXT &&
       (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
      (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {

  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");

  Register Reg = std::get<0>(MatchInfo);
  unsigned SrcExtOp = std::get<1>(MatchInfo);

  if (MI.getOpcode() == SrcExtOp) {
    MI.getOperand(1).setReg(Reg);

  if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
      (MI.getOpcode() == TargetOpcode::G_SEXT &&
       SrcExtOp == TargetOpcode::G_ZEXT)) {

    MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
      SrcOpc == TargetOpcode::G_ZEXT) {

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  unsigned SrcExtOp = MatchInfo.second;

  if (SrcTy == DstTy) {
    MI.eraseFromParent();

  MI.eraseFromParent();
  if (ShiftSize > 32 && TruncSize < 32)

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  case TargetOpcode::G_SHL: {

  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {

    if (User.getOpcode() == TargetOpcode::G_STORE)

  if (NewShiftTy == SrcTy)

        {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))

  MatchInfo = std::make_pair(SrcMI, NewShiftTy);

  LLT NewShiftTy = MatchInfo.second;

  if (NewShiftTy == DstTy)

  return MO.isReg() &&
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  return !MO.isReg() ||
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);

  return all_of(Mask, [](int Elt) { return Elt < 0; });
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),

  assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
          MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
         "Expected an insert/extract element op");

      MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;

  OpIdx = Cst->isZero() ? 3 : 2;
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())

    return MO.isReg() && MO.getReg().isPhysical();

  return I1->isIdenticalTo(*I2);

  return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==

  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;

  std::optional<FPValueAndVReg> MaybeCst;

  return MaybeCst->Value.isExactlyValue(C);
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  Register Replacement = MI.getOperand(OpIdx).getReg();

  MI.eraseFromParent();

  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  MI.eraseFromParent();

                                           unsigned ConstIdx) {
  Register ConstReg = MI.getOperand(ConstIdx).getReg();

  assert((MI.getOpcode() == TargetOpcode::G_FSHL ||
          MI.getOpcode() == TargetOpcode::G_FSHR) &&
         "This is not a funnel shift operation");

  Register ConstReg = MI.getOperand(3).getReg();

  assert((VRegAndVal) && "Value is not a constant");

  APInt NewConst = VRegAndVal->Value.urem(

      MI.getOpcode(), {MI.getOperand(0)},
      {MI.getOperand(1), MI.getOperand(2), NewConstInstr.getReg(0)});

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);

  return MO.isReg() &&

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();
  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);

  NewLHS = MaybeNewLHS;
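// Collect a chain of G_INSERT_VECTOR_ELT instructions with constant indices
// and rebuild it as a single G_BUILD_VECTOR.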
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&

         TargetOpcode::G_INSERT_VECTOR_ELT)

  MatchInfo.resize(NumElts);

    if (IntImm >= NumElts || IntImm < 0)
    if (!MatchInfo[IntImm])
      MatchInfo[IntImm] = TmpReg;

    if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)

    if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {

    return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;

  auto GetUndef = [&]() {

  for (unsigned I = 0; I < MatchInfo.size(); ++I) {
      MatchInfo[I] = GetUndef();

  MI.eraseFromParent();

  std::tie(SubLHS, SubRHS) = MatchInfo;

  MI.eraseFromParent();
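// Hoist a bitwise logic op above two operands with the same opcode:
// (logic (op X, ...), (op Y, ...)) -> (op (logic X, Y), ...).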
  unsigned LogicOpcode = MI.getOpcode();
  assert(LogicOpcode == TargetOpcode::G_AND ||
         LogicOpcode == TargetOpcode::G_OR ||
         LogicOpcode == TargetOpcode::G_XOR);

  if (!LeftHandInst || !RightHandInst)
  unsigned HandOpcode = LeftHandInst->getOpcode();
  if (HandOpcode != RightHandInst->getOpcode())

  if (!XTy.isValid() || XTy != YTy)

  switch (HandOpcode) {

  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT: {

  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {

    ExtraHandOpSrcReg = ZOp.getReg();

  if (ExtraHandOpSrcReg.isValid())

         "Expected at least one instr to build?");

    assert(InstrToBuild.Opcode && "Expected a valid opcode?");
    assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");

    for (auto &OperandFn : InstrToBuild.OperandFns)

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
  int64_t ShlCst, AshrCst;

  if (ShlCst != AshrCst)

          {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))

  MatchInfo = std::make_tuple(Src, ShlCst);

  assert(MI.getOpcode() == TargetOpcode::G_ASHR);

  std::tie(Src, ShiftAmt) = MatchInfo;

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_AND);

    B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));

  auto Zero = B.buildConstant(Ty, 0);
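// Use known bits to prove that an AND or OR is redundant and can be replaced
// by one of its operands.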
  assert(MI.getOpcode() == TargetOpcode::G_AND);

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

  assert(MI.getOpcode() == TargetOpcode::G_OR);

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {

  unsigned ExtBits = MI.getOperand(2).getImm();
                            int64_t Cst, bool IsVector, bool IsFP) {
  return (ScalarSizeBits == 1 && Cst == -1) ||

  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {

    switch (Def->getOpcode()) {

    case TargetOpcode::G_ICMP:

    case TargetOpcode::G_FCMP:

    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:

      RegsToNegate.push_back(Def->getOperand(1).getReg());
      RegsToNegate.push_back(Def->getOperand(2).getReg());

  for (Register Reg : RegsToNegate) {

    switch (Def->getOpcode()) {

    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP: {

    case TargetOpcode::G_AND:

    case TargetOpcode::G_OR:

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  Register SharedReg = MI.getOperand(2).getReg();

    return Y == SharedReg;

  std::tie(X, Y) = MatchInfo;

  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
  MI.getOperand(2).setReg(Y);

  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register DstReg = PtrAdd.getReg(0);

  return ConstVal && *ConstVal == 0;

  auto &PtrAdd = cast<GPtrAdd>(MI);

  PtrAdd.eraseFromParent();

  Register Pow2Src1 = MI.getOperand(2).getReg();

  MI.eraseFromParent();
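// Fold a binary op of a G_SELECT whose arms are constants by applying the op
// to each arm and selecting between the folded results.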
                                      unsigned &SelectOpNo) {

  if (Select->getOpcode() != TargetOpcode::G_SELECT ||

    OtherOperandReg = LHS;

  if (Select->getOpcode() != TargetOpcode::G_SELECT ||

  unsigned BinOpcode = MI.getOpcode();

  bool CanFoldNonConst =
      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&

  if (CanFoldNonConst)

                                            const unsigned &SelectOperand) {

  unsigned BinOpcode = MI.getOpcode();

  if (SelectOperand == 1) {

  MI.eraseFromParent();
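// Walk the tree of G_ORs feeding this instruction and collect the registers
// that may each come from one narrow load of the wider value.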
std::optional<SmallVector<Register, 8>>
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");

  const unsigned MaxIter =

  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {

      return std::nullopt;

  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
    return std::nullopt;
static std::optional<std::pair<GZExtLoad *, int64_t>>

         "Expected Reg to only have one non-debug use?");

  if (Shift % MemSizeInBits != 0)
    return std::nullopt;

  auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
    return std::nullopt;

  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
    return std::nullopt;

  return std::make_pair(Load, Shift / MemSizeInBits);
std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(

  for (auto Reg : RegsToVisit) {

      return std::nullopt;

    std::tie(Load, DstPos) = *LoadAndPos;

      return std::nullopt;

    auto &LoadMMO = Load->getMMO();

      return std::nullopt;

      LoadPtr = Load->getOperand(1).getReg();

      return std::nullopt;

    if (BasePtr != LoadPtr)
      return std::nullopt;

    if (Idx < LowestIdx) {
      LowestIdxLoad = Load;

      return std::nullopt;

    if (!EarliestLoad || dominates(*Load, *EarliestLoad))
      EarliestLoad = Load;
    if (!LatestLoad || dominates(*LatestLoad, *Load))

         "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad && LatestLoad &&
         "Expected at least two loads?");

  const unsigned MaxIter = 20;

    if (MI.isLoadFoldBarrier())
      return std::nullopt;
    if (Iter++ == MaxIter)
      return std::nullopt;

  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
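// Match an OR-tree of byte-sized, zero-extended loads that together form one
// wide load, inserting a byte swap when the pattern's endianness differs from
// the target's.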
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)

  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);

  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
  if (NarrowMemSizeInBits % 8 != 0)

  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
      MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;

  std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);

  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;

  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
  const unsigned ZeroByteOffset =

  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
      ZeroOffsetIdx->second != LowestIdx)

    MIB.setInstrAndDebugLoc(*LatestLoad);
    MIB.buildLoad(LoadDst, Ptr, *NewMMO);
    MIB.buildBSwap(Dst, LoadDst);
  assert(MI.getOpcode() == TargetOpcode::G_PHI);

  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:

  for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_CONSTANT:

    if (InSrcs.size() > 2)

  assert(MI.getOpcode() == TargetOpcode::G_PHI);

  for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
    if (!SrcMIs.insert(SrcMI))

    if (InsertPt != MBB->end() && InsertPt->isPHI())

                              SrcMI->getOperand(0).getReg());
    OldToNewSrcMap[SrcMI] = NewExt;

      NewPhi.addMBB(MO.getMBB());

    NewPhi.addUse(NewSrc->getOperand(0).getReg());
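// Replace a G_EXTRACT_VECTOR_ELT of a G_BUILD_VECTOR with the corresponding
// scalar source operand.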
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

  unsigned VecIdx = Cst->Value.getZExtValue();

  if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {

  if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
      SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)

  if (ScalarTy != DstTy) {

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);

    if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)

    unsigned Idx = Cst->getZExtValue();

    SrcDstPairs.emplace_back(
        std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));

  return ExtractedElts.all();

  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  for (auto &Pair : SrcDstPairs) {
    auto *ExtMI = Pair.second;

    ExtMI->eraseFromParent();

  MI.eraseFromParent();

  MI.eraseFromParent();
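// Match (or (shl X, AmtA), (lshr Y, AmtB)) where the shift amounts sum to the
// bit width and rewrite it as a funnel shift (G_FSHL/G_FSHR).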
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
  unsigned FshOpc = 0;

  int64_t CstShlAmt, CstLShrAmt;
      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;

    FshOpc = TargetOpcode::G_FSHL;

    FshOpc = TargetOpcode::G_FSHR;

    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);

  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
                          : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);

  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      OutOfRange |= CI->getValue().uge(Bitsize);

  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);

  MI.getOperand(2).setReg(Amt);

                                            int64_t &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  std::optional<bool> KnownVal;

  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)

  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
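// If the OR-ed constant and the AND mask share no set bits, the OR cannot
// affect the masked result, so apply the AND directly to the OR's source.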
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  int64_t AndMaskBits;

  if (AndMaskBits & OrMaskBits)

  if (MI.getOperand(1).getReg() == AndMaskReg)
    MI.getOperand(2).setReg(AndMaskReg);
  MI.getOperand(1).setReg(Src);

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  int64_t Width = MI.getOperand(2).getImm();

  auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
  auto Cst2 = B.buildConstant(ExtractTy, Width);
  B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);

  assert(MI.getOpcode() == TargetOpcode::G_AND);