#define DEBUG_TYPE "gi-combiner"

using namespace MIPatternMatch;

    cl::desc("Force all indexed operations to be "
             "legal for the GlobalISel combiner"));

    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
      MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
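// The two assert/return fragments that follow are the byte-index helpers:
// given a byte index I into a value of ByteWidth bytes, they bound-check I
// and map it to a byte position in memory; the big-endian variant returns
// ByteWidth - I - 1 (the little-endian body is elided in this fragment).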
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");

  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
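// The helper below inspects MemOffset2Idx and decides whether the byte
// offsets, taken relative to LowestIdx, follow a big-endian or little-endian
// pattern; it returns std::nullopt when neither pattern holds (parts of the
// loop body are elided).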
static std::optional<bool>

  unsigned Width = MemOffset2Idx.size();

  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())

    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");

    if (!BigEndian && !LittleEndian)

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");

  assert(LI && "Must have LegalizerInfo to query isLegal!");

  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
                                     unsigned ToOpcode) const {

  if (MI.getOpcode() != TargetOpcode::COPY)

  MI.eraseFromParent();

  if (OrigDef->isPHI() || isa<GUnmerge>(OrigDef))

  std::optional<MachineOperand> MaybePoisonOperand;

    if (!Operand.isReg())

    if (!MaybePoisonOperand)
      MaybePoisonOperand = Operand;

  if (!MaybePoisonOperand) {

    cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();

    B.buildCopy(DstOp, OrigOp);

  Register MaybePoisonOperandReg = MaybePoisonOperand->getReg();
  LLT MaybePoisonOperandRegTy = MRI.getType(MaybePoisonOperandReg);

  cast<GenericMachineInstr>(OrigDef)->dropPoisonGeneratingFlags();

  auto Freeze = B.buildFreeze(MaybePoisonOperandRegTy, MaybePoisonOperandReg);
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");

  assert(Def && "Operand not defined");

    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:

    case TargetOpcode::G_IMPLICIT_DEF: {

             "All undefs should have the same type");

           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());

          {TargetOpcode::G_BUILD_VECTOR, {DstTy, MRI.getType(Ops[0])}})) {

  MI.eraseFromParent();

  if (!ConcatMI1 || !ConcatMI2)

  if (MRI.getType(ConcatMI1->getSourceReg(0)) !=

  for (unsigned i = 0; i < Mask.size(); i += ConcatSrcNumElt) {

      for (unsigned j = 1; j < ConcatSrcNumElt; j++) {
        if (i + j >= Mask.size())

        if (Mask[i + j] != -1)

              {TargetOpcode::G_IMPLICIT_DEF, {ConcatSrcTy}}))

    } else if (Mask[i] % ConcatSrcNumElt == 0) {
      for (unsigned j = 1; j < ConcatSrcNumElt; j++) {
        if (i + j >= Mask.size())

        if (Mask[i + j] != Mask[i] + static_cast<int>(j))

      Ops.push_back(ConcatMI1->getSourceReg(Mask[i] / ConcatSrcNumElt));

      Ops.push_back(ConcatMI2->getSourceReg(Mask[i] / ConcatSrcNumElt -
                                            ConcatMI1->getNumSources()));

          {TargetOpcode::G_CONCAT_VECTORS,
           {MRI.getType(MI.getOperand(0).getReg()), ConcatSrcTy}}))

  MI.eraseFromParent();
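// The next fragments handle G_SHUFFLE_VECTOR: first a shuffle whose mask
// reads whole source vectors in order is rewritten as a concatenation of its
// inputs, then a single-element mask is reduced to a use of whichever source
// register the lone lane comes from. Both bodies are partially elided.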
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");

  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)

  if (DstNumElts % SrcNumElts != 0)

  unsigned NumConcat = DstNumElts / SrcNumElts;

  for (unsigned i = 0; i != DstNumElts; ++i) {

    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))

    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;

  for (auto Src : ConcatSrcs) {

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");

  return Mask.size() == 1;

  int I = MI.getOperand(3).getShuffleMask()[0];

  if (I >= Src1NumElts) {
    SrcReg = MI.getOperand(2).getReg();

  MI.eraseFromParent();
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,

    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&

  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {

        OpcodeForCandidate == TargetOpcode::G_ZEXT)

    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

static void InsertInsnsWithoutSideEffectsBeforeUse(

      InsertBB = PredBB->getMBB();

  if (InsertBB == DefMI.getParent()) {

  Inserter(InsertBB, std::next(InsertPt), UseMO);
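// Map an extend opcode (G_ANYEXT / G_SEXT / G_ZEXT) to the extending-load
// opcode that can produce the extended value directly.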
  unsigned CandidateLoadOpc;

  case TargetOpcode::G_ANYEXT:
    CandidateLoadOpc = TargetOpcode::G_LOAD;

  case TargetOpcode::G_SEXT:
    CandidateLoadOpc = TargetOpcode::G_SEXTLOAD;

  case TargetOpcode::G_ZEXT:
    CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD;

  return CandidateLoadOpc;

  if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))

  unsigned PreferredOpcode =

          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};

    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
      const auto &MMO = LoadMI->getMMO();

      if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})

      Preferred = ChoosePreferredUse(MI, Preferred,

  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  if (PreviouslyEmitted) {

  EmittedInsns[InsertIntoBB] = NewMI;

    Uses.push_back(&UseMO);

  for (auto *UseMO : Uses) {

    if (UseDstReg != ChosenDstReg) {
      if (Preferred.Ty == UseDstTy) {

        InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,

      InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);

  MI.getOperand(0).setReg(ChosenDstReg);
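// The G_AND fragment below folds `and (load x), mask` into a zero-extending
// load when the mask covers the loaded bits and G_ZEXTLOAD is legal for the
// resulting type; the memory operand is rebuilt for the narrower memory type.
// The match conditions are partially elided here.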
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  APInt MaskVal = MaybeMask->Value;

  if (MaskSizeBits > LoadSizeBits.getValue())

  else if (LoadSizeBits.getValue() > MaskSizeBits ||

          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))

    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();

    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);

         "shouldn't consider debug uses");

  if (DefOrUse == MBB.end())

  return &*DefOrUse == &DefMI;

         "shouldn't consider debug uses");

  else if (DefMI.getParent() != UseMI.getParent())
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

    LoadUser = TruncSrc;

  uint64_t SizeInBits = MI.getOperand(2).getImm();

  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {

    auto LoadSizeBits = LoadMI->getMemSizeInBits();

    if (LoadSizeBits == SizeInBits)

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);

  uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();

  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);

  if (NewSizeBits < 8)

  if (LoadDef->isSimple())

  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;

  auto &MMO = LoadDef->getMMO();

  auto PtrInfo = MMO.getPointerInfo();

  MI.eraseFromParent();
  auto *MF = MI->getMF();
  auto *Addr = getOpcodeDef<GPtrAdd>(MI->getPointerReg(), MRI);

    AM.BaseOffs = CstOff->getSExtValue();

      MF->getDataLayout(), AM,

          MF->getFunction().getContext()),
      MI->getMMO().getAddrSpace());
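// Map a generic load/store opcode to its pre-/post-indexed counterpart.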
  case TargetOpcode::G_LOAD:
    return TargetOpcode::G_INDEXED_LOAD;
  case TargetOpcode::G_STORE:
    return TargetOpcode::G_INDEXED_STORE;
  case TargetOpcode::G_ZEXTLOAD:
    return TargetOpcode::G_INDEXED_ZEXTLOAD;
  case TargetOpcode::G_SEXTLOAD:
    return TargetOpcode::G_INDEXED_SEXTLOAD;

bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const {

  if (IndexedOpc == TargetOpcode::G_INDEXED_STORE)
    OpTys = {PtrTy, Ty, Ty};

    OpTys = {Ty, PtrTy};

    cl::desc("Number of uses of a base pointer to check before it is no longer "
             "considered for post-indexing."));
                                            bool &RematOffset) {

  if (!isIndexedLoadStoreLegal(LdSt))

  unsigned NumUsesChecked = 0;

    auto *PtrAdd = dyn_cast<GPtrAdd>(&Use);

    if (StoredValDef == &Use)

    Offset = PtrAdd->getOffsetReg();

        !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset,

    RematOffset = false;

      if (OffsetDef->getOpcode() != TargetOpcode::G_CONSTANT)

      if (&BasePtrUse == PtrDef)

      auto *BasePtrLdSt = dyn_cast<GLoadStore>(&BasePtrUse);
      if (BasePtrLdSt && BasePtrLdSt != &LdSt &&

          isIndexedLoadStoreLegal(*BasePtrLdSt))

      if (auto *BasePtrUseDef = dyn_cast<GPtrAdd>(&BasePtrUse)) {
        Register PtrAddDefReg = BasePtrUseDef->getReg(0);

          if (BaseUseUse.getParent() != LdSt.getParent())

          if (auto *UseUseLdSt = dyn_cast<GLoadStore>(&BaseUseUse))

    Addr = PtrAdd->getReg(0);
    Base = PtrAdd->getBaseReg();
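// The fragment below handles the pre-index case: it rejects frame-index
// bases, stores whose stored value is the base or the updated address, and
// address users outside the load/store's block before accepting a
// pre-indexed form (partially elided).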
  if (!isIndexedLoadStoreLegal(LdSt))

  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)

  if (auto *St = dyn_cast<GStore>(&LdSt)) {

    if (Base == St->getValueReg())

    if (St->getValueReg() == Addr)

    if (AddrUse.getParent() != LdSt.getParent())

    bool RealUse = false;

    if (auto *UseLdSt = dyn_cast<GLoadStore>(&AddrUse)) {

  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

  auto *LoadMI = getOpcodeDef<GLoad>(MI.getOperand(1).getReg(), MRI);

  if (!LoadMI->isSimple())

  const unsigned MaxIter = 20;

    if (II->isLoadFoldBarrier())

    if (Iter++ == MaxIter)

  int Elt = CVal->getZExtValue();

  Register VecPtr = LoadMI->getPointerReg();

  LegalityQuery Q = {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}};

    B.buildLoad(Result, finalPtr, PtrInfo, Alignment);
  auto &LdSt = cast<GLoadStore>(MI);

  MatchInfo.IsPre = findPreIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,

  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base,

  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;

                                   *OldCst->getOperand(1).getCImm());
    MatchInfo.Offset = NewCst.getReg(0);

    MIB.addUse(MI.getOperand(0).getReg());

    MIB.addDef(MI.getOperand(0).getReg());

  MI.eraseFromParent();
  unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;

  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {

    IsSigned = Opcode == TargetOpcode::G_SDIV;

  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {

    IsSigned = Opcode == TargetOpcode::G_SREM;

  unsigned DivOpcode, RemOpcode, DivremOpcode;

    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;

    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;

    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&

  unsigned Opcode = MI.getOpcode();

  assert(OtherMI && "OtherMI shouldn't be empty.");

  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();

    DestRemReg = MI.getOperand(0).getReg();

      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;

                            : TargetOpcode::G_UDIVREM,
                 {DestDivReg, DestRemReg},

  MI.eraseFromParent();
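// The G_BR fragments below inspect the preceding G_BRCOND in the same block;
// when the unconditional branch targets a different block than the
// conditional one, the condition can be inverted and the G_BR retargeted at
// the fallthrough block (most of the rewrite is elided in this fragment).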
  assert(MI.getOpcode() == TargetOpcode::G_BR);

  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)

  return BrCondTarget != MI.getOperand(0).getMBB() &&

  MI.getOperand(0).setMBB(FallthroughBB);

  return Helper.lowerMemcpyInline(MI) ==

  switch (MI.getOpcode()) {

  case TargetOpcode::G_FNEG: {
    Result.changeSign();

  case TargetOpcode::G_FABS: {

  case TargetOpcode::G_FPTRUNC: {

    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  case TargetOpcode::G_FSQRT: {

    Result = APFloat(sqrt(Result.convertToDouble()));

  case TargetOpcode::G_FLOG2: {

  MI.eraseFromParent();
  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)

  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)

  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();

    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {

                                    MF.getFunction().getContext());

  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;

  AMOld.BaseOffs = MaybeImmVal->Value.getSExtValue();

  const auto &TLI = *MF.getSubtarget().getTargetLowering();
  if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
      !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))

  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");

  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

      (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue();

  if (Opcode == TargetOpcode::G_USHLSAT &&

  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  auto Imm = MatchInfo.Imm;

  if (Imm >= ScalarSizeInBits) {

    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {

      MI.eraseFromParent();

    Imm = ScalarSizeInBits - 1;

  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  Register LogicDest = MI.getOperand(1).getReg();

  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)

  const Register C1 = MI.getOperand(2).getReg();

  if (!MaybeImmVal || MaybeImmVal->Value == 0)

  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

    if (MI->getOpcode() != ShiftOpcode ||

    ShiftVal = MaybeImmVal->Value.getSExtValue();

  if (matchFirstShift(LogicMIOp1, C0Val)) {

    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {

    MatchInfo.Shift2 = LogicMIOp2;

  MatchInfo.ValSum = C0Val + C1Val;

  MatchInfo.Logic = LogicMI;

  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  Register Shift2Const = MI.getOperand(2).getReg();

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");

  auto &Shl = cast<GenericMachineInstr>(MI);

  assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
          SrcDef->getOpcode() == TargetOpcode::G_OR) &&
         "Unexpected op");

  auto S1 = B.buildShl(SrcTy, X, ShiftReg);
  auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
  B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
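// The G_MUL fragments below recognise multiplication by a power-of-two
// constant (via exactLogBase2) and rewrite the instruction in place into a
// G_SHL with the corresponding shift amount.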
                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);

                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");

  MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
  MI.getOperand(2).setReg(ShiftCst.getReg(0));

  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);

  if (!MaybeShiftAmtVal)

  int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;

  return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;

  int64_t ShiftAmtVal = MatchData.Imm;

  MI.eraseFromParent();
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)

  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())

  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))

  MatchInfo = Unmerge->getSourceReg();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  auto &Unmerge = cast<GUnmerge>(MI);

  auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);

  if (SrcMergeTy != Dst0Ty && !SameSize)

  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  bool CanReuseInputDirectly = DstTy == SrcTy;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {

    if (CanReuseInputDirectly)

  MI.eraseFromParent();
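// The fragments below fold G_UNMERGE_VALUES of a G_CONSTANT/G_FCONSTANT by
// repeatedly shifting the wide value right by the narrow width, and replace
// an unmerge of an undef source with an undef per destination.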
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)

  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {

    Val = Val.lshr(ShiftAmt);

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {

  MI.eraseFromParent();

  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();

  unsigned NumElems = MI.getNumOperands() - 1;
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {

    B.buildUndef(DstReg);

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  Register Dst0Reg = MI.getOperand(0).getReg();

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();

  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();

  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  Register Dst0Reg = MI.getOperand(0).getReg();

         "Expecting a G_ZEXT");

         "ZExt src doesn't fit in destination");

  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {

  MI.eraseFromParent();
                                                unsigned TargetShiftSize,
                                                unsigned &ShiftVal) {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) &&
         "Expected a shift");

  if (Size <= TargetShiftSize)

  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;

                                               const unsigned &ShiftVal) {

  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);

  unsigned NarrowShiftAmt = ShiftVal - HalfSize;

  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);

    if (NarrowShiftAmt != 0) {

  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);

    if (NarrowShiftAmt != 0) {

    assert(MI.getOpcode() == TargetOpcode::G_ASHR);

        HalfTy, Unmerge.getReg(1),

    if (ShiftVal == HalfSize) {

    } else if (ShiftVal == Size - 1) {

        HalfTy, Unmerge.getReg(1),

  MI.eraseFromParent();
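// The fragments below fold round-trips through G_INTTOPTR/G_PTRTOINT and
// turn an integer G_ADD involving a pointer value into pointer arithmetic;
// PtrReg.second records whether the operands had to be commuted.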
                                              unsigned TargetShiftAmount) {

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_ADD);

    PtrReg.second = false;

    PtrReg.second = true;

  const bool DoCommute = PtrReg.second;

  MI.eraseFromParent();
  auto &PtrAdd = cast<GPtrAdd>(MI);

    NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());

  auto &PtrAdd = cast<GPtrAdd>(MI);

  PtrAdd.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");

    SrcReg = OriginalSrcReg;

  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");

  if (ShiftSize > 32 && TruncSize < 32)

  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");

  case TargetOpcode::G_SHL: {

  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {

    if (User.getOpcode() == TargetOpcode::G_STORE)

  if (NewShiftTy == SrcTy)

        {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))

  MatchInfo = std::make_pair(SrcMI, NewShiftTy);

  LLT NewShiftTy = MatchInfo.second;

  if (NewShiftTy == DstTy)

  return MO.isReg() &&
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  return !MO.isReg() ||
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);

  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);

  return all_of(Mask, [](int Elt) { return Elt < 0; });

  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),

  assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
          MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
         "Expected an insert/extract element op");

      MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;

  OpIdx = Cst->isZero() ? 3 : 2;
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())

    return MO.isReg() && MO.getReg().isPhysical();

    return I1->isIdenticalTo(*I2);

    return I1->findRegisterDefOperandIdx(InstAndDef1->Reg, nullptr) ==

  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;

  std::optional<FPValueAndVReg> MaybeCst;

  return MaybeCst->Value.isExactlyValue(C);

  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  Register Replacement = MI.getOperand(OpIdx).getReg();

  MI.eraseFromParent();

  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");

  MI.eraseFromParent();
                                          unsigned ConstIdx) {
  Register ConstReg = MI.getOperand(ConstIdx).getReg();

  assert((MI.getOpcode() == TargetOpcode::G_FSHL ||
          MI.getOpcode() == TargetOpcode::G_FSHR) &&
         "This is not a funnel shift operation");

  Register ConstReg = MI.getOperand(3).getReg();

  assert((VRegAndVal) && "Value is not a constant");

  APInt NewConst = VRegAndVal->Value.urem(

      MI.getOpcode(), {MI.getOperand(0)},
      {MI.getOperand(1), MI.getOperand(2), NewConstInstr.getReg(0)});

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_SELECT);

  return MO.isReg() &&

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  assert(MI.getNumDefs() == 1 && "Expected only one def?");

  MI.eraseFromParent();

  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);

  NewLHS = MaybeNewLHS;
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&

         TargetOpcode::G_INSERT_VECTOR_ELT)

  MatchInfo.resize(NumElts);

    if (IntImm >= NumElts || IntImm < 0)

    if (!MatchInfo[IntImm])
      MatchInfo[IntImm] = TmpReg;

    if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)

    if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {

    return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||

  auto GetUndef = [&]() {

  MI.eraseFromParent();

  std::tie(SubLHS, SubRHS) = MatchInfo;

  MI.eraseFromParent();

  unsigned LogicOpcode = MI.getOpcode();
  assert(LogicOpcode == TargetOpcode::G_AND ||
         LogicOpcode == TargetOpcode::G_OR ||
         LogicOpcode == TargetOpcode::G_XOR);

  if (!LeftHandInst || !RightHandInst)

  unsigned HandOpcode = LeftHandInst->getOpcode();
  if (HandOpcode != RightHandInst->getOpcode())

  if (!XTy.isValid() || XTy != YTy)

  switch (HandOpcode) {

  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT: {

  case TargetOpcode::G_TRUNC: {

  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {

    ExtraHandOpSrcReg = ZOp.getReg();

  if (ExtraHandOpSrcReg.isValid())

         "Expected at least one instr to build?");

  assert(InstrToBuild.Opcode && "Expected a valid opcode?");
  assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");

  for (auto &OperandFn : InstrToBuild.OperandFns)

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
  int64_t ShlCst, AshrCst;

  if (ShlCst != AshrCst)

          {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))

  MatchInfo = std::make_tuple(Src, ShlCst);

  assert(MI.getOpcode() == TargetOpcode::G_ASHR);

  std::tie(Src, ShiftAmt) = MatchInfo;

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_AND);

    B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));

  auto Zero = B.buildConstant(Ty, 0);
  assert(MI.getOpcode() == TargetOpcode::G_AND);

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

  assert(MI.getOpcode() == TargetOpcode::G_OR);

      (LHSBits.One | RHSBits.Zero).isAllOnes()) {

      (LHSBits.Zero | RHSBits.One).isAllOnes()) {

  unsigned ExtBits = MI.getOperand(2).getImm();

                               int64_t Cst, bool IsVector, bool IsFP) {

  return (ScalarSizeBits == 1 && Cst == -1) ||
  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {

    switch (Def->getOpcode()) {

    case TargetOpcode::G_ICMP:

    case TargetOpcode::G_FCMP:

    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:

      RegsToNegate.push_back(Def->getOperand(1).getReg());
      RegsToNegate.push_back(Def->getOperand(2).getReg());

  for (Register Reg : RegsToNegate) {

    switch (Def->getOpcode()) {

    case TargetOpcode::G_ICMP:
    case TargetOpcode::G_FCMP: {

    case TargetOpcode::G_AND:

    case TargetOpcode::G_OR:

  MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_XOR);

  Register SharedReg = MI.getOperand(2).getReg();

    return Y == SharedReg;

  std::tie(X, Y) = MatchInfo;

  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
  MI.getOperand(2).setReg(Y);

  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register DstReg = PtrAdd.getReg(0);

  return ConstVal && *ConstVal == 0;

  auto &PtrAdd = cast<GPtrAdd>(MI);

  PtrAdd.eraseFromParent();

  Register Pow2Src1 = MI.getOperand(2).getReg();

  MI.eraseFromParent();
                                     unsigned &SelectOpNo) {

  if (Select->getOpcode() != TargetOpcode::G_SELECT ||

    OtherOperandReg = LHS;

    if (Select->getOpcode() != TargetOpcode::G_SELECT ||

  unsigned BinOpcode = MI.getOpcode();

  bool CanFoldNonConst =
      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&

  if (CanFoldNonConst)

                                              const unsigned &SelectOperand) {

  unsigned BinOpcode = MI.getOpcode();

  if (SelectOperand == 1) {

  MI.eraseFromParent();
std::optional<SmallVector<Register, 8>>
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");

  const unsigned MaxIter =

  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {

      return std::nullopt;

  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
    return std::nullopt;

static std::optional<std::pair<GZExtLoad *, int64_t>>

         "Expected Reg to only have one non-debug use?");

  if (Shift % MemSizeInBits != 0)
    return std::nullopt;

  auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);

    return std::nullopt;

  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
    return std::nullopt;

  return std::make_pair(Load, Shift / MemSizeInBits);
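// findLoadOffsetsForLoadOrCombine walks each register feeding the G_OR tree,
// pairs it with the narrow zero-extending load and byte position it came
// from, requires all loads to share one base pointer, and returns the
// lowest-index load, its index, and the latest load in program order
// (several checks are elided in this fragment).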
std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(

  for (auto Reg : RegsToVisit) {

      return std::nullopt;

    std::tie(Load, DstPos) = *LoadAndPos;

      return std::nullopt;

    auto &LoadMMO = Load->getMMO();

      return std::nullopt;

    LoadPtr = Load->getOperand(1).getReg();

      return std::nullopt;

    if (BasePtr != LoadPtr)
      return std::nullopt;

    if (Idx < LowestIdx) {

      LowestIdxLoad = Load;

      return std::nullopt;

    if (!EarliestLoad || dominates(*Load, *EarliestLoad))
      EarliestLoad = Load;
    if (!LatestLoad || dominates(*LatestLoad, *Load))

         "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad &&
         LatestLoad && "Expected at least two loads?");

  const unsigned MaxIter = 20;

    if (MI.isLoadFoldBarrier())
      return std::nullopt;
    if (Iter++ == MaxIter)
      return std::nullopt;

  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)

  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);

  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
  if (NarrowMemSizeInBits % 8 != 0)

  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
      MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);

  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;

  std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);

  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;

  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
  const unsigned ZeroByteOffset =

  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
      ZeroOffsetIdx->second != LowestIdx)

    MIB.setInstrAndDebugLoc(*LatestLoad);

    MIB.buildLoad(LoadDst, Ptr, *NewMMO);

      MIB.buildBSwap(Dst, LoadDst);
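// The G_PHI fragments below sink an extend (G_ANYEXT/G_ZEXT/G_SEXT) through
// a phi when every incoming value is cheap to extend (load, trunc, extend or
// constant) and the number of distinct sources stays small; a replacement
// phi over the extended values is then built past any existing phis.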
  auto &PHI = cast<GPhi>(MI);

  case TargetOpcode::G_ANYEXT:

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:

  for (unsigned I = 0; I < PHI.getNumIncomingValues(); ++I) {

    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_CONSTANT:

      if (InSrcs.size() > 2)

  auto &PHI = cast<GPhi>(MI);

  for (unsigned I = 0; I < PHI.getNumIncomingValues(); ++I) {
    auto SrcReg = PHI.getIncomingValue(I);

    if (!SrcMIs.insert(SrcMI))

    if (InsertPt != MBB->end() && InsertPt->isPHI())

    OldToNewSrcMap[SrcMI] = NewExt;

      NewPhi.addMBB(MO.getMBB());

    NewPhi.addUse(NewSrc->getOperand(0).getReg());
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

  unsigned VecIdx = Cst->Value.getZExtValue();

  if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {

  if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
      SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)

  if (ScalarTy != DstTy) {

    MI.eraseFromParent();

  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);

    if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)

    unsigned Idx = Cst->getZExtValue();

    SrcDstPairs.emplace_back(
        std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));

  return ExtractedElts.all();

  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  for (auto &Pair : SrcDstPairs) {
    auto *ExtMI = Pair.second;

    ExtMI->eraseFromParent();

  MI.eraseFromParent();

  MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
  unsigned FshOpc = 0;

  int64_t CstShlAmt, CstLShrAmt;

      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;

    FshOpc = TargetOpcode::G_FSHL;

    FshOpc = TargetOpcode::G_FSHR;

    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);

  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;

                                 : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
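// The G_ROTL/G_ROTR fragments below detect rotate amounts that are known to
// be out of range (>= the bit width) and swap in a reduced amount register;
// the G_ICMP fragments that follow use known-bits information to fold
// comparisons or narrow/extend a boolean result.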
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);

  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      OutOfRange |= CI->getValue().uge(Bitsize);

  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);

  MI.getOperand(2).setReg(Amt);

                                       int64_t &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  if (KnownRHS.isUnknown())

  std::optional<bool> KnownVal;
  if (KnownRHS.isZero()) {

  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)

  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;

  assert(MI.getOpcode() == TargetOpcode::G_AND);

  int64_t AndMaskBits;

  if (AndMaskBits & OrMaskBits)

  if (MI.getOperand(1).getReg() == AndMaskReg)
    MI.getOperand(2).setReg(AndMaskReg);
  MI.getOperand(1).setReg(Src);

  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  int64_t Width = MI.getOperand(2).getImm();