40#define DEBUG_TYPE "instcombine"
52 bool IsSigned =
false) {
55 Result = In1.
sadd_ov(In2, Overflow);
57 Result = In1.
uadd_ov(In2, Overflow);
65 bool IsSigned =
false) {
68 Result = In1.
ssub_ov(In2, Overflow);
70 Result = In1.
usub_ov(In2, Overflow);
78 for (
auto *U :
I.users())
100 }
else if (
C.isAllOnes()) {
121 if (LI->
isVolatile() || !GV || !GV->isConstant() ||
122 !GV->hasDefinitiveInitializer())
126 TypeSize EltSize =
DL.getTypeStoreSize(EltTy);
142 if (!ConstOffset.
ult(Stride))
156 enum { Overdefined = -3, Undefined = -2 };
165 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
169 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
177 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
187 for (
unsigned i = 0, e = ArrayElementCount; i != e; ++i,
Offset += Stride) {
201 CompareRHS,
DL, &
TLI);
209 if (TrueRangeEnd == (
int)i - 1)
211 if (FalseRangeEnd == (
int)i - 1)
228 if (FirstTrueElement == Undefined)
229 FirstTrueElement = TrueRangeEnd = i;
232 if (SecondTrueElement == Undefined)
233 SecondTrueElement = i;
235 SecondTrueElement = Overdefined;
238 if (TrueRangeEnd == (
int)i - 1)
241 TrueRangeEnd = Overdefined;
245 if (FirstFalseElement == Undefined)
246 FirstFalseElement = FalseRangeEnd = i;
249 if (SecondFalseElement == Undefined)
250 SecondFalseElement = i;
252 SecondFalseElement = Overdefined;
255 if (FalseRangeEnd == (
int)i - 1)
258 FalseRangeEnd = Overdefined;
263 if (i < 64 && IsTrueForElt)
264 MagicBitvector |= 1ULL << i;
269 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
270 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
271 FalseRangeEnd == Overdefined)
285 auto MaskIdx = [&](
Value *Idx) {
289 Idx =
Builder.CreateAnd(Idx, Mask);
296 if (SecondTrueElement != Overdefined) {
299 if (FirstTrueElement == Undefined)
302 Value *FirstTrueIdx = ConstantInt::get(Idx->
getType(), FirstTrueElement);
305 if (SecondTrueElement == Undefined)
310 Value *SecondTrueIdx = ConstantInt::get(Idx->
getType(), SecondTrueElement);
312 return BinaryOperator::CreateOr(C1, C2);
317 if (SecondFalseElement != Overdefined) {
320 if (FirstFalseElement == Undefined)
323 Value *FirstFalseIdx = ConstantInt::get(Idx->
getType(), FirstFalseElement);
326 if (SecondFalseElement == Undefined)
331 Value *SecondFalseIdx =
332 ConstantInt::get(Idx->
getType(), SecondFalseElement);
334 return BinaryOperator::CreateAnd(C1, C2);
339 if (TrueRangeEnd != Overdefined) {
340 assert(TrueRangeEnd != FirstTrueElement &&
"Should emit single compare");
344 if (FirstTrueElement) {
346 Idx =
Builder.CreateAdd(Idx, Offs);
350 ConstantInt::get(Idx->
getType(), TrueRangeEnd - FirstTrueElement + 1);
355 if (FalseRangeEnd != Overdefined) {
356 assert(FalseRangeEnd != FirstFalseElement &&
"Should emit single compare");
359 if (FirstFalseElement) {
361 Idx =
Builder.CreateAdd(Idx, Offs);
365 ConstantInt::get(Idx->
getType(), FalseRangeEnd - FirstFalseElement);
378 if (ArrayElementCount <= Idx->
getType()->getIntegerBitWidth())
381 Ty =
DL.getSmallestLegalIntType(
Init->getContext(), ArrayElementCount);
386 V =
Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
387 V =
Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
412 while (!WorkList.
empty()) {
415 while (!WorkList.
empty()) {
416 if (Explored.
size() >= 100)
434 if (!
GEP->isInBounds() ||
count_if(
GEP->indices(), IsNonConst) > 1)
442 if (WorkList.
back() == V) {
458 for (
auto *PN : PHIs)
459 for (
Value *
Op : PN->incoming_values())
467 for (
Value *Val : Explored) {
473 if (Inst ==
Base || Inst ==
PHI || !Inst || !
PHI ||
477 if (
PHI->getParent() == Inst->getParent())
487 bool Before =
true) {
495 I = &*std::next(
I->getIterator());
496 Builder.SetInsertPoint(
I);
501 BasicBlock &Entry =
A->getParent()->getEntryBlock();
502 Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
524 Base->getContext(),
DL.getIndexTypeSizeInBits(Start->getType()));
530 for (
Value *Val : Explored) {
538 PHI->getName() +
".idx",
PHI->getIterator());
543 for (
Value *Val : Explored) {
552 NewInsts[
GEP] = OffsetV;
554 NewInsts[
GEP] = Builder.CreateAdd(
555 Op, OffsetV,
GEP->getOperand(0)->getName() +
".add",
567 for (
Value *Val : Explored) {
574 for (
unsigned I = 0,
E =
PHI->getNumIncomingValues();
I <
E; ++
I) {
575 Value *NewIncoming =
PHI->getIncomingValue(
I);
577 auto It = NewInsts.
find(NewIncoming);
578 if (It != NewInsts.
end())
579 NewIncoming = It->second;
586 for (
Value *Val : Explored) {
592 Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(),
Base, NewInsts[Val],
593 Val->getName() +
".ptr", NW);
600 return NewInsts[Start];
686 if (
Base.Ptr == RHS && CanFold(
Base.LHSNW) && !
Base.isExpensive()) {
690 EmitGEPOffsets(
Base.LHSGEPs,
Base.LHSNW, IdxTy,
true);
698 RHS->getType()->getPointerAddressSpace())) {
729 if (GEPLHS->
getOperand(0) != GEPRHS->getOperand(0)) {
730 bool IndicesTheSame =
733 GEPRHS->getPointerOperand()->getType() &&
737 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
738 IndicesTheSame =
false;
744 if (IndicesTheSame &&
752 if (GEPLHS->
isInBounds() && GEPRHS->isInBounds() &&
754 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
758 Value *LOffset = EmitGEPOffset(GEPLHS);
759 Value *ROffset = EmitGEPOffset(GEPRHS);
766 if (LHSIndexTy != RHSIndexTy) {
769 ROffset =
Builder.CreateTrunc(ROffset, LHSIndexTy);
771 LOffset =
Builder.CreateTrunc(LOffset, RHSIndexTy);
780 if (GEPLHS->
getOperand(0) == GEPRHS->getOperand(0) &&
784 unsigned NumDifferences = 0;
785 unsigned DiffOperand = 0;
786 for (
unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
787 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
789 Type *RHSType = GEPRHS->getOperand(i)->getType();
800 if (NumDifferences++)
805 if (NumDifferences == 0)
813 Value *RHSV = GEPRHS->getOperand(DiffOperand);
814 return NewICmp(NW, LHSV, RHSV);
822 EmitGEPOffsets(
Base.LHSGEPs,
Base.LHSNW, IdxTy,
true);
824 EmitGEPOffsets(
Base.RHSGEPs,
Base.RHSNW, IdxTy,
true);
825 return NewICmp(
Base.LHSNW &
Base.RHSNW, L, R);
851 bool Captured =
false;
856 CmpCaptureTracker(
AllocaInst *Alloca) : Alloca(Alloca) {}
858 void tooManyUses()
override { Captured =
true; }
870 ICmps[ICmp] |= 1u << U->getOperandNo();
879 CmpCaptureTracker Tracker(Alloca);
881 if (Tracker.Captured)
885 for (
auto [ICmp, Operands] : Tracker.ICmps) {
891 auto *Res = ConstantInt::get(ICmp->getType(),
917 assert(!!
C &&
"C should not be zero!");
933 ConstantInt::get(
X->getType(), -
C));
945 ConstantInt::get(
X->getType(),
SMax -
C));
956 ConstantInt::get(
X->getType(),
SMax - (
C - 1)));
965 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
968 if (
I.getPredicate() ==
I.ICMP_NE)
970 return new ICmpInst(Pred, LHS, RHS);
989 return getICmp(
I.ICMP_UGT,
A,
990 ConstantInt::get(
A->getType(), AP2.
logBase2()));
1002 if (IsAShr && AP1 == AP2.
ashr(Shift)) {
1006 return getICmp(
I.ICMP_UGE,
A, ConstantInt::get(
A->getType(), Shift));
1007 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1008 }
else if (AP1 == AP2.
lshr(Shift)) {
1009 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1015 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1024 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
1027 if (
I.getPredicate() ==
I.ICMP_NE)
1029 return new ICmpInst(Pred, LHS, RHS);
1038 if (!AP1 && AP2TrailingZeros != 0)
1041 ConstantInt::get(
A->getType(), AP2.
getBitWidth() - AP2TrailingZeros));
1049 if (Shift > 0 && AP2.
shl(Shift) == AP1)
1050 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1054 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1083 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1107 if (U == AddWithCst)
1125 I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1133 Value *TruncA = Builder.CreateTrunc(
A, NewType,
A->getName() +
".trunc");
1134 Value *TruncB = Builder.CreateTrunc(
B, NewType,
B->getName() +
".trunc");
1135 CallInst *
Call = Builder.CreateCall(
F, {TruncA, TruncB},
"sadd");
1136 Value *
Add = Builder.CreateExtractValue(
Call, 0,
"sadd.result");
1154 if (!
I.isEquality())
1185 APInt(XBitWidth, XBitWidth - 1))))
1212 return new ICmpInst(Pred,
B, Cmp.getOperand(1));
1214 return new ICmpInst(Pred,
A, Cmp.getOperand(1));
1231 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1243 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1249 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1252 if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
1260 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1265 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1281 return new ICmpInst(Pred, Stripped,
1294 const APInt *Mask, *Neg;
1310 auto *NewAnd =
Builder.CreateAnd(Num, *Mask);
1313 return new ICmpInst(Pred, NewAnd, Zero);
1334 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1350 for (
Value *V : Phi->incoming_values()) {
1358 PHINode *NewPhi =
Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
1359 for (
auto [V, Pred] :
zip(
Ops, Phi->blocks()))
1374 Value *
X = Cmp.getOperand(0), *
Y = Cmp.getOperand(1);
1407 if (Cmp.isEquality() || (IsSignBit &&
hasBranchUse(Cmp)))
1412 if (Cmp.hasOneUse() &&
1426 if (!
match(BI->getCondition(),
1431 if (
DT.dominates(Edge0, Cmp.getParent())) {
1432 if (
auto *V = handleDomCond(DomPred, DomC))
1436 if (
DT.dominates(Edge1, Cmp.getParent()))
1452 Type *SrcTy =
X->getType();
1454 SrcBits = SrcTy->getScalarSizeInBits();
1458 if (shouldChangeType(Trunc->
getType(), SrcTy)) {
1460 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.sext(SrcBits)));
1462 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.zext(SrcBits)));
1465 if (
C.isOne() &&
C.getBitWidth() > 1) {
1470 ConstantInt::get(V->getType(), 1));
1482 auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
1484 ConstantInt::get(SrcTy, DstBits - Pow2->
logBase2()));
1490 Pred,
Y, ConstantInt::get(SrcTy,
C.logBase2() - Pow2->
logBase2()));
1496 if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1500 Constant *WideC = ConstantInt::get(SrcTy,
C.zext(SrcBits));
1509 if ((Known.
Zero | Known.
One).countl_one() >= SrcBits - DstBits) {
1511 APInt NewRHS =
C.zext(SrcBits);
1513 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy, NewRHS));
1525 DstBits == SrcBits - ShAmt) {
1542 bool YIsSExt =
false;
1545 unsigned NoWrapFlags =
cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1547 if (Cmp.isSigned()) {
1558 if (
X->getType() !=
Y->getType() &&
1559 (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1561 if (!isDesirableIntType(
X->getType()->getScalarSizeInBits()) &&
1562 isDesirableIntType(
Y->getType()->getScalarSizeInBits())) {
1564 Pred = Cmp.getSwappedPredicate(Pred);
1569 else if (!Cmp.isSigned() &&
1583 Type *TruncTy = Cmp.getOperand(0)->getType();
1588 if (isDesirableIntType(TruncBits) &&
1589 !isDesirableIntType(
X->getType()->getScalarSizeInBits()))
1612 bool TrueIfSigned =
false;
1629 if (
Xor->hasOneUse()) {
1631 if (!Cmp.isEquality() && XorC->
isSignMask()) {
1632 Pred = Cmp.getFlippedSignednessPredicate();
1633 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1638 Pred = Cmp.getFlippedSignednessPredicate();
1639 Pred = Cmp.getSwappedPredicate(Pred);
1640 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1647 if (*XorC == ~
C && (
C + 1).isPowerOf2())
1650 if (*XorC ==
C && (
C + 1).isPowerOf2())
1655 if (*XorC == -
C &&
C.isPowerOf2())
1657 ConstantInt::get(
X->getType(), ~
C));
1659 if (*XorC ==
C && (-
C).isPowerOf2())
1661 ConstantInt::get(
X->getType(), ~
C));
1683 const APInt *ShiftC;
1688 Type *XType =
X->getType();
1694 return new ICmpInst(Pred,
Add, ConstantInt::get(XType, Bound));
1703 if (!Shift || !Shift->
isShift())
1711 unsigned ShiftOpcode = Shift->
getOpcode();
1712 bool IsShl = ShiftOpcode == Instruction::Shl;
1715 APInt NewAndCst, NewCmpCst;
1716 bool AnyCmpCstBitsShiftedOut;
1717 if (ShiftOpcode == Instruction::Shl) {
1725 NewCmpCst = C1.
lshr(*C3);
1726 NewAndCst = C2.
lshr(*C3);
1727 AnyCmpCstBitsShiftedOut = NewCmpCst.
shl(*C3) != C1;
1728 }
else if (ShiftOpcode == Instruction::LShr) {
1733 NewCmpCst = C1.
shl(*C3);
1734 NewAndCst = C2.
shl(*C3);
1735 AnyCmpCstBitsShiftedOut = NewCmpCst.
lshr(*C3) != C1;
1741 assert(ShiftOpcode == Instruction::AShr &&
"Unknown shift opcode");
1742 NewCmpCst = C1.
shl(*C3);
1743 NewAndCst = C2.
shl(*C3);
1744 AnyCmpCstBitsShiftedOut = NewCmpCst.
ashr(*C3) != C1;
1745 if (NewAndCst.
ashr(*C3) != C2)
1749 if (AnyCmpCstBitsShiftedOut) {
1759 Shift->
getOperand(0), ConstantInt::get(
And->getType(), NewAndCst));
1760 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1761 ConstantInt::get(
And->getType(), NewCmpCst));
1778 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1792 return new TruncInst(
And->getOperand(0), Cmp.getType());
1803 ConstantInt::get(
X->getType(), ~*C2));
1808 ConstantInt::get(
X->getType(), -*C2));
1811 if (!
And->hasOneUse())
1814 if (Cmp.isEquality() && C1.
isZero()) {
1832 Constant *NegBOC = ConstantInt::get(
And->getType(), -NewC2);
1834 return new ICmpInst(NewPred,
X, NegBOC);
1852 if (!Cmp.getType()->isVectorTy()) {
1853 Type *WideType = W->getType();
1855 Constant *ZextC1 = ConstantInt::get(WideType, C1.
zext(WideScalarBits));
1856 Constant *ZextC2 = ConstantInt::get(WideType, C2->
zext(WideScalarBits));
1858 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1869 if (!Cmp.isSigned() && C1.
isZero() &&
And->getOperand(0)->hasOneUse() &&
1876 unsigned UsesRemoved = 0;
1877 if (
And->hasOneUse())
1879 if (
Or->hasOneUse())
1886 if (UsesRemoved >= RequireUsesRemoved) {
1890 One,
Or->getName());
1892 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1906 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1907 Attribute::NoImplicitFloat) &&
1910 Type *FPType = V->getType()->getScalarType();
1911 if (FPType->isIEEELikeFPTy() && (C1.
isZero() || C1 == *C2)) {
1912 APInt ExponentMask =
1914 if (*C2 == ExponentMask) {
1915 unsigned Mask = C1.
isZero()
1949 Constant *MinSignedC = ConstantInt::get(
1953 return new ICmpInst(NewPred,
X, MinSignedC);
1968 if (!Cmp.isEquality())
1974 if (Cmp.getOperand(1) ==
Y &&
C.isNegatedPowerOf2()) {
1985 X->getType()->isIntOrIntVectorTy(1) && (
C.isZero() ||
C.isOne())) {
1991 return BinaryOperator::CreateAnd(TruncY,
X);
2009 const APInt *Addend, *Msk;
2013 APInt NewComperand = (
C - *Addend) & *Msk;
2014 Value *MaskA =
Builder.CreateAnd(
A, ConstantInt::get(
A->getType(), *Msk));
2016 ConstantInt::get(MaskA->
getType(), NewComperand));
2038 while (!WorkList.
empty()) {
2039 auto MatchOrOperatorArgument = [&](
Value *OrOperatorArgument) {
2042 if (
match(OrOperatorArgument,
2048 if (
match(OrOperatorArgument,
2058 Value *OrOperatorLhs, *OrOperatorRhs;
2060 if (!
match(CurrentValue,
2065 MatchOrOperatorArgument(OrOperatorRhs);
2066 MatchOrOperatorArgument(OrOperatorLhs);
2071 Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.
rbegin()->first,
2072 CmpValues.
rbegin()->second);
2074 for (
auto It = CmpValues.
rbegin() + 1; It != CmpValues.
rend(); ++It) {
2075 Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);
2076 LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
2092 ConstantInt::get(V->getType(), 1));
2095 Value *OrOp0 =
Or->getOperand(0), *OrOp1 =
Or->getOperand(1);
2102 Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(),
C));
2103 return new ICmpInst(Pred, OrOp0, NewC);
2107 if (
match(OrOp1,
m_APInt(MaskC)) && Cmp.isEquality()) {
2108 if (*MaskC ==
C && (
C + 1).isPowerOf2()) {
2113 return new ICmpInst(Pred, OrOp0, OrOp1);
2120 if (
Or->hasOneUse()) {
2122 Constant *NewC = ConstantInt::get(
Or->getType(),
C ^ (*MaskC));
2134 Constant *NewC = ConstantInt::get(
X->getType(), TrueIfSigned ? 1 : 0);
2162 if (!Cmp.isEquality() || !
C.isZero() || !
Or->hasOneUse())
2194 if (Cmp.isEquality() &&
C.isZero() &&
X ==
Mul->getOperand(1) &&
2195 (
Mul->hasNoUnsignedWrap() ||
Mul->hasNoSignedWrap()))
2217 if (Cmp.isEquality()) {
2219 if (
Mul->hasNoSignedWrap() &&
C.srem(*MulC).isZero()) {
2220 Constant *NewC = ConstantInt::get(MulTy,
C.sdiv(*MulC));
2228 if (
C.urem(*MulC).isZero()) {
2231 if ((*MulC & 1).isOne() ||
Mul->hasNoUnsignedWrap()) {
2232 Constant *NewC = ConstantInt::get(MulTy,
C.udiv(*MulC));
2245 if (
C.isMinSignedValue() && MulC->
isAllOnes())
2251 NewC = ConstantInt::get(
2255 "Unexpected predicate");
2256 NewC = ConstantInt::get(
2261 NewC = ConstantInt::get(
2265 "Unexpected predicate");
2266 NewC = ConstantInt::get(
2271 return NewC ?
new ICmpInst(Pred,
X, NewC) :
nullptr;
2283 unsigned TypeBits =
C.getBitWidth();
2285 if (Cmp.isUnsigned()) {
2305 return new ICmpInst(Pred,
Y, ConstantInt::get(ShiftType, CLog2));
2306 }
else if (Cmp.isSigned() && C2->
isOne()) {
2307 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2328 const APInt *ShiftVal;
2358 const APInt *ShiftAmt;
2364 unsigned TypeBits =
C.getBitWidth();
2365 if (ShiftAmt->
uge(TypeBits))
2377 APInt ShiftedC =
C.ashr(*ShiftAmt);
2378 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2381 C.ashr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2382 APInt ShiftedC =
C.ashr(*ShiftAmt);
2383 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2390 assert(!
C.isMinSignedValue() &&
"Unexpected icmp slt");
2391 APInt ShiftedC = (
C - 1).ashr(*ShiftAmt) + 1;
2392 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2402 APInt ShiftedC =
C.lshr(*ShiftAmt);
2403 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2406 C.lshr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2407 APInt ShiftedC =
C.lshr(*ShiftAmt);
2408 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2415 assert(
C.ugt(0) &&
"ult 0 should have been eliminated");
2416 APInt ShiftedC = (
C - 1).lshr(*ShiftAmt) + 1;
2417 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2421 if (Cmp.isEquality() && Shl->
hasOneUse()) {
2427 Constant *LShrC = ConstantInt::get(ShType,
C.lshr(*ShiftAmt));
2432 bool TrueIfSigned =
false;
2444 if (Cmp.isUnsigned() && Shl->
hasOneUse()) {
2446 if ((
C + 1).isPowerOf2() &&
2454 if (
C.isPowerOf2() &&
2484 Pred, ConstantInt::get(ShType->
getContext(),
C))) {
2485 CmpPred = FlippedStrictness->first;
2493 ConstantInt::get(TruncTy, RHSC.
ashr(*ShiftAmt).
trunc(TypeBits - Amt));
2495 Builder.CreateTrunc(
X, TruncTy,
"",
false,
2512 if (Cmp.isEquality() && Shr->
isExact() &&
C.isZero())
2513 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
2515 bool IsAShr = Shr->
getOpcode() == Instruction::AShr;
2516 const APInt *ShiftValC;
2518 if (Cmp.isEquality())
2536 assert(ShiftValC->
uge(
C) &&
"Expected simplify of compare");
2537 assert((IsUGT || !
C.isZero()) &&
"Expected X u< 0 to simplify");
2539 unsigned CmpLZ = IsUGT ?
C.countl_zero() : (
C - 1).
countl_zero();
2547 const APInt *ShiftAmtC;
2553 unsigned TypeBits =
C.getBitWidth();
2555 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2558 bool IsExact = Shr->
isExact();
2566 (
C - 1).isPowerOf2() &&
C.countLeadingZeros() > ShAmtVal) {
2572 APInt ShiftedC = (
C - 1).shl(ShAmtVal) + 1;
2573 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2579 APInt ShiftedC =
C.shl(ShAmtVal);
2580 if (ShiftedC.
ashr(ShAmtVal) ==
C)
2581 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2585 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2586 if (!
C.isMaxSignedValue() && !(
C + 1).shl(ShAmtVal).isMinSignedValue() &&
2587 (ShiftedC + 1).ashr(ShAmtVal) == (
C + 1))
2588 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2594 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2595 if ((ShiftedC + 1).ashr(ShAmtVal) == (
C + 1) ||
2596 (
C + 1).shl(ShAmtVal).isMinSignedValue())
2597 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2604 if (
C.getBitWidth() > 2 &&
C.getNumSignBits() <= ShAmtVal) {
2614 }
else if (!IsAShr) {
2618 APInt ShiftedC =
C.shl(ShAmtVal);
2619 if (ShiftedC.
lshr(ShAmtVal) ==
C)
2620 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2624 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2625 if ((ShiftedC + 1).lshr(ShAmtVal) == (
C + 1))
2626 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2630 if (!Cmp.isEquality())
2638 assert(((IsAShr &&
C.shl(ShAmtVal).ashr(ShAmtVal) ==
C) ||
2639 (!IsAShr &&
C.shl(ShAmtVal).lshr(ShAmtVal) ==
C)) &&
2640 "Expected icmp+shr simplify did not occur.");
2645 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy,
C << ShAmtVal));
2651 Constant *Mask = ConstantInt::get(ShrTy, Val);
2653 return new ICmpInst(Pred,
And, ConstantInt::get(ShrTy,
C << ShAmtVal));
2670 const APInt *DivisorC;
2679 "ult X, 0 should have been simplified already.");
2684 if (!NormalizedC.
uge(DivisorC->
abs() - 1))
2707 const APInt *DivisorC;
2716 !
C.isStrictlyPositive()))
2722 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2726 return new ICmpInst(Pred,
And, ConstantInt::get(Ty,
C));
2753 assert(*C2 != 0 &&
"udiv 0, X should have been simplified already.");
2758 "icmp ugt X, UINT_MAX should have been simplified already.");
2760 ConstantInt::get(Ty, C2->
udiv(
C + 1)));
2765 assert(
C != 0 &&
"icmp ult X, 0 should have been simplified already.");
2767 ConstantInt::get(Ty, C2->
udiv(
C)));
2781 bool DivIsSigned = Div->
getOpcode() == Instruction::SDiv;
2791 if (Cmp.isEquality() && Div->
hasOneUse() &&
C.isSignBitSet() &&
2792 (!DivIsSigned ||
C.isMinSignedValue())) {
2793 Value *XBig =
Builder.CreateICmp(Pred,
X, ConstantInt::get(Ty,
C));
2794 Value *YOne =
Builder.CreateICmp(Pred,
Y, ConstantInt::get(Ty, 1));
2820 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned()) {
2824 DivIsSigned =
false;
2843 bool ProdOV = (DivIsSigned ? Prod.
sdiv(*C2) : Prod.
udiv(*C2)) !=
C;
2856 int LoOverflow = 0, HiOverflow = 0;
2857 APInt LoBound, HiBound;
2862 HiOverflow = LoOverflow = ProdOV;
2871 LoBound = -(RangeSize - 1);
2872 HiBound = RangeSize;
2873 }
else if (
C.isStrictlyPositive()) {
2875 HiOverflow = LoOverflow = ProdOV;
2881 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2883 APInt DivNeg = -RangeSize;
2884 LoOverflow =
addWithOverflow(LoBound, HiBound, DivNeg,
true) ? -1 : 0;
2892 LoBound = RangeSize + 1;
2893 HiBound = -RangeSize;
2894 if (HiBound == *C2) {
2898 }
else if (
C.isStrictlyPositive()) {
2901 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2907 LoOverflow = HiOverflow = ProdOV;
2920 if (LoOverflow && HiOverflow)
2924 X, ConstantInt::get(Ty, LoBound));
2927 X, ConstantInt::get(Ty, HiBound));
2931 if (LoOverflow && HiOverflow)
2935 X, ConstantInt::get(Ty, LoBound));
2938 X, ConstantInt::get(Ty, HiBound));
2943 if (LoOverflow == +1)
2945 if (LoOverflow == -1)
2947 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, LoBound));
2950 if (HiOverflow == +1)
2952 if (HiOverflow == -1)
2982 bool HasNSW =
Sub->hasNoSignedWrap();
2983 bool HasNUW =
Sub->hasNoUnsignedWrap();
2985 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2987 return new ICmpInst(SwappedPred,
Y, ConstantInt::get(Ty, SubResult));
2995 if (Cmp.isEquality() &&
C.isZero() &&
2996 none_of((
Sub->users()), [](
const User *U) { return isa<PHINode>(U); }))
3004 if (!
Sub->hasOneUse())
3007 if (
Sub->hasNoSignedWrap()) {
3031 (*C2 & (
C - 1)) == (
C - 1))
3044 return new ICmpInst(SwappedPred,
Add, ConstantInt::get(Ty, ~
C));
3050 auto FoldConstant = [&](
bool Val) {
3051 Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
3058 switch (Table.to_ulong()) {
3060 return FoldConstant(
false);
3062 return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) :
nullptr;
3064 return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) :
nullptr;
3066 return Builder.CreateNot(Op0);
3068 return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) :
nullptr;
3070 return Builder.CreateNot(Op1);
3072 return Builder.CreateXor(Op0, Op1);
3074 return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) :
nullptr;
3076 return Builder.CreateAnd(Op0, Op1);
3078 return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) :
nullptr;
3082 return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) :
nullptr;
3086 return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) :
nullptr;
3088 return Builder.CreateOr(Op0, Op1);
3090 return FoldConstant(
true);
3105 Cmp.getType() !=
A->getType() || Cmp.getType() !=
B->getType())
3108 std::bitset<4> Table;
3109 auto ComputeTable = [&](
bool First,
bool Second) -> std::optional<bool> {
3113 auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
3117 return std::nullopt;
3120 for (
unsigned I = 0;
I < 4; ++
I) {
3121 bool First = (
I >> 1) & 1;
3122 bool Second =
I & 1;
3123 if (
auto Res = ComputeTable(
First, Second))
3145 const APInt *ShAmtC;
3153 return new ICmpInst(Pred,
A, ConstantInt::get(
A->getType(),
C));
3165 if (
Add->hasNoUnsignedWrap() &&
3168 APInt NewC =
C.usub_ov(*C2, Overflow);
3172 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, NewC));
3177 if (
Add->hasNoSignedWrap() &&
3180 APInt NewC =
C.ssub_ov(*C2, Overflow);
3184 return new ICmpInst(ChosenPred,
X, ConstantInt::get(Ty, NewC));
3188 C.isNonNegative() && (
C - *C2).isNonNegative() &&
3191 .isAllNonNegative())
3193 ConstantInt::get(Ty,
C - *C2));
3198 if (Cmp.isSigned()) {
3199 if (
Lower.isSignMask())
3201 if (
Upper.isSignMask())
3204 if (
Lower.isMinValue())
3206 if (
Upper.isMinValue())
3239 if (!
Add->hasOneUse())
3254 ConstantInt::get(Ty,
C * 2));
3268 Builder.CreateAdd(
X, ConstantInt::get(Ty, *C2 -
C - 1)),
3269 ConstantInt::get(Ty, ~
C));
3274 Type *NewCmpTy = V->getType();
3276 if (shouldChangeType(Ty, NewCmpTy)) {
3287 :
Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
3288 ConstantInt::get(NewCmpTy, EquivInt));
3310 Value *EqualVal =
SI->getTrueValue();
3311 Value *UnequalVal =
SI->getFalseValue();
3334 auto FlippedStrictness =
3336 if (!FlippedStrictness)
3339 "basic correctness failure");
3340 RHS2 = FlippedStrictness->second;
3352 assert(
C &&
"Cmp RHS should be a constant int!");
3358 Value *OrigLHS, *OrigRHS;
3359 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3360 if (Cmp.hasOneUse() &&
3363 assert(C1LessThan && C2Equal && C3GreaterThan);
3366 C1LessThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3368 Cmp.getPredicate());
3370 C3GreaterThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3381 if (TrueWhenLessThan)
3387 if (TrueWhenGreaterThan)
3402 Value *Op1 = Cmp.getOperand(1);
3403 Value *BCSrcOp = Bitcast->getOperand(0);
3404 Type *SrcType = Bitcast->getSrcTy();
3405 Type *DstType = Bitcast->getType();
3409 if (SrcType->isVectorTy() == DstType->isVectorTy() &&
3410 SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
3425 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(), 1));
3452 Type *XType =
X->getType();
3455 if (!(XType->
isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3470 Type *FPType = SrcType->getScalarType();
3471 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3472 Attribute::NoImplicitFloat) &&
3473 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3479 Builder.createIsFPClass(BCSrcOp, Mask));
3486 if (!
match(Cmp.getOperand(1),
m_APInt(
C)) || !DstType->isIntegerTy() ||
3487 !SrcType->isIntOrIntVectorTy())
3497 if (Cmp.isEquality() &&
C->isAllOnes() && Bitcast->hasOneUse()) {
3498 if (
Value *NotBCSrcOp =
3500 Value *Cast =
Builder.CreateBitCast(NotBCSrcOp, DstType);
3509 if (Cmp.isEquality() &&
C->isZero() && Bitcast->hasOneUse() &&
3512 Type *NewType =
Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3532 if (
C->isSplat(EltTy->getBitWidth())) {
3539 Value *Extract =
Builder.CreateExtractElement(Vec, Elem);
3540 Value *NewC = ConstantInt::get(EltTy,
C->trunc(EltTy->getBitWidth()));
3541 return new ICmpInst(Pred, Extract, NewC);
3577 Value *Cmp0 = Cmp.getOperand(0);
3579 if (
C->isZero() && Cmp.isEquality() && Cmp0->
hasOneUse() &&
3586 return new ICmpInst(Cmp.getPredicate(),
X,
Y);
3601 if (!Cmp.isEquality())
3610 case Instruction::SRem:
3621 case Instruction::Add: {
3628 }
else if (
C.isZero()) {
3631 if (
Value *NegVal = dyn_castNegVal(BOp1))
3632 return new ICmpInst(Pred, BOp0, NegVal);
3633 if (
Value *NegVal = dyn_castNegVal(BOp0))
3634 return new ICmpInst(Pred, NegVal, BOp1);
3643 return new ICmpInst(Pred, BOp0, Neg);
3648 case Instruction::Xor:
3653 }
else if (
C.isZero()) {
3655 return new ICmpInst(Pred, BOp0, BOp1);
3658 case Instruction::Or: {
3679 Cond->getType() == Cmp.getType()) {
3717 case Instruction::UDiv:
3718 case Instruction::SDiv:
3728 return new ICmpInst(Pred, BOp0, BOp1);
3731 Instruction::Mul, BO->
getOpcode() == Instruction::SDiv, BOp1,
3732 Cmp.getOperand(1), BO);
3736 return new ICmpInst(Pred, YC, BOp0);
3740 if (BO->
getOpcode() == Instruction::UDiv &&
C.isZero()) {
3743 return new ICmpInst(NewPred, BOp1, BOp0);
3757 "Non-ctpop intrin in ctpop fold");
3792 Type *Ty =
II->getType();
3796 switch (
II->getIntrinsicID()) {
3797 case Intrinsic::abs:
3800 if (
C.isZero() ||
C.isMinSignedValue())
3801 return new ICmpInst(Pred,
II->getArgOperand(0), ConstantInt::get(Ty,
C));
3804 case Intrinsic::bswap:
3806 return new ICmpInst(Pred,
II->getArgOperand(0),
3807 ConstantInt::get(Ty,
C.byteSwap()));
3809 case Intrinsic::bitreverse:
3811 return new ICmpInst(Pred,
II->getArgOperand(0),
3812 ConstantInt::get(Ty,
C.reverseBits()));
3814 case Intrinsic::ctlz:
3815 case Intrinsic::cttz: {
3818 return new ICmpInst(Pred,
II->getArgOperand(0),
3824 unsigned Num =
C.getLimitedValue(
BitWidth);
3826 bool IsTrailing =
II->getIntrinsicID() == Intrinsic::cttz;
3829 APInt Mask2 = IsTrailing
3833 ConstantInt::get(Ty, Mask2));
3838 case Intrinsic::ctpop: {
3841 bool IsZero =
C.isZero();
3843 return new ICmpInst(Pred,
II->getArgOperand(0),
3850 case Intrinsic::fshl:
3851 case Intrinsic::fshr:
3852 if (
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3853 const APInt *RotAmtC;
3857 return new ICmpInst(Pred,
II->getArgOperand(0),
3858 II->getIntrinsicID() == Intrinsic::fshl
3859 ? ConstantInt::get(Ty,
C.rotr(*RotAmtC))
3860 : ConstantInt::get(Ty,
C.rotl(*RotAmtC)));
3864 case Intrinsic::umax:
3865 case Intrinsic::uadd_sat: {
3868 if (
C.isZero() &&
II->hasOneUse()) {
3875 case Intrinsic::ssub_sat:
3880 if (
C.isZero() &&
II->getType()->getScalarSizeInBits() > 1)
3881 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
3883 case Intrinsic::usub_sat: {
3888 return new ICmpInst(NewPred,
II->getArgOperand(0),
II->getArgOperand(1));
3903 assert(Cmp.isEquality());
3906 Value *Op0 = Cmp.getOperand(0);
3907 Value *Op1 = Cmp.getOperand(1);
3910 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3913 switch (IIOp0->getIntrinsicID()) {
3914 case Intrinsic::bswap:
3915 case Intrinsic::bitreverse:
3918 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3919 case Intrinsic::fshl:
3920 case Intrinsic::fshr: {
3923 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3925 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3927 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3928 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3934 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3939 Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3940 Value *CombinedRotate = Builder.CreateIntrinsic(
3941 Op0->
getType(), IIOp0->getIntrinsicID(),
3942 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3943 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3961 switch (
II->getIntrinsicID()) {
3964 case Intrinsic::fshl:
3965 case Intrinsic::fshr:
3966 if (Cmp.isEquality() &&
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3968 if (
C.isZero() ||
C.isAllOnes())
3969 return new ICmpInst(Pred,
II->getArgOperand(0), Cmp.getOperand(1));
3983 case Instruction::Xor:
3987 case Instruction::And:
3991 case Instruction::Or:
3995 case Instruction::Mul:
3999 case Instruction::Shl:
4003 case Instruction::LShr:
4004 case Instruction::AShr:
4008 case Instruction::SRem:
4012 case Instruction::UDiv:
4016 case Instruction::SDiv:
4020 case Instruction::Sub:
4024 case Instruction::Add:
4048 if (!
II->hasOneUse())
4064 Value *Op0 =
II->getOperand(0);
4065 Value *Op1 =
II->getOperand(1);
4074 switch (
II->getIntrinsicID()) {
4077 "This function only works with usub_sat and uadd_sat for now!");
4078 case Intrinsic::uadd_sat:
4081 case Intrinsic::usub_sat:
4091 II->getBinaryOp(), *COp1,
II->getNoWrapKind());
4098 if (
II->getBinaryOp() == Instruction::Add)
4104 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4106 std::optional<ConstantRange> Combination;
4107 if (CombiningOp == Instruction::BinaryOps::Or)
4119 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4123 Builder.CreateAdd(Op0, ConstantInt::get(Op1->
getType(), EquivOffset)),
4124 ConstantInt::get(Op1->
getType(), EquivInt));
4131 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4136 NewPredicate = Pred;
4140 else if (
C.isAllOnes())
4148 else if (
C.isZero())
4165 if (!
C.isZero() && !
C.isAllOnes())
4176 if (
I->getIntrinsicID() == Intrinsic::scmp)
4190 switch (
II->getIntrinsicID()) {
4193 case Intrinsic::uadd_sat:
4194 case Intrinsic::usub_sat:
4199 case Intrinsic::ctpop: {
4204 case Intrinsic::scmp:
4205 case Intrinsic::ucmp:
4211 if (Cmp.isEquality())
4214 Type *Ty =
II->getType();
4216 switch (
II->getIntrinsicID()) {
4217 case Intrinsic::ctpop: {
4229 case Intrinsic::ctlz: {
4232 unsigned Num =
C.getLimitedValue();
4235 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4240 unsigned Num =
C.getLimitedValue();
4243 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4247 case Intrinsic::cttz: {
4249 if (!
II->hasOneUse())
4256 Builder.CreateAnd(
II->getArgOperand(0), Mask),
4264 Builder.CreateAnd(
II->getArgOperand(0), Mask),
4269 case Intrinsic::ssub_sat:
4276 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
4280 II->getArgOperand(1));
4284 II->getArgOperand(1));
4287 case Intrinsic::abs: {
4288 if (!
II->hasOneUse())
4292 bool IsIntMinPoison =
4299 Builder.CreateAdd(
X, ConstantInt::get(Ty,
C)),
4300 ConstantInt::get(Ty, 2 *
C));
4307 Builder.CreateAdd(
X, ConstantInt::get(Ty,
C - 1)),
4308 ConstantInt::get(Ty, 2 * (
C - 1)));
4322 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
4329 case Instruction::IntToPtr:
4338 case Instruction::Load:
4355 auto SimplifyOp = [&](
Value *
Op,
bool SelectCondIsTrue) ->
Value * {
4359 SI->getCondition(), Pred,
Op, RHS,
DL, SelectCondIsTrue))
4360 return ConstantInt::get(
I.getType(), *Impl);
4365 Value *Op1 = SimplifyOp(
SI->getOperand(1),
true);
4369 Value *Op2 = SimplifyOp(
SI->getOperand(2),
false);
4373 auto Simplifies = [&](
Value *
Op,
unsigned Idx) {
4388 bool Transform =
false;
4391 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4393 if (
SI->hasOneUse())
4396 else if (CI && !CI->
isZero())
4404 Op1 =
Builder.CreateICmp(Pred,
SI->getOperand(1), RHS,
I.getName());
4406 Op2 =
Builder.CreateICmp(Pred,
SI->getOperand(2), RHS,
I.getName());
4416 unsigned Depth = 0) {
4419 if (V->getType()->getScalarSizeInBits() == 1)
4427 switch (
I->getOpcode()) {
4428 case Instruction::ZExt:
4431 case Instruction::SExt:
4435 case Instruction::And:
4436 case Instruction::Or:
4443 case Instruction::Xor:
4453 case Instruction::Select:
4457 case Instruction::Shl:
4460 case Instruction::LShr:
4463 case Instruction::AShr:
4467 case Instruction::Add:
4473 case Instruction::Sub:
4479 case Instruction::Call: {
4481 switch (
II->getIntrinsicID()) {
4484 case Intrinsic::umax:
4485 case Intrinsic::smax:
4486 case Intrinsic::umin:
4487 case Intrinsic::smin:
4492 case Intrinsic::bitreverse:
4582 auto IsLowBitMask = [&]() {
4600 auto Check = [&]() {
4618 auto Check = [&]() {
4637 if (!IsLowBitMask())
4656 const APInt *C0, *C1;
4673 const APInt &MaskedBits = *C0;
4674 assert(MaskedBits != 0 &&
"shift by zero should be folded away already.");
4695 auto *XType =
X->getType();
4696 const unsigned XBitWidth = XType->getScalarSizeInBits();
4698 assert(
BitWidth.ugt(MaskedBits) &&
"shifts should leave some bits untouched");
4711 Value *T0 = Builder.CreateAdd(
X, ConstantInt::get(XType, AddCst));
4713 Value *
T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
4729 !
I.getOperand(0)->hasOneUse())
4754 assert(NarrowestTy ==
I.getOperand(0)->getType() &&
4755 "We did not look past any shifts while matching XShift though.");
4756 bool HadTrunc = WidestTy !=
I.getOperand(0)->getType();
4763 auto XShiftOpcode = XShift->
getOpcode();
4764 if (XShiftOpcode == YShift->
getOpcode())
4767 Value *
X, *XShAmt, *
Y, *YShAmt;
4776 if (!
match(
I.getOperand(0),
4802 unsigned MaximalPossibleTotalShiftAmount =
4805 APInt MaximalRepresentableShiftAmount =
4807 if (MaximalRepresentableShiftAmount.
ult(MaximalPossibleTotalShiftAmount))
4816 if (NewShAmt->getType() != WidestTy) {
4826 if (!
match(NewShAmt,
4828 APInt(WidestBitWidth, WidestBitWidth))))
4833 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4839 ? NewShAmt->getSplatValue()
4842 if (NewShAmtSplat &&
4852 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4853 if (MaxActiveBits <= 1)
4863 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4864 if (MaxActiveBits <= 1)
4867 if (NewShAmtSplat) {
4870 if (AdjNewShAmt.
ule(MinLeadZero))
4881 X = Builder.CreateZExt(
X, WidestTy);
4882 Y = Builder.CreateZExt(
Y, WidestTy);
4884 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4885 ? Builder.CreateLShr(
X, NewShAmt)
4886 : Builder.CreateShl(
X, NewShAmt);
4887 Value *
T1 = Builder.CreateAnd(T0,
Y);
4888 return Builder.CreateICmp(
I.getPredicate(),
T1,
4906 if (!
I.isEquality() &&
4916 NeedNegation =
false;
4919 NeedNegation =
true;
4925 if (
I.isEquality() &&
4940 bool MulHadOtherUses =
Mul && !
Mul->hasOneUse();
4941 if (MulHadOtherUses)
4945 Div->
getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4946 : Intrinsic::smul_with_overflow,
4947 X->getType(), {X, Y},
nullptr,
"mul");
4952 if (MulHadOtherUses)
4957 Res =
Builder.CreateNot(Res,
"mul.not.ov");
4961 if (MulHadOtherUses)
4987 Type *Ty =
X->getType();
4991 Value *
And = Builder.CreateAnd(
X, MaxSignedVal);
5001 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5063 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5098 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5114 return new ICmpInst(PredOut, Op0, Op1);
5134 return new ICmpInst(NewPred, Op0, Const);
5146 if (!
C.isPowerOf2())
5159 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5227 return new ICmpInst(NewPred, Op1, Zero);
5236 return new ICmpInst(NewPred, Op0, Zero);
5240 bool NoOp0WrapProblem =
false, NoOp1WrapProblem =
false;
5241 bool Op0HasNUW =
false, Op1HasNUW =
false;
5242 bool Op0HasNSW =
false, Op1HasNSW =
false;
5246 bool &HasNSW,
bool &HasNUW) ->
bool {
5253 }
else if (BO.
getOpcode() == Instruction::Or) {
5261 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
5265 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5269 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5274 if ((
A == Op1 ||
B == Op1) && NoOp0WrapProblem)
5280 if ((
C == Op0 ||
D == Op0) && NoOp1WrapProblem)
5285 if (
A &&
C && (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D) && NoOp0WrapProblem &&
5293 }
else if (
A ==
D) {
5297 }
else if (
B ==
C) {
5314 bool IsNegative) ->
bool {
5315 const APInt *OffsetC;
5327 if (!
C.isStrictlyPositive())
5348 if (
A && NoOp0WrapProblem &&
5349 ShareCommonDivisor(
A, Op1,
B,
5360 if (
C && NoOp1WrapProblem &&
5361 ShareCommonDivisor(Op0,
C,
D,
5374 if (
A &&
C && NoOp0WrapProblem && NoOp1WrapProblem &&
5376 const APInt *AP1, *AP2;
5384 if (AP1Abs.
uge(AP2Abs)) {
5385 APInt Diff = *AP1 - *AP2;
5388 A, C3,
"", Op0HasNUW && Diff.
ule(*AP1), Op0HasNSW);
5391 APInt Diff = *AP2 - *AP1;
5394 C, C3,
"", Op1HasNUW && Diff.
ule(*AP2), Op1HasNSW);
5413 if (BO0 && BO0->
getOpcode() == Instruction::Sub) {
5417 if (BO1 && BO1->
getOpcode() == Instruction::Sub) {
5423 if (
A == Op1 && NoOp0WrapProblem)
5426 if (
C == Op0 && NoOp1WrapProblem)
5446 if (
B &&
D &&
B ==
D && NoOp0WrapProblem && NoOp1WrapProblem)
5450 if (
A &&
C &&
A ==
C && NoOp0WrapProblem && NoOp1WrapProblem)
5458 if (RHSC->isNotMinSignedValue())
5459 return new ICmpInst(
I.getSwappedPredicate(),
X,
5477 if (Op0HasNSW && Op1HasNSW) {
5484 SQ.getWithInstruction(&
I));
5489 SQ.getWithInstruction(&
I));
5490 if (GreaterThan &&
match(GreaterThan,
m_One()))
5497 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5509 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5516 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5527 else if (BO1 && BO1->
getOpcode() == Instruction::SRem &&
5557 case Instruction::Add:
5558 case Instruction::Sub:
5559 case Instruction::Xor: {
5566 if (
C->isSignMask()) {
5572 if (BO0->
getOpcode() == Instruction::Xor &&
C->isMaxSignedValue()) {
5574 NewPred =
I.getSwappedPredicate(NewPred);
5580 case Instruction::Mul: {
5581 if (!
I.isEquality())
5589 if (
unsigned TZs =
C->countr_zero()) {
5595 return new ICmpInst(Pred, And1, And2);
5600 case Instruction::UDiv:
5601 case Instruction::LShr:
5606 case Instruction::SDiv:
5612 case Instruction::AShr:
5617 case Instruction::Shl: {
5618 bool NUW = Op0HasNUW && Op1HasNUW;
5619 bool NSW = Op0HasNSW && Op1HasNSW;
5622 if (!NSW &&
I.isSigned())
5686 auto IsCondKnownTrue = [](
Value *Val) -> std::optional<bool> {
5688 return std::nullopt;
5693 return std::nullopt;
5699 Pred = Pred.dropSameSign();
5702 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5704 if (!CmpXZ.has_value()) {
5710 if (CmpYZ.has_value())
5734 if (!MinMaxCmpXZ.has_value()) {
5742 if (!MinMaxCmpXZ.has_value())
5758 return FoldIntoCmpYZ();
5785 return FoldIntoCmpYZ();
5794 return FoldIntoCmpYZ();
5826 const APInt *
Lo =
nullptr, *
Hi =
nullptr;
5849 I,
Builder.CreateICmp(Pred,
X, ConstantInt::get(
X->getType(),
C)));
5855 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5859 if (
I.isEquality()) {
5894 Type *Ty =
A->getType();
5895 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop,
A);
5897 ConstantInt::get(Ty, 2))
5899 ConstantInt::get(Ty, 1));
5906using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5908 bool AllowRecursion) {
5914 case Instruction::Add:
5915 Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(1));
5916 Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(0));
5918 case Instruction::Sub:
5919 Offsets.emplace_back(Instruction::Add, Inst->
getOperand(1));
5921 case Instruction::Xor:
5922 Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(1));
5923 Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(0));
5925 case Instruction::Shl:
5927 Offsets.emplace_back(Instruction::AShr, Inst->
getOperand(1));
5929 Offsets.emplace_back(Instruction::LShr, Inst->
getOperand(1));
5931 case Instruction::Select:
5932 if (AllowRecursion) {
5967 return Builder.CreateSelect(
5980 assert(
I.isEquality() &&
"Expected an equality icmp");
5981 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5992 case Instruction::AShr: {
5993 const APInt *CV, *CRHS;
5995 CV->
ashr(*CRHS).
shl(*CRHS) == *CV) &&
6001 case Instruction::LShr: {
6002 const APInt *CV, *CRHS;
6004 CV->
lshr(*CRHS).
shl(*CRHS) == *CV) &&
6023 auto ApplyOffset = [&](
Value *V,
unsigned BinOpc,
6026 if (!Sel->hasOneUse())
6028 Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc,
RHS);
6031 Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc,
RHS);
6036 if (
Value *Simplified = ApplyOffsetImpl(V, BinOpc,
RHS))
6041 for (
auto [BinOp,
RHS] : OffsetOps) {
6042 auto BinOpc =
static_cast<unsigned>(BinOp);
6044 auto Op0Result = ApplyOffset(Op0, BinOpc,
RHS);
6045 if (!Op0Result.isValid())
6047 auto Op1Result = ApplyOffset(Op1, BinOpc,
RHS);
6048 if (!Op1Result.isValid())
6051 Value *NewLHS = Op0Result.materialize(Builder);
6052 Value *NewRHS = Op1Result.materialize(Builder);
6053 return new ICmpInst(
I.getPredicate(), NewLHS, NewRHS);
6060 if (!
I.isEquality())
6063 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6067 if (
A == Op1 ||
B == Op1) {
6068 Value *OtherVal =
A == Op1 ?
B :
A;
6096 Value *OtherVal =
A == Op0 ?
B :
A;
6103 Value *
X =
nullptr, *
Y =
nullptr, *Z =
nullptr;
6109 }
else if (
A ==
D) {
6113 }
else if (
B ==
C) {
6117 }
else if (
B ==
D) {
6127 const APInt *C0, *C1;
6129 (*C0 ^ *C1).isNegatedPowerOf2();
6135 int(Op0->
hasOneUse()) + int(Op1->hasOneUse()) +
6137 if (XorIsNegP2 || UseCnt >= 2) {
6140 Op1 =
Builder.CreateAnd(Op1, Z);
6160 (Op0->
hasOneUse() || Op1->hasOneUse())) {
6165 MaskC->
countr_one() ==
A->getType()->getScalarSizeInBits())
6171 const APInt *AP1, *AP2;
6180 if (ShAmt < TypeBits && ShAmt != 0) {
6185 return new ICmpInst(NewPred,
Xor, ConstantInt::get(
A->getType(), CmpVal));
6195 if (ShAmt < TypeBits && ShAmt != 0) {
6215 if (ShAmt < ASize) {
6238 A->getType()->getScalarSizeInBits() ==
BitWidth * 2 &&
6239 (
I.getOperand(0)->hasOneUse() ||
I.getOperand(1)->hasOneUse())) {
6244 Add, ConstantInt::get(
A->getType(),
C.shl(1)));
6271 Builder.CreateIntrinsic(Op0->
getType(), Intrinsic::fshl, {A, A, B}));
6286 std::optional<bool> IsZero = std::nullopt;
6328 Constant *
C = ConstantInt::get(Res->X->getType(), Res->C);
6332 unsigned SrcBits =
X->getType()->getScalarSizeInBits();
6334 if (
II->getIntrinsicID() == Intrinsic::cttz ||
6335 II->getIntrinsicID() == Intrinsic::ctlz) {
6336 unsigned MaxRet = SrcBits;
6362 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6363 bool IsSignedCmp = ICmp.
isSigned();
6371 if (IsZext0 != IsZext1) {
6376 if (ICmp.
isEquality() &&
X->getType()->isIntOrIntVectorTy(1) &&
6377 Y->getType()->isIntOrIntVectorTy(1))
6387 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6388 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6390 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6397 Type *XTy =
X->getType(), *YTy =
Y->getType();
6404 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6406 X =
Builder.CreateCast(CastOpcode,
X, YTy);
6408 Y =
Builder.CreateCast(CastOpcode,
Y, XTy);
6420 if (IsSignedCmp && IsSignedExt)
6433 Type *SrcTy = CastOp0->getSrcTy();
6441 if (IsSignedExt && IsSignedCmp)
6472 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(0));
6473 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(1));
6474 if (SimplifiedOp0 || SimplifiedOp1)
6476 SimplifiedOp0 ? SimplifiedOp0 : ICmp.
getOperand(0),
6477 SimplifiedOp1 ? SimplifiedOp1 : ICmp.
getOperand(1));
6485 Value *Op0Src = CastOp0->getOperand(0);
6486 Type *SrcTy = CastOp0->getSrcTy();
6487 Type *DestTy = CastOp0->getDestTy();
6491 auto CompatibleSizes = [&](
Type *PtrTy,
Type *IntTy) {
6496 return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
6498 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6499 CompatibleSizes(SrcTy, DestTy)) {
6500 Value *NewOp1 =
nullptr;
6502 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6504 NewOp1 = PtrToIntOp1->getOperand(0);
6514 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6515 CompatibleSizes(DestTy, SrcTy)) {
6516 Value *NewOp1 =
nullptr;
6518 Value *IntSrc = IntToPtrOp1->getOperand(0);
6520 NewOp1 = IntToPtrOp1->getOperand(0);
6540 case Instruction::Add:
6541 case Instruction::Sub:
6543 case Instruction::Mul:
6544 return !(
RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
6556 case Instruction::Add:
6561 case Instruction::Sub:
6566 case Instruction::Mul:
6575 bool IsSigned,
Value *LHS,
6586 Builder.SetInsertPoint(&OrigI);
6603 Result = Builder.CreateBinOp(BinaryOp,
LHS,
RHS);
6604 Result->takeName(&OrigI);
6608 Result = Builder.CreateBinOp(BinaryOp,
LHS,
RHS);
6609 Result->takeName(&OrigI);
6613 Inst->setHasNoSignedWrap();
6615 Inst->setHasNoUnsignedWrap();
6638 const APInt *OtherVal,
6648 assert(MulInstr->getOpcode() == Instruction::Mul);
6652 assert(
LHS->getOpcode() == Instruction::ZExt);
6653 assert(
RHS->getOpcode() == Instruction::ZExt);
6657 Type *TyA =
A->getType(), *TyB =
B->getType();
6659 WidthB = TyB->getPrimitiveSizeInBits();
6662 if (WidthB > WidthA) {
6679 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
6680 if (TruncWidth > MulWidth)
6684 if (BO->getOpcode() != Instruction::And)
6687 const APInt &CVal = CI->getValue();
6703 switch (
I.getPredicate()) {
6710 if (MaxVal.
eq(*OtherVal))
6720 if (MaxVal.
eq(*OtherVal))
6734 if (WidthA < MulWidth)
6735 MulA = Builder.CreateZExt(
A, MulType);
6736 if (WidthB < MulWidth)
6737 MulB = Builder.CreateZExt(
B, MulType);
6739 Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
6740 {MulA, MulB},
nullptr,
"umul");
6747 Value *
Mul = Builder.CreateExtractValue(
Call, 0,
"umul.value");
6752 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6757 assert(BO->getOpcode() == Instruction::And);
6761 Value *ShortAnd = Builder.CreateAnd(
Mul, ShortMask);
6762 Value *Zext = Builder.CreateZExt(ShortAnd, BO->
getType());
6774 Value *Res = Builder.CreateExtractValue(
Call, 1);
6795 switch (
I.getPredicate()) {
6826 assert(DI && UI &&
"Instruction not defined\n");
6838 if (Usr != UI && !
DT.dominates(DB, Usr->getParent()))
6853 if (!IC || (IC->getOperand(0) !=
SI && IC->getOperand(1) !=
SI))
6900 const unsigned SIOpd) {
6901 assert((SIOpd == 1 || SIOpd == 2) &&
"Invalid select operand!");
6903 BasicBlock *Succ =
SI->getParent()->getTerminator()->getSuccessor(1);
6917 SI->replaceUsesOutsideBlock(
SI->getOperand(SIOpd),
SI->getParent());
6927 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6932 unsigned BitWidth = Ty->isIntOrIntVectorTy()
6933 ? Ty->getScalarSizeInBits()
6934 :
DL.getPointerTypeSizeInBits(Ty->getScalarType());
6987 if (!Cmp.hasOneUse())
6996 if (!isMinMaxCmp(
I)) {
7001 if (Op1Min == Op0Max)
7006 if (*CmpC == Op0Min + 1)
7008 ConstantInt::get(Op1->getType(), *CmpC - 1));
7018 if (Op1Max == Op0Min)
7023 if (*CmpC == Op0Max - 1)
7025 ConstantInt::get(Op1->getType(), *CmpC + 1));
7035 if (Op1Min == Op0Max)
7039 if (*CmpC == Op0Min + 1)
7041 ConstantInt::get(Op1->getType(), *CmpC - 1));
7046 if (Op1Max == Op0Min)
7050 if (*CmpC == Op0Max - 1)
7052 ConstantInt::get(Op1->getType(), *CmpC + 1));
7069 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
7072 Value *LHS =
nullptr;
7075 *LHSC != Op0KnownZeroInverted)
7081 Type *XTy =
X->getType();
7083 APInt C2 = Op0KnownZeroInverted;
7084 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
7090 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7100 (Op0Known & Op1Known) == Op0Known)
7106 if (Op1Min == Op0Max)
7110 if (Op1Max == Op0Min)
7114 if (Op1Min == Op0Max)
7118 if (Op1Max == Op0Min)
7126 if ((
I.isSigned() || (
I.isUnsigned() && !
I.hasSameSign())) &&
7129 I.setPredicate(
I.getUnsignedPredicate());
7147 return BinaryOperator::CreateAnd(
Builder.CreateIsNull(
X),
Y);
7153 return BinaryOperator::CreateOr(
Builder.CreateIsNull(
X),
Y);
7164 bool IsSExt = ExtI->
getOpcode() == Instruction::SExt;
7166 auto CreateRangeCheck = [&] {
7181 }
else if (!IsSExt || HasOneUse) {
7186 return CreateRangeCheck();
7188 }
else if (IsSExt ?
C->isAllOnes() :
C->isOne()) {
7196 }
else if (!IsSExt || HasOneUse) {
7201 return CreateRangeCheck();
7215 Instruction::ICmp, Pred1,
X,
7234 Value *Op0 =
I.getOperand(0);
7235 Value *Op1 =
I.getOperand(1);
7241 if (!FlippedStrictness)
7244 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7262 I.setName(
I.getName() +
".not");
7273 Value *
A =
I.getOperand(0), *
B =
I.getOperand(1);
7274 assert(
A->getType()->isIntOrIntVectorTy(1) &&
"Bools only");
7280 switch (
I.getPredicate()) {
7289 switch (
I.getPredicate()) {
7299 switch (
I.getPredicate()) {
7308 return BinaryOperator::CreateXor(
A,
B);
7316 return BinaryOperator::CreateAnd(Builder.CreateNot(
A),
B);
7324 return BinaryOperator::CreateAnd(Builder.CreateNot(
B),
A);
7332 return BinaryOperator::CreateOr(Builder.CreateNot(
A),
B);
7340 return BinaryOperator::CreateOr(Builder.CreateNot(
B),
A);
7388 Value *NewX = Builder.CreateLShr(
X,
Y,
X->getName() +
".highbits");
7396 Value *
LHS = Cmp.getOperand(0), *
RHS = Cmp.getOperand(1);
7400 Value *V = Builder.CreateCmp(Pred,
X,
Y, Cmp.getName());
7402 I->copyIRFlags(&Cmp);
7403 Module *M = Cmp.getModule();
7405 M, Intrinsic::vector_reverse, V->getType());
7412 (
LHS->hasOneUse() ||
RHS->hasOneUse()))
7413 return createCmpReverse(Pred, V1, V2);
7417 return createCmpReverse(Pred, V1,
RHS);
7421 return createCmpReverse(Pred,
LHS, V2);
7432 V1Ty == V2->
getType() && (
LHS->hasOneUse() ||
RHS->hasOneUse())) {
7433 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
7446 Constant *ScalarC =
C->getSplatValue(
true);
7454 Value *NewCmp = Builder.CreateCmp(Pred, V1,
C);
7465 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7471 if (
match(Op0, UAddOvResultPat) &&
7482 (Op0 ==
A || Op0 ==
B))
7492 if (!
I.getOperand(0)->getType()->isPointerTy() ||
7494 I.getParent()->getParent(),
7495 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7501 Op->isLaunderOrStripInvariantGroup()) {
7503 Op->getOperand(0),
I.getOperand(1));
7515 Value *Const =
I.getOperand(1);
7533 Type *VecEltTy = VecTy->getElementType();
7535 DL.getTypeSizeInBits(VecEltTy) * VecTy->getNumElements();
7536 if (!
DL.fitsInLegalInteger(ScalarBW))
7540 ? ConstantInt::get(ScalarTy, 0)
7543 Builder.CreateBitCast(Vec, ScalarTy), NewConst);
7555 if (
I.getType()->isVectorTy())
7578 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7581 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7583 if (!
DL.isLegalInteger(NumBits))
7587 auto *ScalarTy = Builder.getIntNTy(NumBits);
7588 LHS = Builder.CreateBitCast(
LHS, ScalarTy,
LHS->getName() +
".scalar");
7589 RHS = Builder.CreateBitCast(
RHS, ScalarTy,
RHS->getName() +
".scalar");
7645 bool IsIntMinPosion =
C->isAllOnesValue();
7657 CxtI, IsIntMinPosion
7658 ?
Builder.CreateICmpSGT(
X, AllOnesValue)
7660 X, ConstantInt::get(
X->getType(),
SMin + 1)));
7666 CxtI, IsIntMinPosion
7667 ?
Builder.CreateICmpSLT(
X, NullValue)
7669 X, ConstantInt::get(
X->getType(),
SMin)));
7682 auto CheckUGT1 = [](
const APInt &Divisor) {
return Divisor.ugt(1); };
7697 auto CheckNE0 = [](
const APInt &Shift) {
return !Shift.isZero(); };
7717 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7724 if (Op0Cplxity < Op1Cplxity) {
7739 if (
Value *V = dyn_castNegVal(SelectTrue)) {
7740 if (V == SelectFalse)
7742 }
else if (
Value *V = dyn_castNegVal(SelectFalse)) {
7743 if (V == SelectTrue)
7803 if (
C->isNonNegative())
7807 ConstantInt::get(
X->getType(), ~*
C));
7813 if (
C->isNonNegative())
7817 ConstantInt::get(
X->getType(), ~*
C));
7873 if (
I.isCommutative()) {
7874 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
7903 (Op0->
hasOneUse() || Op1->hasOneUse())) {
7908 Cond, Res, NewICMP,
"",
nullptr,
7915 Cond, NewICMP, Res,
"",
nullptr,
7931 bool I0NUW = I0->hasNoUnsignedWrap();
7932 bool I1NUW = I1->hasNoUnsignedWrap();
7933 bool I0NSW = I0->hasNoSignedWrap();
7934 bool I1NSW = I1->hasNoSignedWrap();
7938 ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7940 ConstantInt::get(Op0->
getType(), 0));
7947 assert(Op1->getType()->isPointerTy() &&
7948 "Comparing pointer with non-pointer?");
7977 bool ConsumesOp0, ConsumesOp1;
7980 (ConsumesOp0 || ConsumesOp1)) {
7983 assert(InvOp0 && InvOp1 &&
7984 "Mismatch between isFreeToInvert and getFreelyInverted");
7985 return new ICmpInst(
I.getSwappedPredicate(), InvOp0, InvOp1);
7997 if (AddI->
getOpcode() == Instruction::Add &&
7998 OptimizeOverflowCheck(Instruction::Add,
false,
X,
Y, *AddI,
7999 Result, Overflow)) {
8017 if ((
I.isUnsigned() ||
I.isEquality()) &&
8020 Y->getType()->getScalarSizeInBits() == 1 &&
8021 (Op0->
hasOneUse() || Op1->hasOneUse())) {
8028 unsigned ShiftOpc = ShiftI->
getOpcode();
8029 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
8030 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
8064 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
8071 if (
I.getType()->isVectorTy())
8083 const APInt *C1, *C2;
8090 Type *InputTy =
A->getType();
8097 TruncC1.
setBit(InputBitWidth - 1);
8101 ConstantInt::get(InputTy, C2->
trunc(InputBitWidth)));
8121 if (MantissaWidth == -1)
8128 if (
I.isEquality()) {
8130 bool IsExact =
false;
8131 APSInt RHSCvt(IntWidth, LHSUnsigned);
8140 if (*RHS != RHSRoundInt) {
8160 if ((
int)IntWidth > MantissaWidth) {
8162 int Exp =
ilogb(*RHS);
8165 if (MaxExponent < (
int)IntWidth - !LHSUnsigned)
8171 if (MantissaWidth <= Exp && Exp <= (
int)IntWidth - !LHSUnsigned)
8180 assert(!RHS->isNaN() &&
"NaN comparison not already folded!");
8183 switch (
I.getPredicate()) {
8274 APSInt RHSInt(IntWidth, LHSUnsigned);
8277 if (!RHS->isZero()) {
8292 if (RHS->isNegative())
8298 if (RHS->isNegative())
8304 if (RHS->isNegative())
8311 if (!RHS->isNegative())
8317 if (RHS->isNegative())
8323 if (RHS->isNegative())
8329 if (RHS->isNegative())
8336 if (!RHS->isNegative())
8355 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8366 unsigned Pred =
I.getPredicate();
8374 if (!Res00 || !Res01 || !Res10 || !Res11)
8383 std::bitset<4> Table;
8441 if (
C->isNegative())
8442 Pred =
I.getSwappedPredicate();
8469 "X ord/uno NaN should be folded away by simplifyFCmpInst()");
8475 bool RoundDown =
false;
8496 auto NextValue = [](
const APFloat &
Value,
bool RoundDown) {
8498 NextValue.
next(RoundDown);
8502 APFloat NextCValue = NextValue(*CValue, RoundDown);
8507 APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
8508 APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
8515 APFloat PrevCValue = NextValue(*CValue, !RoundDown);
8516 APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
8518 ExtNextCValue = ExtCValue + Bias;
8525 C.getType()->getScalarType()->getFltSemantics();
8528 APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
8529 if (MidValue != *CValue)
8530 ExtMidValue.
next(!RoundDown);
8538 if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())
8542 APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
8543 if (ConvertFltSema(NextExtMidValue, SrcFltSema).
isFinite())
8548 ConstantFP::get(DestType, ExtMidValue),
"", &
I);
8561 if (!
C->isPosZero()) {
8562 if (!
C->isSmallestNormalized())
8575 switch (
I.getPredicate()) {
8601 switch (
I.getPredicate()) {
8626 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8631 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8645 return replacePredAndOp0(&
I,
I.getPredicate(),
X);
8668 I.setHasNoInfs(
false);
8670 switch (
I.getPredicate()) {
8715 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8720 Pred =
I.getSwappedPredicate();
8729 return new FCmpInst(Pred, Op0, Zero,
"", &
I);
8765 I.getFunction()->getDenormalMode(
8772 I.setHasNoNaNs(
true);
8797 if (MantissaWidth != -1 &&
ilogb(*
C) < MantissaWidth) {
8799 I.setPredicate(
I.getSwappedPredicate());
8836 if (!IsStrictLt && !IsStrictGt && !IsGe)
8858 }
else if (
match(FAbsArg,
8866 if (
A->getType() !=
B->getType())
8881 Type *OpType =
LHS->getType();
8887 if (!FloorX && !CeilX) {
8891 Pred =
I.getSwappedPredicate();
8967 if (!
I || !(
I->getOpcode() == Instruction::SIToFP ||
8968 I->getOpcode() == Instruction::UIToFP))
8971 bool IsUnsigned =
I->getOpcode() == Instruction::UIToFP;
8972 unsigned BitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
8995 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8997 SQ.getWithInstruction(&
I)))
9002 assert(OpType == Op1->getType() &&
"fcmp with different-typed operands?");
9027 if (
I.isCommutative()) {
9028 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
9050 return new FCmpInst(
I.getSwappedPredicate(),
X,
Y,
"", &
I);
9066 bool IsRedundantMinMaxClamp =
9128 !
F.getDenormalMode(Op1->getType()->getScalarType()->getFltSemantics())
9129 .inputsMayBeZero()) {
9137 Type *IntTy =
X->getType();
9138 const APInt &SignMask =
~APInt::getSignMask(IntTy->getScalarSizeInBits());
9139 Value *MaskX =
Builder.CreateAnd(
X, ConstantInt::get(IntTy, SignMask));
9149 case Instruction::Select:
9157 case Instruction::FSub:
9162 case Instruction::PHI:
9166 case Instruction::SIToFP:
9167 case Instruction::UIToFP:
9171 case Instruction::FDiv:
9175 case Instruction::Load:
9181 case Instruction::FPTrunc:
9208 return new FCmpInst(
I.getSwappedPredicate(),
X, NegC,
"", &
I);
9222 X->getType() ==
Y->getType())
9233 X->getType()->getScalarType()->getFltSemantics();
9269 Constant *NewC = ConstantFP::get(
X->getType(), TruncC);
9282 Type *IntType =
Builder.getIntNTy(
X->getType()->getScalarSizeInBits());
9295 Value *CanonLHS =
nullptr;
9298 if (CanonLHS == Op1)
9299 return new FCmpInst(Pred, Op1, Op1,
"", &
I);
9301 Value *CanonRHS =
nullptr;
9304 if (CanonRHS == Op0)
9305 return new FCmpInst(Pred, Op0, Op0,
"", &
I);
9308 if (CanonLHS && CanonRHS)
9309 return new FCmpInst(Pred, CanonLHS, CanonRHS,
"", &
I);
9312 if (
I.getType()->isVectorTy())
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static bool isMinMaxCmpSelectEliminable(SelectPatternFlavor Flavor, Value *A, Value *B)
Returns true if a select that implements a min/max is redundant and select result can be replaced wit...
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpOfVectorReduce(ICmpInst &I, const DataLayout &DL, IRBuilderBase &Builder)
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static Instruction * foldFCmpFAbsFSubIntToFP(FCmpInst &I, InstCombinerImpl &IC)
Fold: fabs(uitofp(a) - uitofp(b)) pred C --> a == b where 'pred' is olt, ult, ogt,...
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardZero
static constexpr roundingMode rmNearestTiesToEven
opStatus
IEEE-754R 7: Default exception handling.
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus next(bool nextDown)
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
opStatus roundToIntegral(roundingMode RM)
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool eq(const APInt &RHS) const
Equality comparison.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void flipAllBits()
Toggle every bit to its opposite value.
unsigned countl_one() const
Count the number of leading one bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM Basic Block Representation.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
static LLVM_ABI Predicate getFlippedStrictnessPredicate(Predicate pred)
This is a static version that you can use without an instruction available.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isIntPredicate(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
Conditional Branch instruction.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers,...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
This instruction compares its operands according to the predicate given to the constructor.
static bool isCommutative(Predicate Pred)
static bool isEquality(Predicate Pred)
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
LLVM_ABI Type * getSourceElementType() const
Value * getPointerOperand()
GEPNoWrapFlags getNoWrapFlags() const
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Common base class shared among various IRBuilders.
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth...
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global v...
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min)
Match and fold patterns like: icmp eq/ne X, min(max(X, Lo), Hi) which represents a range check and ca...
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y ((x * y) ?
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns b...
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldCmpSelectOfConstants(CmpInst &I)
Fold fcmp/icmp pred (select C1, TV1, FV1), (select C2, TV2, FV2) where all true/false values are cons...
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than,...
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known t...
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit te...
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq (num + mask) & ~mask, num to icmp eq (and num, mask), 0 Where mask is a low bit mask.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1) ((X s>> ShiftC) ^ X) u> (C - 1) -...
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
bool canBeCastedExactlyIntToFP(Value *V, Type *FPTy, bool IsSigned, const Instruction *CxtI=nullptr) const
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
static bool isMin(Intrinsic::ID ID)
Whether the intrinsic is a smin or umin.
static bool isSigned(Intrinsic::ID ID)
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool contains(const_arg_type key) const
Check if the SetVector contains the given key.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
LLVM_ABI const fltSemantics & getFltSemantics() const
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
iterator_range< use_iterator > uses()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_Constant()
Match an arbitrary Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
SelectLike_match< CondTy, LTy, RTy > m_SelectLike(const CondTy &C, const LTy &TrueC, const RTy &FalseC)
Matches a value that behaves like a boolean-controlled select, i.e.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set.
auto m_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
cstfp_pred_ty< is_finitenonzero > m_FiniteNonZero()
Match a finite non-zero FP constant.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
This is an optimization pass for GlobalISel generic memory operations.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
cl::opt< bool > ProfcheckDisableMetadataFixes
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)
Decompose a pointer into a linear expression.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
constexpr unsigned BitWidth
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be positive (i.e.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, const SimplifyQuery &SQ, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV, Instruction *MDFrom)
static OffsetResult value(Value *V)
static OffsetResult invalid()
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isConstant() const
Returns true if we know the value of all bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
Linear expression BasePtr + Index * Scale + Offset.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min or a max pattern.
SimplifyQuery getWithInstruction(const Instruction *I) const
A MapVector that performs no allocations if smaller than a certain size.
Capture information for a specific Use.