83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
343#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
363 OS <<
"(ptrto" << OpS <<
" " << *
Op->getType() <<
" " << *
Op <<
" to "
370 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
377 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
384 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
413 const char *OpStr =
nullptr;
426 OpStr =
" umin_seq ";
450 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
457 OS <<
"***COULDNOTCOMPUTE***";
535 if (!
Mul)
return false;
539 if (!SC)
return false;
557 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
559 UniqueSCEVs.InsertNode(S, IP);
574 ConstantInt::get(ITy, V, isSigned,
true));
582 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
585 UniqueSCEVs.InsertNode(S, IP);
606 "Must be a non-bit-width-changing pointer-to-integer cast!");
613 "Must be a non-bit-width-changing pointer-to-integer cast!");
625 "Cannot truncate non-integer value!");
632 "Cannot zero extend non-integer value!");
639 "Cannot sign extend non-integer value!");
644 SE->forgetMemoizedResults({
this});
647 SE->UniqueSCEVs.RemoveNode(
this);
653void SCEVUnknown::allUsesReplacedWith(
Value *New) {
655 SE->forgetMemoizedResults({
this});
658 SE->UniqueSCEVs.RemoveNode(
this);
680 if (LIsPointer != RIsPointer)
681 return (
int)LIsPointer - (int)RIsPointer;
686 return (
int)LID - (int)RID;
691 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
692 return (
int)LArgNo - (int)RArgNo;
698 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
701 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
702 auto LT = GV->getLinkage();
709 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
710 return LGV->getName().compare(RGV->getName());
721 if (LParent != RParent) {
724 if (LDepth != RDepth)
725 return (
int)LDepth - (int)RDepth;
729 unsigned LNumOps = LInst->getNumOperands(),
730 RNumOps = RInst->getNumOperands();
731 if (LNumOps != RNumOps)
732 return (
int)LNumOps - (int)RNumOps;
734 for (
unsigned Idx :
seq(LNumOps)) {
736 RInst->getOperand(Idx),
Depth + 1);
750static std::optional<int>
760 return (
int)LType - (int)RType;
785 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
786 if (LBitWidth != RBitWidth)
787 return (
int)LBitWidth - (int)RBitWidth;
788 return LA.
ult(
RA) ? -1 : 1;
794 return LTy->getBitWidth() - RTy->getBitWidth();
805 if (LLoop != RLoop) {
807 assert(LHead != RHead &&
"Two loops share the same header?");
811 "No dominance between recurrences used by one SCEV?");
835 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
836 if (LNumOps != RNumOps)
837 return (
int)LNumOps - (int)RNumOps;
839 for (
unsigned i = 0; i != LNumOps; ++i) {
865 if (
Ops.size() < 2)
return;
870 return Complexity && *Complexity < 0;
872 if (
Ops.size() == 2) {
876 if (IsLessComplex(
RHS,
LHS))
889 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
895 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
900 if (i == e-2)
return;
922template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
926 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
928 for (
unsigned Idx = 0; Idx <
Ops.size();) {
936 Ops.erase(
Ops.begin() + Idx);
943 assert(Folded &&
"Must have folded value");
947 if (Folded && IsAbsorber(Folded->
getAPInt()))
951 if (Folded && !IsIdentity(Folded->
getAPInt()))
952 Ops.insert(
Ops.begin(), Folded);
954 return Ops.size() == 1 ?
Ops[0] :
nullptr;
1029 APInt OddFactorial(W, 1);
1031 for (
unsigned i = 3; i <= K; ++i) {
1034 OddFactorial *= (i >> TwoFactors);
1038 unsigned CalculationBits = W +
T;
1052 for (
unsigned i = 1; i != K; ++i) {
1085 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1114 ConversionFn CreatePtrCast;
1118 ConversionFn CreatePtrCast)
1119 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1122 Type *TargetTy, ConversionFn CreatePtrCast) {
1124 return Rewriter.visit(Scev);
1160 "Should only reach pointer-typed SCEVUnknown's.");
1165 return SE.getZero(TargetTy);
1166 return CreatePtrCast(Expr);
1171 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1190 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1195 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1197 SCEV *S =
new (SCEVAllocator)
1199 UniqueSCEVs.InsertNode(S, IP);
1202 return static_cast<const SCEV *
>(S);
1205 "We must have succeeded in sinking the cast, "
1206 "and ending up with an integer-typed expression!");
1211 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1215 if (DL.hasUnstableRepresentation(
Op->getType()))
1218 Type *Ty = DL.getAddressType(
Op->getType());
1229 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1231 SCEV *S =
new (SCEVAllocator)
1233 UniqueSCEVs.InsertNode(S, IP);
1236 return static_cast<const SCEV *
>(S);
1239 "We must have succeeded in sinking the cast, "
1240 "and ending up with an integer-typed expression!");
1245 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1257 "This is not a truncating conversion!");
1259 "This is not a conversion to a SCEVable type!");
1260 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1268 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1290 UniqueSCEVs.InsertNode(S, IP);
1303 unsigned numTruncs = 0;
1304 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1312 if (numTruncs < 2) {
1322 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1329 for (
const SCEV *
Op : AddRec->operands())
1344 UniqueSCEVs.InsertNode(S, IP);
1385struct ExtendOpTraitsBase {
1386 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1391template <
typename ExtendOp>
struct ExtendOpTraits {
1407 static const GetExtendExprTy GetExtendExpr;
1409 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1410 ICmpInst::Predicate *Pred,
1411 ScalarEvolution *SE) {
1416const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1423 static const GetExtendExprTy GetExtendExpr;
1425 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1426 ICmpInst::Predicate *Pred,
1427 ScalarEvolution *SE) {
1432const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1444template <
typename ExtendOpTy>
1447 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1448 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1464 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1477 auto PreStartFlags =
1495 const SCEV *OperandExtendedStart =
1497 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1498 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1510 const SCEV *OverflowLimit =
1511 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1513 if (OverflowLimit &&
1521template <
typename ExtendOpTy>
1525 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1533 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1568template <
typename ExtendOpTy>
1569bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1572 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1582 APInt StartAI = StartC->
getAPInt();
1584 for (
unsigned Delta : {-2, -1, 1, 2}) {
1585 const SCEV *PreStart =
getConstant(StartAI - Delta);
1587 FoldingSetNodeID
ID;
1589 ID.AddPointer(PreStart);
1590 ID.AddPointer(Step);
1594 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1598 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1601 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1602 DeltaS, &Pred,
this);
1620 const unsigned BitWidth =
C.getBitWidth();
1638 const APInt &ConstantStart,
1657 auto &UserIDs = FoldCacheUser[
I.first->second];
1658 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1659 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1660 if (UserIDs[
I] ==
ID) {
1665 I.first->second = S;
1667 FoldCacheUser[S].push_back(
ID);
1673 "This is not an extending conversion!");
1675 "This is not a conversion to a SCEVable type!");
1676 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1680 if (
const SCEV *S = FoldCache.lookup(
ID))
1692 "This is not an extending conversion!");
1694 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1711 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1715 UniqueSCEVs.InsertNode(S, IP);
1725 const SCEV *
X = ST->getOperand();
1739 if (AR->isAffine()) {
1740 const SCEV *Start = AR->getStart();
1741 const SCEV *Step = AR->getStepRecurrence(*
this);
1743 const Loop *L = AR->getLoop();
1747 if (AR->hasNoUnsignedWrap()) {
1768 const SCEV *CastedMaxBECount =
1772 if (MaxBECount == RecastedMaxBECount) {
1782 const SCEV *WideMaxBECount =
1784 const SCEV *OperandExtendedAdd =
1790 if (ZAdd == OperandExtendedAdd) {
1801 OperandExtendedAdd =
1807 if (ZAdd == OperandExtendedAdd) {
1828 !AC.assumptions().empty()) {
1830 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1832 if (AR->hasNoUnsignedWrap()) {
1867 const APInt &
C = SC->getAPInt();
1871 const SCEV *SResidual =
1880 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1905 if (SA->hasNoUnsignedWrap()) {
1926 const SCEV *SResidual =
1938 if (
SM->hasNoUnsignedWrap()) {
1960 const SCEV *TruncRHS;
1997 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2000 UniqueSCEVs.InsertNode(S, IP);
2009 "This is not an extending conversion!");
2011 "This is not a conversion to a SCEVable type!");
2012 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2016 if (
const SCEV *S = FoldCache.lookup(
ID))
2028 "This is not an extending conversion!");
2030 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2052 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2057 UniqueSCEVs.InsertNode(S, IP);
2067 const SCEV *
X = ST->getOperand();
2078 if (SA->hasNoSignedWrap()) {
2100 const SCEV *SResidual =
2114 if (AR->isAffine()) {
2115 const SCEV *Start = AR->getStart();
2116 const SCEV *Step = AR->getStepRecurrence(*
this);
2118 const Loop *L = AR->getLoop();
2122 if (AR->hasNoSignedWrap()) {
2144 const SCEV *CastedMaxBECount =
2148 if (MaxBECount == RecastedMaxBECount) {
2158 const SCEV *WideMaxBECount =
2160 const SCEV *OperandExtendedAdd =
2166 if (SAdd == OperandExtendedAdd) {
2177 OperandExtendedAdd =
2183 if (SAdd == OperandExtendedAdd) {
2203 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2205 if (AR->hasNoSignedWrap()) {
2220 const APInt &
C = SC->getAPInt();
2224 const SCEV *SResidual =
2233 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2261 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2264 UniqueSCEVs.InsertNode(S, IP);
2291 "This is not an extending conversion!");
2293 "This is not a conversion to a SCEVable type!");
2298 if (SC->getAPInt().isNegative())
2303 const SCEV *NewOp =
T->getOperand();
2322 for (
const SCEV *
Op : AR->operands())
2360 APInt &AccumulatedConstant,
2364 bool Interesting =
false;
2371 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2373 AccumulatedConstant += Scale *
C->getAPInt();
2378 for (; i !=
Ops.size(); ++i) {
2387 M, NewOps, AccumulatedConstant,
Add->operands(), NewScale, SE);
2393 auto Pair = M.insert({
Key, NewScale});
2397 Pair.first->second += NewScale;
2405 auto Pair = M.insert({
Ops[i], Scale});
2409 Pair.first->second += Scale;
2428 case Instruction::Add:
2431 case Instruction::Sub:
2434 case Instruction::Mul:
2448 const SCEV *
A = (this->*Extension)(
2450 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2451 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2459 if (BinOp == Instruction::Mul)
2465 APInt C = RHSC->getAPInt();
2466 unsigned NumBits =
C.getBitWidth();
2467 bool IsSub = (BinOp == Instruction::Sub);
2468 bool IsNegativeConst = (
Signed &&
C.isNegative());
2470 bool OverflowDown = IsSub ^ IsNegativeConst;
2472 if (IsNegativeConst) {
2485 APInt Limit = Min + Magnitude;
2491 APInt Limit = Max - Magnitude;
2496std::optional<SCEV::NoWrapFlags>
2501 return std::nullopt;
2510 bool Deduced =
false;
2512 if (OBO->
getOpcode() != Instruction::Add &&
2515 return std::nullopt;
2524 false, LHS, RHS, CtxI)) {
2531 true, LHS, RHS, CtxI)) {
2538 return std::nullopt;
2548 using namespace std::placeholders;
2555 assert(CanAnalyze &&
"don't call from other places!");
2562 auto IsKnownNonNegative = [&](
SCEVUse U) {
2572 if (SignOrUnsignWrap != SignOrUnsignMask &&
2579 return Instruction::Add;
2581 return Instruction::Mul;
2592 Opcode,
C, OBO::NoSignedWrap);
2600 Opcode,
C, OBO::NoUnsignedWrap);
2610 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2617 if (UDiv->getOperand(1) ==
Ops[1])
2620 if (UDiv->getOperand(1) ==
Ops[0])
2636 "only nuw or nsw allowed");
2637 assert(!
Ops.empty() &&
"Cannot get empty add!");
2638 if (
Ops.size() == 1)
return Ops[0];
2641 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2643 "SCEVAddExpr operand types don't match!");
2645 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2646 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2651 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2652 [](
const APInt &
C) {
return C.isZero(); },
2653 [](
const APInt &
C) {
return false; });
2666 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2671 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2672 Add->setNoWrapFlags(ComputeFlags(
Ops));
2680 bool FoundMatch =
false;
2681 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2682 if (
Ops[i] ==
Ops[i+1]) {
2694 --i; e -=
Count - 1;
2704 auto FindTruncSrcType = [&]() ->
Type * {
2710 return T->getOperand()->getType();
2712 SCEVUse LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2714 return T->getOperand()->getType();
2718 if (
auto *SrcType = FindTruncSrcType()) {
2725 if (
T->getOperand()->getType() != SrcType) {
2734 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2737 if (
T->getOperand()->getType() != SrcType) {
2765 if (
Ops.size() == 2) {
2775 auto C2 =
C->getAPInt();
2778 APInt ConstAdd = C1 + C2;
2779 auto AddFlags = AddExpr->getNoWrapFlags();
2820 if (
Ops.size() == 2 &&
2831 if (Idx <
Ops.size()) {
2832 bool DeletedAdd =
false;
2843 Ops.erase(
Ops.begin()+Idx);
2846 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2869 struct APIntCompare {
2870 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2871 return LHS.ult(RHS);
2878 std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
2879 for (
const SCEV *NewOp : NewOps)
2880 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2883 if (AccumulatedConstant != 0)
2885 for (
auto &MulOp : MulOpLists) {
2886 if (MulOp.first == 1) {
2888 }
else if (MulOp.first != 0) {
2897 if (
Ops.size() == 1)
2908 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2909 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2912 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2913 if (MulOpSCEV ==
Ops[AddOp]) {
2915 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2916 if (
Mul->getNumOperands() != 2) {
2923 const SCEV *AddOne =
2927 if (
Ops.size() == 2)
return OuterMul;
2929 Ops.erase(
Ops.begin()+AddOp);
2930 Ops.erase(
Ops.begin()+Idx-1);
2932 Ops.erase(
Ops.begin()+Idx);
2933 Ops.erase(
Ops.begin()+AddOp-1);
2935 Ops.push_back(OuterMul);
2940 for (
unsigned OtherMulIdx = Idx+1;
2947 OMulOp != e; ++OMulOp)
2948 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2950 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2951 if (
Mul->getNumOperands() != 2) {
2959 OtherMul->
operands().take_front(OMulOp));
2963 const SCEV *InnerMulSum =
2967 if (
Ops.size() == 2)
return OuterMul;
2968 Ops.erase(
Ops.begin()+Idx);
2969 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2970 Ops.push_back(OuterMul);
2990 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2993 Ops.erase(
Ops.begin()+i);
2998 if (!LIOps.
empty()) {
3023 auto *DefI = getDefiningScopeBound(LIOps);
3025 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
3037 if (
Ops.size() == 1)
return NewRec;
3040 for (
unsigned i = 0;; ++i)
3041 if (
Ops[i] == AddRec) {
3051 for (
unsigned OtherIdx = Idx+1;
3059 "AddRecExprs are not sorted in reverse dominance order?");
3066 if (OtherAddRec->getLoop() == AddRecLoop) {
3067 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
3069 if (i >= AddRecOps.
size()) {
3070 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3074 getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
3077 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3092 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
3103 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3107 S =
new (SCEVAllocator)
3109 UniqueSCEVs.InsertNode(S, IP);
3120 FoldingSetNodeID
ID;
3122 for (
const SCEV *
Op :
Ops)
3127 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3131 S =
new (SCEVAllocator)
3132 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3133 UniqueSCEVs.InsertNode(S, IP);
3135 LoopUsers[
L].push_back(S);
3144 FoldingSetNodeID
ID;
3146 for (
const SCEV *
Op :
Ops)
3150 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3154 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3156 UniqueSCEVs.InsertNode(S, IP);
3166 if (j > 1 && k / j != i) Overflow =
true;
3182 if (n == 0 || n == k)
return 1;
3183 if (k > n)
return 0;
3189 for (
uint64_t i = 1; i <= k; ++i) {
3190 r =
umul_ov(r, n-(i-1), Overflow);
3199 struct FindConstantInAddMulChain {
3200 bool FoundConstant =
false;
3202 bool follow(
const SCEV *S) {
3207 bool isDone()
const {
3208 return FoundConstant;
3212 FindConstantInAddMulChain
F;
3214 ST.visitAll(StartExpr);
3215 return F.FoundConstant;
3223 "only nuw or nsw allowed");
3224 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3225 if (
Ops.size() == 1)
return Ops[0];
3227 Type *ETy =
Ops[0]->getType();
3229 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3231 "SCEVMulExpr operand types don't match!");
3236 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3237 [](
const APInt &
C) {
return C.isOne(); },
3238 [](
const APInt &
C) {
return C.isZero(); });
3249 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3254 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3255 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3260 if (
Ops.size() == 2) {
3268 const SCEV *Op0, *Op1;
3276 if (
Ops[0]->isAllOnesValue()) {
3281 bool AnyFolded =
false;
3282 for (
const SCEV *AddOp :
Add->operands()) {
3309 AddRec->getNoWrapFlags(FlagsMask));
3332 APInt C1V = LHSC->getAPInt();
3342 const SCEV *NewMul =
nullptr;
3346 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3361 if (Idx <
Ops.size()) {
3362 bool DeletedMul =
false;
3368 Ops.erase(
Ops.begin()+Idx);
3392 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3395 Ops.erase(
Ops.begin()+i);
3400 if (!LIOps.
empty()) {
3413 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3429 if (
Ops.size() == 1)
return NewRec;
3432 for (
unsigned i = 0;; ++i)
3433 if (
Ops[i] == AddRec) {
3454 bool OpsModified =
false;
3455 for (
unsigned OtherIdx = Idx+1;
3469 bool Overflow =
false;
3476 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3480 z < ze && !Overflow; ++z) {
3483 if (LargerThan64Bits)
3484 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3486 Coeff = Coeff1*Coeff2;
3501 if (
Ops.size() == 2)
return NewAddRec;
3502 Ops[Idx] = NewAddRec;
3503 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3519 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3526 "SCEVURemExpr operand types don't match!");
3531 if (RHSC->getValue()->isOne())
3532 return getZero(LHS->getType());
3535 if (RHSC->getAPInt().isPowerOf2()) {
3536 Type *FullTy = LHS->getType();
3552 assert(!LHS->getType()->isPointerTy() &&
3553 "SCEVUDivExpr operand can't be pointer!");
3554 assert(LHS->getType() == RHS->getType() &&
3555 "SCEVUDivExpr operand types don't match!");
3562 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3570 if (RHSC->getValue()->isOne())
3575 if (!RHSC->getValue()->isZero()) {
3579 Type *Ty = LHS->getType();
3580 unsigned LZ = RHSC->getAPInt().countl_zero();
3584 if (!RHSC->getAPInt().isPowerOf2())
3592 const APInt &StepInt = Step->getAPInt();
3593 const APInt &DivInt = RHSC->getAPInt();
3594 if (!StepInt.
urem(DivInt) &&
3600 for (
const SCEV *
Op : AR->operands())
3606 const APInt *StartRem;
3619 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3623 const SCEV *NewStart =
3625 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3627 const SCEV *NewLHS =
3630 if (LHS != NewLHS) {
3640 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3649 for (
const SCEV *
Op : M->operands())
3653 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3654 const SCEV *
Op = M->getOperand(i);
3666 if (
auto *DivisorConstant =
3668 bool Overflow =
false;
3670 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3681 for (
const SCEV *
Op :
A->operands())
3685 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3692 if (Operands.
size() ==
A->getNumOperands())
3699 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3709 return getZero(LHS->getType());
3713 const SCEV *NewLHS, *NewRHS;
3721 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3724 UniqueSCEVs.InsertNode(S, IP);
3754 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3761 if (LHSCst == RHSCst) {
3769 APInt Factor =
gcd(LHSCst, RHSCst);
3787 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3788 if (
Mul->getOperand(i) == RHS) {
3807 if (StepChrec->getLoop() == L) {
3821 if (Operands.
size() == 1)
return Operands[0];
3826 "SCEVAddRecExpr operand types don't match!");
3827 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3829 for (
const SCEV *
Op : Operands)
3831 "SCEVAddRecExpr operand is not available at loop entry!");
3834 if (Operands.
back()->isZero()) {
3849 const Loop *NestedLoop = NestedAR->getLoop();
3850 if (L->contains(NestedLoop)
3853 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3855 Operands[0] = NestedAR->getStart();
3859 bool AllInvariant =
all_of(
3871 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3882 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3886 Operands[0] = NestedAR;
3892 return getOrCreateAddRecExpr(Operands, L, Flags);
3908 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3912 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3926 bool FirstIter =
true;
3928 for (
SCEVUse IndexExpr : IndexExprs) {
3935 Offsets.push_back(FieldOffset);
3938 CurTy = STy->getTypeAtIndex(Index);
3943 "The first index of a GEP indexes a pointer");
3944 CurTy = SrcElementTy;
3955 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3956 Offsets.push_back(LocalOffset);
3961 if (Offsets.empty())
3974 "GEP should not change type mid-flight.");
3978SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3981 ID.AddInteger(SCEVType);
3985 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3988SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3991 ID.AddInteger(SCEVType);
3995 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4005 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
4006 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4007 if (
Ops.size() == 1)
return Ops[0];
4010 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4012 "Operand types don't match!");
4015 "min/max should be consistently pointerish");
4041 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4043 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4048 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4050 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4056 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
4062 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
4067 if (Idx <
Ops.size()) {
4068 bool DeletedAny =
false;
4069 while (
Ops[Idx]->getSCEVType() == Kind) {
4071 Ops.erase(
Ops.begin()+Idx);
4089 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
4090 if (
Ops[i] ==
Ops[i + 1] ||
4091 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
4094 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
4097 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
4100 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
4106 if (
Ops.size() == 1)
return Ops[0];
4108 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
4113 ID.AddInteger(Kind);
4117 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4119 return ExistingSCEV;
4122 SCEV *S =
new (SCEVAllocator)
4125 UniqueSCEVs.InsertNode(S, IP);
4133class SCEVSequentialMinMaxDeduplicatingVisitor final
4134 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4135 std::optional<const SCEV *>> {
4136 using RetVal = std::optional<const SCEV *>;
4144 bool canRecurseInto(
SCEVTypes Kind)
const {
4147 return RootKind == Kind || NonSequentialRootKind == Kind;
4150 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4152 "Only for min/max expressions.");
4155 if (!canRecurseInto(Kind))
4165 return std::nullopt;
4172 RetVal
visit(
const SCEV *S) {
4174 if (!SeenOps.
insert(S).second)
4175 return std::nullopt;
4176 return Base::visit(S);
4180 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4182 : SE(SE), RootKind(RootKind),
4183 NonSequentialRootKind(
4184 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4188 SmallVectorImpl<SCEVUse> &NewOps) {
4193 for (
const SCEV *
Op : OrigOps) {
4198 Ops.emplace_back(*NewOp);
4202 NewOps = std::move(
Ops);
4206 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4208 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4210 RetVal visitPtrToAddrExpr(
const SCEVPtrToAddrExpr *Expr) {
return Expr; }
4212 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4214 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4216 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4218 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4220 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4222 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4224 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4226 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4228 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4229 return visitAnyMinMaxExpr(Expr);
4232 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4233 return visitAnyMinMaxExpr(Expr);
4236 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4237 return visitAnyMinMaxExpr(Expr);
4240 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4241 return visitAnyMinMaxExpr(Expr);
4244 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4245 return visitAnyMinMaxExpr(Expr);
4248 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4250 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4293struct SCEVPoisonCollector {
4294 bool LookThroughMaybePoisonBlocking;
4295 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4296 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4297 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4299 bool follow(
const SCEV *S) {
4300 if (!LookThroughMaybePoisonBlocking &&
4310 bool isDone()
const {
return false; }
4320 SCEVPoisonCollector PC1(
true);
4325 if (PC1.MaybePoison.empty())
4331 SCEVPoisonCollector PC2(
false);
4341 SCEVPoisonCollector PC(
false);
4364 while (!Worklist.
empty()) {
4366 if (!Visited.
insert(V).second)
4370 if (Visited.
size() > 16)
4375 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4386 if (PDI->isDisjoint())
4393 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4400 if (
I->hasPoisonGeneratingAnnotations())
4411 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4412 "Not a SCEVSequentialMinMaxExpr!");
4413 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4414 if (
Ops.size() == 1)
4418 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4420 "Operand types don't match!");
4423 "min/max should be consistently pointerish");
4431 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4438 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4448 bool DeletedAny =
false;
4449 while (Idx <
Ops.size()) {
4450 if (
Ops[Idx]->getSCEVType() != Kind) {
4455 Ops.erase(
Ops.begin() + Idx);
4456 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4457 SMME->operands().end());
4465 const SCEV *SaturationPoint;
4476 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4477 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4489 Ops.erase(
Ops.begin() + i);
4494 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4495 Ops.erase(
Ops.begin() + i);
4503 ID.AddInteger(Kind);
4507 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4509 return ExistingSCEV;
4513 SCEV *S =
new (SCEVAllocator)
4516 UniqueSCEVs.InsertNode(S, IP);
4564 if (
Size.isScalable())
4585 "Cannot get offset for structure containing scalable vector types");
4599 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4601 "Stale SCEVUnknown in uniquing map!");
4607 UniqueSCEVs.InsertNode(S, IP);
4622 return Ty->isIntOrPtrTy();
4629 if (Ty->isPointerTy())
4640 if (Ty->isIntegerTy())
4644 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4656 bool PreciseA, PreciseB;
4657 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4658 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4659 if (!PreciseA || !PreciseB)
4662 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4663 DT.dominates(ScopeB, ScopeA);
4667 return CouldNotCompute.get();
4670bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4673 return SU && SU->getValue() ==
nullptr;
4676 return !ContainsNulls;
4681 if (
I != HasRecMap.end())
4686 HasRecMap.insert({S, FoundAddRec});
4694 if (
SI == ExprValueMap.
end())
4696 return SI->second.getArrayRef();
4702void ScalarEvolution::eraseValueFromMap(
Value *V) {
4704 if (
I != ValueExprMap.end()) {
4705 auto EVIt = ExprValueMap.find(
I->second);
4706 bool Removed = EVIt->second.remove(V);
4708 assert(Removed &&
"Value not in ExprValueMap?");
4709 ValueExprMap.erase(
I);
4713void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4717 auto It = ValueExprMap.find_as(V);
4718 if (It == ValueExprMap.end()) {
4720 ExprValueMap[S].insert(V);
4731 return createSCEVIter(V);
4738 if (
I != ValueExprMap.end()) {
4739 const SCEV *S =
I->second;
4740 assert(checkValidity(S) &&
4741 "existing SCEV has not been properly invalidated");
4754 Type *Ty = V->getType();
4770 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4783 return (
const SCEV *)
nullptr;
4789 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4793 Type *Ty = V->getType();
4799 assert(
P->getType()->isPointerTy());
4814 if (AddOp->getType()->isPointerTy()) {
4815 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4833 return getZero(LHS->getType());
4838 if (RHS->getType()->isPointerTy()) {
4839 if (!LHS->getType()->isPointerTy() ||
4849 const bool RHSIsNotMinSigned =
4880 Type *SrcTy = V->getType();
4881 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4882 "Cannot truncate or zero extend with non-integer arguments!");
4892 Type *SrcTy = V->getType();
4893 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4894 "Cannot truncate or zero extend with non-integer arguments!");
4904 Type *SrcTy = V->getType();
4905 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4906 "Cannot noop or zero extend with non-integer arguments!");
4908 "getNoopOrZeroExtend cannot truncate!");
4916 Type *SrcTy = V->getType();
4917 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4918 "Cannot noop or sign extend with non-integer arguments!");
4920 "getNoopOrSignExtend cannot truncate!");
4928 Type *SrcTy = V->getType();
4929 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4930 "Cannot noop or any extend with non-integer arguments!");
4932 "getNoopOrAnyExtend cannot truncate!");
4940 Type *SrcTy = V->getType();
4941 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4942 "Cannot truncate or noop with non-integer arguments!");
4944 "getTruncateOrNoop cannot extend!");
4952 const SCEV *PromotedLHS = LHS;
4953 const SCEV *PromotedRHS = RHS;
4973 assert(!
Ops.empty() &&
"At least one operand must be!");
4975 if (
Ops.size() == 1)
4979 Type *MaxType =
nullptr;
4985 assert(MaxType &&
"Failed to find maximum type!");
4998 if (!V->getType()->isPointerTy())
5003 V = AddRec->getStart();
5005 const SCEV *PtrOp =
nullptr;
5006 for (
const SCEV *AddOp :
Add->operands()) {
5007 if (AddOp->getType()->isPointerTy()) {
5008 assert(!PtrOp &&
"Cannot have multiple pointer ops");
5012 assert(PtrOp &&
"Must have pointer op");
5024 for (
User *U :
I->users()) {
5026 if (Visited.
insert(UserInsn).second)
5040 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
5041 bool IgnoreOtherLoops =
true) {
5044 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
5046 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
5051 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5053 SeenLoopVariantSCEVUnknown =
true;
5057 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5061 SeenOtherLoops =
true;
5065 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5067 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5070 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
5071 : SCEVRewriteVisitor(SE),
L(
L) {}
5074 bool SeenLoopVariantSCEVUnknown =
false;
5075 bool SeenOtherLoops =
false;
5084 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
5085 SCEVPostIncRewriter
Rewriter(L, SE);
5087 return Rewriter.hasSeenLoopVariantSCEVUnknown()
5092 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5094 SeenLoopVariantSCEVUnknown =
true;
5098 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5102 SeenOtherLoops =
true;
5106 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5108 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5111 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
5112 : SCEVRewriteVisitor(SE),
L(
L) {}
5115 bool SeenLoopVariantSCEVUnknown =
false;
5116 bool SeenOtherLoops =
false;
5122class SCEVBackedgeConditionFolder
5125 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5126 ScalarEvolution &SE) {
5127 bool IsPosBECond =
false;
5128 Value *BECond =
nullptr;
5129 if (BasicBlock *Latch =
L->getLoopLatch()) {
5131 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
5132 "Both outgoing branches should not target same header!");
5133 BECond = BI->getCondition();
5134 IsPosBECond = BI->getSuccessor(0) ==
L->getHeader();
5139 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5143 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5144 const SCEV *
Result = Expr;
5149 switch (
I->getOpcode()) {
5150 case Instruction::Select: {
5152 std::optional<const SCEV *> Res =
5153 compareWithBackedgeCondition(
SI->getCondition());
5161 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5172 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5173 bool IsPosBECond, ScalarEvolution &SE)
5174 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5175 IsPositiveBECond(IsPosBECond) {}
5177 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5181 Value *BackedgeCond =
nullptr;
5183 bool IsPositiveBECond;
5186std::optional<const SCEV *>
5187SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5192 if (BackedgeCond == IC)
5195 return std::nullopt;
5200 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5201 ScalarEvolution &SE) {
5207 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5214 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5224 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5225 : SCEVRewriteVisitor(SE),
L(
L) {}
5234ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5238 using OBO = OverflowingBinaryOperator;
5246 const APInt &BECountAP = BECountMax->getAPInt();
5247 unsigned NoOverflowBitWidth =
5259 Instruction::Add, IncRange, OBO::NoSignedWrap);
5260 if (NSWRegion.contains(AddRecRange))
5269 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5270 if (NUWRegion.contains(AddRecRange))
5278ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5288 if (!SignedWrapViaInductionTried.insert(AR).second)
5313 AC.assumptions().empty())
5321 const SCEV *OverflowLimit =
5323 if (OverflowLimit &&
5331ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5341 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5367 AC.assumptions().empty())
5402 explicit BinaryOp(Operator *
Op)
5406 IsNSW = OBO->hasNoSignedWrap();
5407 IsNUW = OBO->hasNoUnsignedWrap();
5411 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5413 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5425 return std::nullopt;
5431 switch (
Op->getOpcode()) {
5432 case Instruction::Add:
5433 case Instruction::Sub:
5434 case Instruction::Mul:
5435 case Instruction::UDiv:
5436 case Instruction::URem:
5437 case Instruction::And:
5438 case Instruction::AShr:
5439 case Instruction::Shl:
5440 return BinaryOp(
Op);
5442 case Instruction::Or: {
5445 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5447 return BinaryOp(
Op);
5450 case Instruction::Xor:
5454 if (RHSC->getValue().isSignMask())
5455 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5457 if (V->getType()->isIntegerTy(1))
5458 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5459 return BinaryOp(
Op);
5461 case Instruction::LShr:
5470 if (SA->getValue().ult(
BitWidth)) {
5472 ConstantInt::get(SA->getContext(),
5474 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5477 return BinaryOp(
Op);
5479 case Instruction::ExtractValue: {
5481 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5489 bool Signed = WO->isSigned();
5492 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5497 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5508 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5509 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5511 return std::nullopt;
5537 if (
Op == SymbolicPHI)
5542 if (SourceBits != NewBits)
5560 if (!L || L->getHeader() != PN->
getParent())
5618std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5619ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5627 assert(L &&
"Expecting an integer loop header phi");
5632 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5633 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5634 Value *
V = PN->getIncomingValue(i);
5635 if (
L->contains(PN->getIncomingBlock(i))) {
5638 }
else if (BEValueV != V) {
5642 }
else if (!StartValueV) {
5644 }
else if (StartValueV != V) {
5645 StartValueV =
nullptr;
5649 if (!BEValueV || !StartValueV)
5650 return std::nullopt;
5652 const SCEV *BEValue =
getSCEV(BEValueV);
5659 return std::nullopt;
5663 unsigned FoundIndex =
Add->getNumOperands();
5664 Type *TruncTy =
nullptr;
5666 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5669 if (FoundIndex == e) {
5674 if (FoundIndex ==
Add->getNumOperands())
5675 return std::nullopt;
5679 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5680 if (i != FoundIndex)
5681 Ops.push_back(
Add->getOperand(i));
5687 return std::nullopt;
5740 const SCEV *StartVal =
getSCEV(StartValueV);
5741 const SCEV *PHISCEV =
5768 auto getExtendedExpr = [&](
const SCEV *Expr,
5769 bool CreateSignExtend) ->
const SCEV * {
5772 const SCEV *ExtendedExpr =
5775 return ExtendedExpr;
5783 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5784 const SCEV *ExtendedExpr) ->
bool {
5785 return Expr != ExtendedExpr &&
5789 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5790 if (PredIsKnownFalse(StartVal, StartExtended)) {
5792 return std::nullopt;
5797 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5798 if (PredIsKnownFalse(Accum, AccumExtended)) {
5800 return std::nullopt;
5803 auto AppendPredicate = [&](
const SCEV *Expr,
5804 const SCEV *ExtendedExpr) ->
void {
5805 if (Expr != ExtendedExpr &&
5813 AppendPredicate(StartVal, StartExtended);
5814 AppendPredicate(Accum, AccumExtended);
5822 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5823 std::make_pair(NewAR, Predicates);
5825 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5829std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5834 return std::nullopt;
5837 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5838 if (
I != PredicatedSCEVRewrites.end()) {
5839 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5842 if (Rewrite.first == SymbolicPHI)
5843 return std::nullopt;
5847 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5851 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5852 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5857 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5858 return std::nullopt;
5875 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5876 if (Expr1 != Expr2 &&
5877 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5878 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5895const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5897 Value *StartValueV) {
5900 assert(BEValueV && StartValueV);
5906 if (BO->Opcode != Instruction::Add)
5909 const SCEV *Accum =
nullptr;
5910 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5912 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5926 insertValueToMap(PN, PHISCEV);
5931 proveNoWrapViaConstantRanges(AR)));
5939 "Accum is defined outside L, but is not invariant?");
5940 if (isAddRecNeverPoison(BEInst, L))
5947const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5948 const Loop *
L = LI.getLoopFor(PN->
getParent());
5955 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5961 }
else if (BEValueV != V) {
5965 }
else if (!StartValueV) {
5967 }
else if (StartValueV != V) {
5968 StartValueV =
nullptr;
5972 if (!BEValueV || !StartValueV)
5975 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5976 "PHI node already processed?");
5980 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5985 insertValueToMap(PN, SymbolicName);
5989 const SCEV *BEValue =
getSCEV(BEValueV);
5999 unsigned FoundIndex =
Add->getNumOperands();
6000 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
6001 if (
Add->getOperand(i) == SymbolicName)
6002 if (FoundIndex == e) {
6007 if (FoundIndex !=
Add->getNumOperands()) {
6010 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
6011 if (i != FoundIndex)
6012 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
6024 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
6031 if (
GEP->getOperand(0) == PN) {
6032 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
6050 const SCEV *StartVal =
getSCEV(StartValueV);
6051 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
6056 forgetMemoizedResults({SymbolicName});
6057 insertValueToMap(PN, PHISCEV);
6062 proveNoWrapViaConstantRanges(AR)));
6087 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
6088 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
6090 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
6091 const SCEV *StartVal =
getSCEV(StartValueV);
6092 if (Start == StartVal) {
6096 forgetMemoizedResults({SymbolicName});
6097 insertValueToMap(PN, Shifted);
6107 eraseValueFromMap(PN);
6122 Use &LeftUse =
Merge->getOperandUse(0);
6123 Use &RightUse =
Merge->getOperandUse(1);
6159 assert(IDom &&
"At least the entry block should dominate PN");
6167const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6172 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6189 CommonInst = IncomingInst;
6205ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6211 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6212 bool SCEVExprsIdentical =
6214 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6215 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6218const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6219 if (
const SCEV *S = createAddRecFromPHI(PN))
6229 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6232 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6241 struct FindClosure {
6242 const SCEV *OperandToFind;
6248 bool canRecurseInto(
SCEVTypes Kind)
const {
6251 return RootKind == Kind || NonSequentialRootKind == Kind ||
6256 : OperandToFind(OperandToFind), RootKind(RootKind),
6257 NonSequentialRootKind(
6261 bool follow(
const SCEV *S) {
6262 Found = S == OperandToFind;
6264 return !isDone() && canRecurseInto(S->
getSCEVType());
6267 bool isDone()
const {
return Found; }
6270 FindClosure FC(OperandToFind, RootKind);
6275std::optional<const SCEV *>
6276ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6286 switch (ICI->getPredicate()) {
6300 bool Signed = ICI->isSigned();
6301 const SCEV *LA =
getSCEV(TrueVal);
6309 if (LA == LS &&
RA == RS)
6311 if (LA == RS &&
RA == LS)
6314 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6315 if (
Op->getType()->isPointerTy()) {
6326 LS = CoerceOperand(LS);
6327 RS = CoerceOperand(RS);
6351 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6352 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6366 X = ZExt->getOperand();
6368 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6379 return std::nullopt;
6382static std::optional<const SCEV *>
6384 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6388 "Unexpected operands of a select.");
6400 return std::nullopt;
6415static std::optional<const SCEV *>
6419 return std::nullopt;
6422 const auto *SETrue = SE->
getSCEV(TrueVal);
6423 const auto *SEFalse = SE->
getSCEV(FalseVal);
6427const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6429 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6431 V->getType() ==
TrueVal->getType() &&
6432 "Types of select hands and of the result must match.");
6435 if (!
V->getType()->isIntegerTy(1))
6438 if (std::optional<const SCEV *> S =
6451 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6455 if (std::optional<const SCEV *> S =
6456 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6462 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6468 assert(
GEP->getSourceElementType()->isSized() &&
6469 "GEP source element type must be sized");
6472 for (
Value *Index :
GEP->indices())
6477APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6480 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6483 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6485 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6488 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6507 return GetShiftedByZeros(TZ);
6517 return GetShiftedByZeros(TZ);
6521 if (
M->hasNoUnsignedWrap()) {
6524 for (
const SCEV *Operand :
M->operands().drop_front())
6532 for (
const SCEV *Operand :
M->operands())
6534 return GetShiftedByZeros(TZ);
6539 if (
N->hasNoUnsignedWrap())
6540 return GetGCDMultiple(
N);
6543 for (
const SCEV *Operand :
N->operands().drop_front())
6545 return GetShiftedByZeros(TZ);
6562 CtxI = &*F.getEntryBlock().begin();
6568 .countMinTrailingZeros();
6569 return GetShiftedByZeros(Known);
6582 return getConstantMultipleImpl(S, CtxI);
6584 auto I = ConstantMultipleCache.find(S);
6585 if (
I != ConstantMultipleCache.end())
6588 APInt Result = getConstantMultipleImpl(S, CtxI);
6589 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6590 assert(InsertPair.second &&
"Should insert a new key");
6591 return InsertPair.first->second;
6608 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6611 if (std::optional<ConstantRange>
Range = CB->getRange())
6615 if (std::optional<ConstantRange>
Range =
A->getRange())
6618 return std::nullopt;
6625 UnsignedRanges.erase(AddRec);
6626 SignedRanges.erase(AddRec);
6627 ConstantMultipleCache.erase(AddRec);
6632getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6658 Value *Start, *Step;
6665 assert(L && L->getHeader() ==
P->getParent());
6678 case Instruction::AShr:
6679 case Instruction::LShr:
6680 case Instruction::Shl:
6695 KnownStep.getBitWidth() ==
BitWidth);
6698 auto MaxShiftAmt = KnownStep.getMaxValue();
6700 bool Overflow =
false;
6701 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6708 case Instruction::AShr: {
6716 if (KnownStart.isNonNegative())
6719 KnownStart.getMaxValue() + 1);
6720 if (KnownStart.isNegative())
6723 KnownEnd.getMaxValue() + 1);
6726 case Instruction::LShr: {
6735 KnownStart.getMaxValue() + 1);
6737 case Instruction::Shl: {
6741 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6742 return ConstantRange(KnownStart.getMinValue(),
6743 KnownEnd.getMaxValue() + 1);
6768 [&](
Value *Operand) { return DT.dominates(Operand, PHI); }))
6775ScalarEvolution::getRangeRefIter(
const SCEV *S,
6776 ScalarEvolution::RangeSignHint SignHint) {
6777 DenseMap<const SCEV *, ConstantRange> &Cache =
6778 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6781 SmallPtrSet<const SCEV *, 8> Seen;
6785 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6786 if (!Seen.
insert(Expr).second)
6820 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6821 const SCEV *
P = WorkList[
I];
6825 for (
const SCEV *
Op :
P->operands())
6838 if (!WorkList.
empty()) {
6843 getRangeRef(
P, SignHint);
6847 return getRangeRef(S, SignHint, 0);
6854 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6855 DenseMap<const SCEV *, ConstantRange> &Cache =
6856 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6864 if (
I != Cache.
end())
6868 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6873 return getRangeRefIter(S, SignHint);
6876 ConstantRange ConservativeResult(
BitWidth,
true);
6877 using OBO = OverflowingBinaryOperator;
6881 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6885 ConservativeResult =
6892 ConservativeResult = ConstantRange(
6908 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6915 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6922 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6928 return setRange(Cast, SignHint,
X);
6933 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6934 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6936 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6937 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6938 ConservativeResult =
6939 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6941 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6942 unsigned WrapType = OBO::AnyWrap;
6943 if (
Add->hasNoSignedWrap())
6944 WrapType |= OBO::NoSignedWrap;
6945 if (
Add->hasNoUnsignedWrap())
6946 WrapType |= OBO::NoUnsignedWrap;
6948 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6950 return setRange(
Add, SignHint,
6951 ConservativeResult.intersectWith(
X, RangeType));
6955 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6957 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6958 return setRange(
Mul, SignHint,
6959 ConservativeResult.intersectWith(
X, RangeType));
6963 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6964 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6965 return setRange(UDiv, SignHint,
6966 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6974 if (!UnsignedMinValue.
isZero())
6975 ConservativeResult = ConservativeResult.intersectWith(
6976 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6985 bool AllNonNeg =
true;
6986 bool AllNonPos =
true;
6987 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6994 ConservativeResult = ConservativeResult.intersectWith(
6999 ConservativeResult = ConservativeResult.intersectWith(
7008 const SCEV *MaxBEScev =
7022 auto RangeFromAffine = getRangeForAffineAR(
7024 ConservativeResult =
7025 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
7027 auto RangeFromFactoring = getRangeViaFactoring(
7029 ConservativeResult =
7030 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
7036 const SCEV *SymbolicMaxBECount =
7041 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
7042 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
7043 ConservativeResult =
7044 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
7049 return setRange(AddRec, SignHint, std::move(ConservativeResult));
7059 ID = Intrinsic::umax;
7062 ID = Intrinsic::smax;
7066 ID = Intrinsic::umin;
7069 ID = Intrinsic::smin;
7076 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
7077 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
7079 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
7080 return setRange(S, SignHint,
7081 ConservativeResult.intersectWith(
X, RangeType));
7090 ConservativeResult =
7091 ConservativeResult.intersectWith(*MDRange, RangeType);
7096 auto CR = getRangeForUnknownRecurrence(U);
7097 ConservativeResult = ConservativeResult.intersectWith(CR);
7108 if (
U->getType()->isPointerTy()) {
7111 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
7112 int ptrIdxDiff = ptrSize -
BitWidth;
7113 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
7126 ConservativeResult = ConservativeResult.intersectWith(
7130 ConservativeResult = ConservativeResult.intersectWith(
7135 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
7138 bool CanBeNull, CanBeFreed;
7139 uint64_t DerefBytes =
7140 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
7150 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
7151 uint64_t Rem = MaxVal.
urem(Align);
7156 ConservativeResult = ConservativeResult.intersectWith(
7166 return getRangeRef(AR, SignHint,
Depth + 1);
7170 ConstantRange RangeFromOps(
BitWidth,
false);
7172 for (
const auto &
Op :
Phi->operands()) {
7174 RangeFromOps = RangeFromOps.unionWith(OpRange);
7176 if (RangeFromOps.isFullSet())
7179 ConservativeResult =
7180 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7186 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7188 ConservativeResult = ConservativeResult.difference(Disallowed);
7191 return setRange(U, SignHint, std::move(ConservativeResult));
7197 return setRange(S, SignHint, std::move(ConservativeResult));
7206 const APInt &MaxBECount,
7213 if (Step == 0 || MaxBECount == 0)
7220 return ConstantRange::getFull(
BitWidth);
7236 return ConstantRange::getFull(
BitWidth);
7248 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7249 : (StartUpper + std::move(
Offset));
7254 if (StartRange.
contains(MovedBoundary))
7255 return ConstantRange::getFull(
BitWidth);
7258 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7260 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7269 const APInt &MaxBECount) {
7273 "mismatched bit widths");
7282 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7284 StartSRange, MaxBECount,
7296ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7298 ScalarEvolution::RangeSignHint SignHint) {
7299 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7301 "This only works for non-self-wrapping AddRecs!");
7302 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7306 return ConstantRange::getFull(
BitWidth);
7314 return ConstantRange::getFull(
BitWidth);
7318 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7320 MaxItersWithoutWrap))
7321 return ConstantRange::getFull(
BitWidth);
7342 ConstantRange StartRange = getRangeRef(Start, SignHint);
7343 ConstantRange EndRange = getRangeRef(End, SignHint);
7344 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7348 return RangeBetween;
7353 return ConstantRange::getFull(
BitWidth);
7356 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7357 return RangeBetween;
7359 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7360 return RangeBetween;
7361 return ConstantRange::getFull(
BitWidth);
7366 const APInt &MaxBECount) {
7373 "mismatched bit widths");
7375 struct SelectPattern {
7376 Value *Condition =
nullptr;
7380 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7382 std::optional<unsigned> CastOp;
7396 CastOp = SCast->getSCEVType();
7397 S = SCast->getOperand();
7400 using namespace llvm::PatternMatch;
7407 Condition =
nullptr;
7439 bool isRecognized() {
return Condition !=
nullptr; }
7442 SelectPattern StartPattern(*
this,
BitWidth, Start);
7443 if (!StartPattern.isRecognized())
7444 return ConstantRange::getFull(
BitWidth);
7446 SelectPattern StepPattern(*
this,
BitWidth, Step);
7447 if (!StepPattern.isRecognized())
7448 return ConstantRange::getFull(
BitWidth);
7450 if (StartPattern.Condition != StepPattern.Condition) {
7454 return ConstantRange::getFull(
BitWidth);
7465 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7466 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7467 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7468 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7470 ConstantRange TrueRange =
7471 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7472 ConstantRange FalseRange =
7473 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7495ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7508 SmallPtrSet<const SCEV *, 16> Visited;
7510 auto pushOp = [&](
const SCEV *S) {
7511 if (!Visited.
insert(S).second)
7514 if (Visited.
size() > 30) {
7525 while (!Worklist.
empty()) {
7527 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7528 if (!Bound || DT.dominates(Bound, DefI))
7535 return Bound ? Bound : &*F.getEntryBlock().begin();
7541 return getDefiningScopeBound(
Ops, Discard);
7544bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7546 if (
A->getParent() ==
B->getParent() &&
7551 auto *BLoop = LI.getLoopFor(
B->getParent());
7552 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7553 BLoop->getLoopPreheader() ==
A->getParent() &&
7555 A->getParent()->end()) &&
7562bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7563 SCEVPoisonCollector PC(
true);
7565 return PC.MaybePoison.empty();
7568bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7574 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7578bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7595 for (
const Use &
Op :
I->operands()) {
7601 auto *DefI = getDefiningScopeBound(SCEVOps);
7602 return isGuaranteedToTransferExecutionTo(DefI,
I);
7605bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7607 if (isSCEVExprNeverPoison(
I))
7618 auto *ExitingBB =
L->getExitingBlock();
7622 SmallPtrSet<const Value *, 16> KnownPoison;
7631 while (!Worklist.
empty()) {
7634 for (
const Use &U :
Poison->uses()) {
7637 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7641 if (KnownPoison.
insert(PoisonUser).second)
7649ScalarEvolution::LoopProperties
7650ScalarEvolution::getLoopProperties(
const Loop *L) {
7651 using LoopProperties = ScalarEvolution::LoopProperties;
7653 auto Itr = LoopPropertiesCache.find(L);
7654 if (Itr == LoopPropertiesCache.end()) {
7657 return !
SI->isSimple();
7667 return I->mayWriteToMemory();
7670 LoopProperties LP = {
true,
7673 for (
auto *BB :
L->getBlocks())
7674 for (
auto &
I : *BB) {
7676 LP.HasNoAbnormalExits =
false;
7677 if (HasSideEffects(&
I))
7678 LP.HasNoSideEffects =
false;
7679 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7683 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7684 assert(InsertPair.second &&
"We just checked!");
7685 Itr = InsertPair.first;
7698const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7704 Stack.emplace_back(V,
true);
7705 Stack.emplace_back(V,
false);
7706 while (!Stack.empty()) {
7707 auto E = Stack.pop_back_val();
7708 Value *CurV = E.getPointer();
7714 const SCEV *CreatedSCEV =
nullptr;
7717 CreatedSCEV = createSCEV(CurV);
7722 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7726 insertValueToMap(CurV, CreatedSCEV);
7730 Stack.emplace_back(CurV,
true);
7732 Stack.emplace_back(
Op,
false);
7749 if (!DT.isReachableFromEntry(
I->getParent()))
7762 switch (BO->Opcode) {
7763 case Instruction::Add:
7764 case Instruction::Mul: {
7771 Ops.push_back(BO->
Op);
7775 Ops.push_back(BO->RHS);
7779 (BO->Opcode == Instruction::Add &&
7780 (NewBO->Opcode != Instruction::Add &&
7781 NewBO->Opcode != Instruction::Sub)) ||
7782 (BO->Opcode == Instruction::Mul &&
7783 NewBO->Opcode != Instruction::Mul)) {
7784 Ops.push_back(BO->LHS);
7789 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7792 Ops.push_back(BO->LHS);
7800 case Instruction::Sub:
7801 case Instruction::UDiv:
7802 case Instruction::URem:
7804 case Instruction::AShr:
7805 case Instruction::Shl:
7806 case Instruction::Xor:
7810 case Instruction::And:
7811 case Instruction::Or:
7815 case Instruction::LShr:
7822 Ops.push_back(BO->LHS);
7823 Ops.push_back(BO->RHS);
7827 switch (
U->getOpcode()) {
7828 case Instruction::Trunc:
7829 case Instruction::ZExt:
7830 case Instruction::SExt:
7831 case Instruction::PtrToAddr:
7832 case Instruction::PtrToInt:
7833 Ops.push_back(
U->getOperand(0));
7836 case Instruction::BitCast:
7838 Ops.push_back(
U->getOperand(0));
7843 case Instruction::SDiv:
7844 case Instruction::SRem:
7845 Ops.push_back(
U->getOperand(0));
7846 Ops.push_back(
U->getOperand(1));
7849 case Instruction::GetElementPtr:
7851 "GEP source element type must be sized");
7855 case Instruction::IntToPtr:
7858 case Instruction::PHI:
7889 Ops.push_back(CondICmp->getOperand(0));
7890 Ops.push_back(CondICmp->getOperand(1));
7910 case Instruction::Select: {
7912 auto CanSimplifyToUnknown = [
this,
U]() {
7930 if (CanSimplifyToUnknown())
7937 case Instruction::Call:
7938 case Instruction::Invoke:
7945 switch (
II->getIntrinsicID()) {
7946 case Intrinsic::abs:
7947 Ops.push_back(
II->getArgOperand(0));
7949 case Intrinsic::umax:
7950 case Intrinsic::umin:
7951 case Intrinsic::smax:
7952 case Intrinsic::smin:
7953 case Intrinsic::usub_sat:
7954 case Intrinsic::uadd_sat:
7955 Ops.push_back(
II->getArgOperand(0));
7956 Ops.push_back(
II->getArgOperand(1));
7958 case Intrinsic::start_loop_iterations:
7959 case Intrinsic::annotation:
7960 case Intrinsic::ptr_annotation:
7961 Ops.push_back(
II->getArgOperand(0));
7973const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7982 if (!DT.isReachableFromEntry(
I->getParent()))
7997 switch (BO->Opcode) {
7998 case Instruction::Add: {
8024 if (BO->Opcode == Instruction::Sub)
8032 if (BO->Opcode == Instruction::Sub)
8039 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
8040 NewBO->Opcode != Instruction::Sub)) {
8050 case Instruction::Mul: {
8071 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
8080 case Instruction::UDiv:
8084 case Instruction::URem:
8088 case Instruction::Sub: {
8091 Flags = getNoWrapFlagsFromUB(BO->
Op);
8096 case Instruction::And:
8102 if (CI->isMinusOne())
8104 const APInt &
A = CI->getValue();
8110 unsigned LZ =
A.countl_zero();
8111 unsigned TZ =
A.countr_zero();
8116 APInt EffectiveMask =
8118 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
8121 const SCEV *ShiftedLHS =
nullptr;
8125 unsigned MulZeros = OpC->getAPInt().countr_zero();
8126 unsigned GCD = std::min(MulZeros, TZ);
8131 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
8153 case Instruction::Or:
8162 case Instruction::Xor:
8165 if (CI->isMinusOne())
8174 if (LBO->getOpcode() == Instruction::And &&
8175 LCI->getValue() == CI->getValue())
8176 if (
const SCEVZeroExtendExpr *Z =
8179 const SCEV *Z0 =
Z->getOperand();
8186 if (CI->getValue().isMask(Z0TySize))
8192 APInt Trunc = CI->getValue().trunc(Z0TySize);
8201 case Instruction::Shl:
8219 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8227 ConstantInt *
X = ConstantInt::get(
8233 case Instruction::AShr:
8255 const SCEV *AddTruncateExpr =
nullptr;
8256 ConstantInt *ShlAmtCI =
nullptr;
8257 const SCEV *AddConstant =
nullptr;
8259 if (L &&
L->getOpcode() == Instruction::Add) {
8267 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8274 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8282 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8287 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8292 if (AddTruncateExpr && ShlAmtCI) {
8304 const APInt &ShlAmt = ShlAmtCI->
getValue();
8308 const SCEV *CompositeExpr =
8310 if (
L->getOpcode() != Instruction::Shl)
8311 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8320 switch (
U->getOpcode()) {
8321 case Instruction::Trunc:
8324 case Instruction::ZExt:
8327 case Instruction::SExt:
8337 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8338 Type *Ty =
U->getType();
8346 case Instruction::BitCast:
8352 case Instruction::PtrToAddr: {
8359 case Instruction::PtrToInt: {
8362 Type *DstIntTy =
U->getType();
8370 case Instruction::IntToPtr:
8374 case Instruction::SDiv:
8381 case Instruction::SRem:
8388 case Instruction::GetElementPtr:
8391 case Instruction::PHI:
8394 case Instruction::Select:
8395 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8398 case Instruction::Call:
8399 case Instruction::Invoke:
8404 switch (
II->getIntrinsicID()) {
8405 case Intrinsic::abs:
8409 case Intrinsic::umax:
8413 case Intrinsic::umin:
8417 case Intrinsic::smax:
8421 case Intrinsic::smin:
8425 case Intrinsic::usub_sat: {
8426 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8427 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8431 case Intrinsic::uadd_sat: {
8432 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8433 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8437 case Intrinsic::start_loop_iterations:
8438 case Intrinsic::annotation:
8439 case Intrinsic::ptr_annotation:
8443 case Intrinsic::vscale:
8463 auto *ExitCountType = ExitCount->
getType();
8464 assert(ExitCountType->isIntegerTy());
8466 1 + ExitCountType->getScalarSizeInBits());
8479 auto CanAddOneWithoutOverflow = [&]() {
8481 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8492 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8522 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8523 assert(L->isLoopExiting(ExitingBlock) &&
8524 "Exiting block must actually branch out of the loop!");
8533 const auto *MaxExitCount =
8541 L->getExitingBlocks(ExitingBlocks);
8543 std::optional<unsigned> Res;
8544 for (
auto *ExitingBB : ExitingBlocks) {
8548 Res = std::gcd(*Res, Multiple);
8550 return Res.value_or(1);
8554 const SCEV *ExitCount) {
8584 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8585 assert(L->isLoopExiting(ExitingBlock) &&
8586 "Exiting block must actually branch out of the loop!");
8596 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8598 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8600 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8610 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8613 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8616 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8624 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8631 return getBackedgeTakenInfo(L).getExact(L,
this);
8633 return getBackedgeTakenInfo(L).getConstantMax(
this);
8635 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8642 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8647 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8651 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8661 for (
PHINode &PN : Header->phis())
8662 if (Visited.
insert(&PN).second)
8666ScalarEvolution::BackedgeTakenInfo &
8667ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8668 auto &BTI = getBackedgeTakenInfo(L);
8669 if (BTI.hasFullInfo())
8672 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8675 return Pair.first->second;
8677 BackedgeTakenInfo
Result =
8678 computeBackedgeTakenCount(L,
true);
8680 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8683ScalarEvolution::BackedgeTakenInfo &
8684ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8690 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8691 BackedgeTakenCounts.try_emplace(L);
8693 return Pair.first->second;
8698 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8705 if (
Result.hasAnyInfo()) {
8708 auto LoopUsersIt = LoopUsers.find(L);
8709 if (LoopUsersIt != LoopUsers.end())
8711 forgetMemoizedResults(ToForget);
8714 for (PHINode &PN :
L->getHeader()->phis())
8715 ConstantEvolutionLoopExitValue.erase(&PN);
8723 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8732 BackedgeTakenCounts.clear();
8733 PredicatedBackedgeTakenCounts.clear();
8734 BECountUsers.clear();
8735 LoopPropertiesCache.clear();
8736 ConstantEvolutionLoopExitValue.clear();
8737 ValueExprMap.clear();
8738 ValuesAtScopes.clear();
8739 ValuesAtScopesUsers.clear();
8740 LoopDispositions.clear();
8741 BlockDispositions.clear();
8742 UnsignedRanges.clear();
8743 SignedRanges.clear();
8744 ExprValueMap.clear();
8746 ConstantMultipleCache.clear();
8747 PredicatedSCEVRewrites.clear();
8749 FoldCacheUser.clear();
8751void ScalarEvolution::visitAndClearUsers(
8755 while (!Worklist.
empty()) {
8762 if (It != ValueExprMap.
end()) {
8763 eraseValueFromMap(It->first);
8766 ConstantEvolutionLoopExitValue.erase(PN);
8780 while (!LoopWorklist.
empty()) {
8784 forgetBackedgeTakenCounts(CurrL,
false);
8785 forgetBackedgeTakenCounts(CurrL,
true);
8788 for (
auto I = PredicatedSCEVRewrites.begin();
8789 I != PredicatedSCEVRewrites.end();) {
8790 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8791 if (Entry.second == CurrL)
8792 PredicatedSCEVRewrites.erase(
I++);
8797 auto LoopUsersItr = LoopUsers.find(CurrL);
8798 if (LoopUsersItr != LoopUsers.end())
8803 visitAndClearUsers(Worklist, Visited, ToForget);
8805 LoopPropertiesCache.erase(CurrL);
8808 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8810 forgetMemoizedResults(ToForget);
8827 visitAndClearUsers(Worklist, Visited, ToForget);
8829 forgetMemoizedResults(ToForget);
8841 struct InvalidationRootCollector {
8845 InvalidationRootCollector(
Loop *L) : L(L) {}
8847 bool follow(
const SCEV *S) {
8853 if (L->contains(AddRec->
getLoop()))
8858 bool isDone()
const {
return false; }
8861 InvalidationRootCollector
C(L);
8863 forgetMemoizedResults(
C.Roots);
8876 BlockDispositions.clear();
8877 LoopDispositions.clear();
8894 while (!Worklist.
empty()) {
8896 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8897 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8898 if (!LoopDispoRemoved && !BlockDispoRemoved)
8900 auto Users = SCEVUsers.find(Curr);
8901 if (
Users != SCEVUsers.end())
8914const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8918 if (!isComplete() || ExitNotTaken.
empty())
8929 for (
const auto &ENT : ExitNotTaken) {
8930 const SCEV *BECount = ENT.ExactNotTaken;
8933 "We should only have known counts for exiting blocks that dominate "
8936 Ops.push_back(BECount);
8941 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8942 "Predicate should be always true!");
8951const ScalarEvolution::ExitNotTakenInfo *
8952ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8953 const BasicBlock *ExitingBlock,
8954 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8955 for (
const auto &ENT : ExitNotTaken)
8956 if (ENT.ExitingBlock == ExitingBlock) {
8957 if (ENT.hasAlwaysTruePredicate())
8959 else if (Predicates) {
8969const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8971 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8972 if (!getConstantMax())
8975 for (
const auto &ENT : ExitNotTaken)
8976 if (!ENT.hasAlwaysTruePredicate()) {
8984 "No point in having a non-constant max backedge taken count!");
8985 return getConstantMax();
8988const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8990 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8998 for (
const auto &ENT : ExitNotTaken) {
8999 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
9002 "We should only have known counts for exiting blocks that "
9008 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
9009 "Predicate should be always true!");
9012 if (ExitCounts.
empty())
9021bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
9023 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
9024 return !ENT.hasAlwaysTruePredicate();
9026 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
9042 this->ExactNotTaken = E = ConstantMaxNotTaken;
9043 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
9048 "Exact is not allowed to be less precise than Constant Max");
9051 "Exact is not allowed to be less precise than Symbolic Max");
9054 "Symbolic Max is not allowed to be less precise than Constant Max");
9057 "No point in having a non-constant max backedge taken count!");
9059 for (
const auto PredList : PredLists)
9060 for (
const auto *
P : PredList) {
9068 "Backedge count should be int");
9071 "Max backedge count should be int");
9084ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
9086 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
9087 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
9088 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9090 ExitNotTaken.reserve(ExitCounts.
size());
9091 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
9092 std::back_inserter(ExitNotTaken),
9093 [&](
const EdgeExitInfo &EEI) {
9094 BasicBlock *ExitBB = EEI.first;
9095 const ExitLimit &EL = EEI.second;
9096 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
9097 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
9102 "No point in having a non-constant max backedge taken count!");
9106ScalarEvolution::BackedgeTakenInfo
9107ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
9108 bool AllowPredicates) {
9110 L->getExitingBlocks(ExitingBlocks);
9112 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9115 bool CouldComputeBECount =
true;
9117 const SCEV *MustExitMaxBECount =
nullptr;
9118 const SCEV *MayExitMaxBECount =
nullptr;
9119 bool MustExitMaxOrZero =
false;
9120 bool IsOnlyExit = ExitingBlocks.
size() == 1;
9131 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9132 if (ExitIfTrue == CI->
isZero())
9136 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
9138 assert((AllowPredicates || EL.Predicates.empty()) &&
9139 "Predicated exit limit when predicates are not allowed!");
9144 ++NumExitCountsComputed;
9148 CouldComputeBECount =
false;
9155 "Exact is known but symbolic isn't?");
9156 ++NumExitCountsNotComputed;
9171 DT.dominates(ExitBB, Latch)) {
9172 if (!MustExitMaxBECount) {
9173 MustExitMaxBECount = EL.ConstantMaxNotTaken;
9174 MustExitMaxOrZero = EL.MaxOrZero;
9177 EL.ConstantMaxNotTaken);
9181 MayExitMaxBECount = EL.ConstantMaxNotTaken;
9184 EL.ConstantMaxNotTaken);
9188 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9192 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9198 for (
const auto &Pair : ExitCounts) {
9200 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
9202 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9203 {
L, AllowPredicates});
9205 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9206 MaxBECount, MaxOrZero);
9209ScalarEvolution::ExitLimit
9210ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
9211 bool IsOnlyExit,
bool AllowPredicates) {
9212 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
9216 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9221 bool ExitIfTrue = !
L->contains(BI->getSuccessor(0));
9222 assert(ExitIfTrue ==
L->contains(BI->getSuccessor(1)) &&
9223 "It should have one successor in loop and one exit block!");
9234 if (!
L->contains(SBB)) {
9239 assert(Exit &&
"Exiting block must have at least one exit");
9240 return computeExitLimitFromSingleExitSwitch(
9241 L, SI, Exit, IsOnlyExit);
9248 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9249 bool AllowPredicates) {
9250 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9251 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9252 ControlsOnlyExit, AllowPredicates);
9255std::optional<ScalarEvolution::ExitLimit>
9256ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9257 bool ExitIfTrue,
bool ControlsOnlyExit,
9258 bool AllowPredicates) {
9260 (void)this->ExitIfTrue;
9261 (void)this->AllowPredicates;
9263 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9264 this->AllowPredicates == AllowPredicates &&
9265 "Variance in assumed invariant key components!");
9266 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9267 if (Itr == TripCountMap.end())
9268 return std::nullopt;
9272void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9274 bool ControlsOnlyExit,
9275 bool AllowPredicates,
9277 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9278 this->AllowPredicates == AllowPredicates &&
9279 "Variance in assumed invariant key components!");
9281 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9282 assert(InsertResult.second &&
"Expected successful insertion!");
9287ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9288 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9289 bool ControlsOnlyExit,
bool AllowPredicates) {
9291 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9295 ExitLimit EL = computeExitLimitFromCondImpl(
9296 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9297 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9301ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9302 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9303 bool ControlsOnlyExit,
bool AllowPredicates) {
9305 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9306 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9307 return *LimitFromBinOp;
9313 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9314 if (EL.hasFullInfo() || !AllowPredicates)
9318 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9338 const WithOverflowInst *WO;
9353 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9354 ControlsOnlyExit, AllowPredicates);
9355 if (EL.hasAnyInfo())
9360 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9363std::optional<ScalarEvolution::ExitLimit>
9364ScalarEvolution::computeExitLimitFromCondFromBinOp(
9365 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9366 bool ControlsOnlyExit,
bool AllowPredicates) {
9375 return std::nullopt;
9380 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9381 ExitLimit EL0 = computeExitLimitFromCondCached(
9382 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9384 ExitLimit EL1 = computeExitLimitFromCondCached(
9385 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9389 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9391 return Op1 == NeutralElement ? EL0 : EL1;
9393 return Op0 == NeutralElement ? EL1 : EL0;
9398 if (EitherMayExit) {
9408 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9410 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9413 EL1.ConstantMaxNotTaken);
9415 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9417 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9420 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9424 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9425 BECount = EL0.ExactNotTaken;
9438 SymbolicMaxBECount =
9440 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9444ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9445 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9446 bool AllowPredicates) {
9458 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9460 if (EL.hasAnyInfo())
9463 auto *ExhaustiveCount =
9464 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9467 return ExhaustiveCount;
9469 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9472ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9474 bool ControlsOnlyExit,
bool AllowPredicates) {
9499 ConstantRange CompRange =
9517 InnerLHS = ZExt->getOperand();
9564 if (EL.hasAnyInfo())
9581 if (EL.hasAnyInfo())
return EL;
9613 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9615 if (EL.hasAnyInfo())
9631 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9633 if (EL.hasAnyInfo())
9644ScalarEvolution::ExitLimit
9645ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9647 BasicBlock *ExitingBlock,
9648 bool ControlsOnlyExit) {
9649 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9652 if (
Switch->getDefaultDest() == ExitingBlock)
9656 "Default case must not exit the loop!");
9662 if (EL.hasAnyInfo())
9674 "Evaluation of SCEV at constant didn't fold correctly?");
9678ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9688 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9694 auto MatchPositiveShift =
9697 using namespace PatternMatch;
9699 ConstantInt *ShiftAmt;
9701 OutOpCode = Instruction::LShr;
9703 OutOpCode = Instruction::AShr;
9705 OutOpCode = Instruction::Shl;
9720 auto MatchShiftRecurrence =
9722 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9737 if (MatchPositiveShift(
LHS, V, OpC)) {
9738 PostShiftOpCode = OpC;
9744 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9747 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9753 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9760 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9765 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9777 ConstantInt *StableValue =
nullptr;
9782 case Instruction::AShr: {
9790 StableValue = ConstantInt::get(Ty, 0);
9792 StableValue = ConstantInt::get(Ty, -1,
true);
9798 case Instruction::LShr:
9799 case Instruction::Shl:
9809 "Otherwise cannot be an operand to a branch instruction");
9811 if (
Result->isNullValue()) {
9813 const SCEV *UpperBound =
9830 if (
const Function *
F = CI->getCalledFunction())
9839 if (!L->contains(
I))
return false;
9844 return L->getHeader() ==
I->getParent();
9920 if (!
I)
return nullptr;
9933 std::vector<Constant*> Operands(
I->getNumOperands());
9935 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9939 if (!Operands[i])
return nullptr;
9944 if (!
C)
return nullptr;
9966 if (IncomingVal != CurrentVal) {
9969 IncomingVal = CurrentVal;
9981ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9984 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9993 DenseMap<Instruction *, Constant *> CurrentIterVals;
9995 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
10001 for (PHINode &
PHI : Header->phis()) {
10003 CurrentIterVals[&
PHI] = StartCST;
10005 if (!CurrentIterVals.
count(PN))
10006 return RetVal =
nullptr;
10012 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
10015 unsigned IterationNum = 0;
10017 for (; ; ++IterationNum) {
10018 if (IterationNum == NumIterations)
10019 return RetVal = CurrentIterVals[PN];
10023 DenseMap<Instruction *, Constant *> NextIterVals;
10028 NextIterVals[PN] = NextPHI;
10030 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
10036 for (
const auto &
I : CurrentIterVals) {
10038 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
10043 for (
const auto &
I : PHIsToCompute) {
10044 PHINode *
PHI =
I.first;
10047 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10050 if (NextPHI !=
I.second)
10051 StoppedEvolving =
false;
10056 if (StoppedEvolving)
10057 return RetVal = CurrentIterVals[PN];
10059 CurrentIterVals.swap(NextIterVals);
10063const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
10073 DenseMap<Instruction *, Constant *> CurrentIterVals;
10075 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
10078 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
10080 for (PHINode &
PHI : Header->phis()) {
10082 CurrentIterVals[&
PHI] = StartCST;
10084 if (!CurrentIterVals.
count(PN))
10092 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
10099 if (CondVal->getValue() == uint64_t(ExitWhen)) {
10100 ++NumBruteForceTripCountsComputed;
10105 DenseMap<Instruction *, Constant *> NextIterVals;
10111 for (
const auto &
I : CurrentIterVals) {
10113 if (!
PHI ||
PHI->getParent() != Header)
continue;
10116 for (PHINode *
PHI : PHIsToCompute) {
10118 if (NextPHI)
continue;
10120 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10123 CurrentIterVals.
swap(NextIterVals);
10134 for (
auto &LS : Values)
10136 return LS.second ? LS.second : V;
10141 const SCEV *
C = computeSCEVAtScope(V, L);
10142 for (
auto &LS :
reverse(ValuesAtScopes[V]))
10143 if (LS.first == L) {
10146 ValuesAtScopesUsers[
C].push_back({L, V});
10157 switch (V->getSCEVType()) {
10197 assert(!
C->getType()->isPointerTy() &&
10198 "Can only have one pointer, and it must be last");
10223const SCEV *ScalarEvolution::getWithOperands(
const SCEV *S,
10224 SmallVectorImpl<SCEVUse> &NewOps) {
10259const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10260 switch (
V->getSCEVType()) {
10271 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10282 for (++i; i !=
e; ++i)
10327 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10337 for (++i; i !=
e; ++i) {
10342 return getWithOperands(V, NewOps);
10357 const Loop *CurrLoop = this->LI[
I->getParent()];
10368 if (BackedgeTakenCount->
isZero()) {
10369 Value *InitValue =
nullptr;
10370 bool MultipleInitValues =
false;
10376 MultipleInitValues =
true;
10381 if (!MultipleInitValues && InitValue)
10390 unsigned InLoopPred =
10401 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10415 SmallVector<Constant *, 4> Operands;
10416 Operands.
reserve(
I->getNumOperands());
10417 bool MadeImprovement =
false;
10432 MadeImprovement |= OrigV != OpV;
10437 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10442 if (!MadeImprovement)
10463const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10465 return stripInjectiveFunctions(ZExt->getOperand());
10467 return stripInjectiveFunctions(SExt->getOperand());
10485 assert(
A != 0 &&
"A must be non-zero.");
10501 if (MinTZ < Mult2 && L->getLoopPredecessor())
10503 if (MinTZ < Mult2) {
10526 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10546static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10552 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10553 << *AddRec <<
'\n');
10556 if (!LC || !MC || !
NC) {
10557 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10558 return std::nullopt;
10564 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10572 N =
N.sext(NewWidth);
10573 M = M.sext(NewWidth);
10574 L = L.sext(NewWidth);
10591 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10592 <<
", multiplied by " <<
T <<
'\n');
10601 std::optional<APInt>
Y) {
10603 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10606 return XW.
slt(YW) ? *
X : *
Y;
10609 return std::nullopt;
10610 return X ? *
X : *
Y;
10627 return std::nullopt;
10628 unsigned W =
X->getBitWidth();
10648static std::optional<APInt>
10654 return std::nullopt;
10657 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10658 std::optional<APInt>
X =
10661 return std::nullopt;
10666 return std::nullopt;
10681static std::optional<APInt>
10685 "Starting value of addrec should be 0");
10686 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10687 <<
Range <<
", addrec " << *AddRec <<
'\n');
10691 "Addrec's initial value should be in range");
10697 return std::nullopt;
10707 auto SolveForBoundary =
10708 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10711 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10712 << Bound <<
" (before multiplying by " << M <<
")\n");
10715 std::optional<APInt> SO;
10718 "signed overflow\n");
10722 "unsigned overflow\n");
10723 std::optional<APInt> UO =
10726 auto LeavesRange = [&] (
const APInt &
X) {
10743 return {std::nullopt,
false};
10748 if (LeavesRange(*Min))
10749 return { Min,
true };
10750 std::optional<APInt> Max = Min == SO ? UO : SO;
10751 if (LeavesRange(*Max))
10752 return { Max,
true };
10755 return {std::nullopt,
true};
10762 auto SL = SolveForBoundary(
Lower);
10763 auto SU = SolveForBoundary(
Upper);
10766 if (!SL.second || !SU.second)
10767 return std::nullopt;
10810ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10812 bool ControlsOnlyExit,
10813 bool AllowPredicates) {
10824 if (
C->getValue()->isZero())
return C;
10828 const SCEVAddRecExpr *AddRec =
10831 if (!AddRec && AllowPredicates)
10837 if (!AddRec || AddRec->
getLoop() != L)
10848 return ExitLimit(R, R, R,
false, Predicates);
10906 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10932 const SCEV *
Exact =
10940 const SCEV *SymbolicMax =
10942 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10951 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10959 return ExitLimit(
E, M, S,
false, Predicates);
10962ScalarEvolution::ExitLimit
10963ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10971 if (!
C->getValue()->isZero())
10981std::pair<const BasicBlock *, const BasicBlock *>
10982ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10993 if (
const Loop *L = LI.getLoopFor(BB))
10994 return {
L->getLoopPredecessor(),
L->getHeader()};
10996 return {
nullptr, BB};
11005 if (
A ==
B)
return true;
11020 if (ComputesEqualValues(AI, BI))
11028 const SCEV *Op0, *Op1;
11047 auto TrivialCase = [&](
bool TriviallyTrue) {
11056 const SCEV *NewLHS, *NewRHS;
11080 return TrivialCase(
false);
11081 return TrivialCase(
true);
11104 const APInt &
RA = RC->getAPInt();
11106 bool SimplifiedByConstantRange =
false;
11111 return TrivialCase(
true);
11113 return TrivialCase(
false);
11122 Changed = SimplifiedByConstantRange =
true;
11126 if (!SimplifiedByConstantRange) {
11143 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
11149 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
11155 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
11161 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
11173 return TrivialCase(
true);
11175 return TrivialCase(
false);
11280 auto NonRecursive = [OrNegative](
const SCEV *S) {
11282 return C->getAPInt().isPowerOf2() ||
11283 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11289 if (NonRecursive(S))
11315 APInt C = Cst->getAPInt();
11316 return C.urem(M) == 0;
11324 const SCEV *SmodM =
11339 for (
auto *
A : Assumptions)
11340 if (
A->implies(
P, *
this))
11353std::pair<const SCEV *, const SCEV *>
11356 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11358 return { Start, Start };
11360 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11369 getUsedLoops(LHS, LoopsUsed);
11370 getUsedLoops(RHS, LoopsUsed);
11372 if (LoopsUsed.
empty())
11377 for (
const auto *L1 : LoopsUsed)
11378 for (
const auto *L2 : LoopsUsed)
11379 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11380 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11381 "Domination relationship is not a linear order");
11411 SplitRHS.second) &&
11423 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11427 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11437 return std::nullopt;
11452 if (KnownWithoutContext)
11453 return KnownWithoutContext;
11460 return std::nullopt;
11466 const Loop *L = LHS->getLoop();
11471std::optional<ScalarEvolution::MonotonicPredicateType>
11474 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11480 auto ResultSwapped =
11483 assert(*ResultSwapped != *Result &&
11484 "monotonicity should flip as we flip the predicate");
11491std::optional<ScalarEvolution::MonotonicPredicateType>
11492ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11506 return std::nullopt;
11510 "Should be greater or less!");
11514 if (!LHS->hasNoUnsignedWrap())
11515 return std::nullopt;
11519 "Relational predicate is either signed or unsigned!");
11520 if (!
LHS->hasNoSignedWrap())
11521 return std::nullopt;
11523 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11531 return std::nullopt;
11534std::optional<ScalarEvolution::LoopInvariantPredicate>
11541 return std::nullopt;
11548 if (!ArLHS || ArLHS->
getLoop() != L)
11549 return std::nullopt;
11553 return std::nullopt;
11579 return std::nullopt;
11616 return std::nullopt;
11619std::optional<ScalarEvolution::LoopInvariantPredicate>
11624 Pred, LHS, RHS, L, CtxI, MaxIter))
11634 Pred, LHS, RHS, L, CtxI,
Op))
11636 return std::nullopt;
11639std::optional<ScalarEvolution::LoopInvariantPredicate>
11654 return std::nullopt;
11661 if (!AR || AR->
getLoop() != L)
11662 return std::nullopt;
11667 Pred = Pred.dropSameSign();
11671 return std::nullopt;
11677 if (Step != One && Step != MinusOne)
11678 return std::nullopt;
11684 return std::nullopt;
11690 return std::nullopt;
11698 if (Step == MinusOne)
11702 return std::nullopt;
11708bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11714 auto CheckRange = [&](
bool IsSigned) {
11717 return RangeLHS.
icmp(Pred, RangeRHS);
11726 if (CheckRange(
true) || CheckRange(
false))
11735bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11744 SCEVUse XNonConstOp, XConstOp;
11745 SCEVUse YNonConstOp, YConstOp;
11749 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11752 XFlagsPresent = ExpectedFlags;
11757 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11760 YFlagsPresent = ExpectedFlags;
11763 if (YNonConstOp != XNonConstOp)
11771 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11774 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11834bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11855bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11856 const SCEV *
LHS,
const SCEV *
RHS) {
11861 return any_of(*BB, [&](
const Instruction &
I) {
11862 using namespace llvm::PatternMatch;
11867 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11881 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11886 "This cannot be done on broken IR!");
11889 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11898 if (LoopContinuePredicate &&
11899 isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->
getCondition(),
11900 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11905 if (WalkingBEDominatingConds)
11911 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11912 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11919 const SCEV *LoopCounter =
11927 for (
auto &AssumeVH : AC.assumptions()) {
11934 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11938 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11941 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11942 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11943 assert(DTN &&
"should reach the loop header before reaching the root!");
11946 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11964 if (isImpliedCond(Pred, LHS, RHS, ContBr->
getCondition(),
11977 if (!DT.isReachableFromEntry(BB))
11981 "This cannot be done on broken IR!");
11989 const bool ProvingStrictComparison =
11991 bool ProvedNonStrictComparison =
false;
11992 bool ProvedNonEquality =
false;
11995 if (!ProvedNonStrictComparison)
11996 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11997 if (!ProvedNonEquality)
11999 if (ProvedNonStrictComparison && ProvedNonEquality)
12004 if (ProvingStrictComparison) {
12006 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
12008 if (SplitAndProve(ProofFn))
12013 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
12015 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
12017 if (ProvingStrictComparison) {
12019 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
12021 if (SplitAndProve(ProofFn))
12030 const Loop *ContainingLoop = LI.getLoopFor(BB);
12032 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
12036 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
12037 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
12040 if (!BlockEntryPredicate)
12049 for (
auto &AssumeVH : AC.assumptions()) {
12053 if (!DT.dominates(CI, BB))
12056 if (ProveViaCond(CI->getArgOperand(0),
false))
12062 F.getParent(), Intrinsic::experimental_guard);
12064 for (
const auto *GU : GuardDecl->users())
12066 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
12067 if (ProveViaCond(Guard->getArgOperand(0),
false))
12082 "LHS is not available at Loop Entry");
12084 "RHS is not available at Loop Entry");
12086 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
12097 if (FoundCondValue ==
12101 if (!PendingLoopPredicates.insert(FoundCondValue).second)
12105 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
12108 const Value *Op0, *Op1;
12111 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
12115 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
12116 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
12120 if (!ICI)
return false;
12124 CmpPredicate FoundPred;
12133 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
12136bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
12137 const SCEV *
RHS, CmpPredicate FoundPred,
12138 const SCEV *FoundLHS,
const SCEV *FoundRHS,
12139 const Instruction *CtxI) {
12149 auto *WideType = FoundLHS->
getType();
12161 TruncFoundLHS, TruncFoundRHS, CtxI))
12187 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
12191bool ScalarEvolution::isImpliedCondBalancedTypes(
12196 "Types should be balanced!");
12203 if (FoundLHS == FoundRHS)
12207 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
12219 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12236 LHS, FoundLHS, FoundRHS, CtxI);
12238 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12260 assert(P1 != P2 &&
"Handled earlier!");
12264 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12268 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12271 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12272 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12273 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12278 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12289 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12290 CanonicalRHS, CanonicalFoundLHS,
12291 CanonicalFoundRHS);
12296 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12297 CanonicalRHS, CanonicalFoundLHS,
12298 CanonicalFoundRHS);
12305 const SCEVConstant *
C =
nullptr;
12306 const SCEV *
V =
nullptr;
12324 if (Min ==
C->getAPInt()) {
12329 APInt SharperMin = Min + 1;
12332 case ICmpInst::ICMP_SGE:
12333 case ICmpInst::ICMP_UGE:
12336 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12341 case ICmpInst::ICMP_SGT:
12342 case ICmpInst::ICMP_UGT:
12352 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12357 case ICmpInst::ICMP_SLE:
12358 case ICmpInst::ICMP_ULE:
12359 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12360 LHS, V, getConstant(SharperMin), CtxI))
12364 case ICmpInst::ICMP_SLT:
12365 case ICmpInst::ICMP_ULT:
12366 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12367 LHS, V, getConstant(Min), CtxI))
12381 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12385 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12388 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12404std::optional<APInt>
12411 APInt DiffMul(BW, 1);
12414 for (
unsigned I = 0;
I < 8; ++
I) {
12423 if (LAR->getLoop() != MAR->getLoop())
12424 return std::nullopt;
12428 if (!LAR->isAffine() || !MAR->isAffine())
12429 return std::nullopt;
12431 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12432 return std::nullopt;
12434 Less = LAR->getStart();
12435 More = MAR->getStart();
12440 auto MatchConstMul =
12441 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12446 return std::nullopt;
12448 if (
auto MatchedMore = MatchConstMul(More)) {
12449 if (
auto MatchedLess = MatchConstMul(
Less)) {
12450 if (MatchedMore->second == MatchedLess->second) {
12451 More = MatchedMore->first;
12452 Less = MatchedLess->first;
12453 DiffMul *= MatchedMore->second;
12464 Diff +=
C->getAPInt() * DiffMul;
12467 Diff -=
C->getAPInt() * DiffMul;
12470 Multiplicity[S] +=
Mul;
12472 auto Decompose = [&](
const SCEV *S,
int Mul) {
12479 Decompose(More, 1);
12480 Decompose(
Less, -1);
12484 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12485 for (
const auto &[S,
Mul] : Multiplicity) {
12490 return std::nullopt;
12492 }
else if (
Mul == -1) {
12494 return std::nullopt;
12497 return std::nullopt;
12501 if (NewMore == More || NewLess ==
Less)
12502 return std::nullopt;
12508 if (!More && !
Less)
12512 if (!More || !
Less)
12513 return std::nullopt;
12517 return std::nullopt;
12520bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12542 const auto *Latch = L->getLoopLatch();
12545 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12554 const auto *Latch = L->getLoopLatch();
12557 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12567bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12570 const SCEV *FoundLHS,
12571 const SCEV *FoundRHS) {
12580 if (!AddRecFoundLHS)
12587 const Loop *
L = AddRecFoundLHS->getLoop();
12588 if (L != AddRecLHS->getLoop())
12627 if (!RDiff || *LDiff != *RDiff)
12630 if (LDiff->isMinValue())
12633 APInt FoundRHSLimit;
12636 FoundRHSLimit = -(*RDiff);
12648bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12649 const SCEV *
RHS,
const SCEV *FoundLHS,
12650 const SCEV *FoundRHS,
unsigned Depth) {
12651 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12655 bool Erased = PendingMerges.erase(LPhi);
12656 assert(Erased &&
"Failed to erase LPhi!");
12660 bool Erased = PendingMerges.erase(RPhi);
12661 assert(Erased &&
"Failed to erase RPhi!");
12669 if (!PendingMerges.insert(Phi).second)
12683 if (!PendingMerges.insert(Phi).second)
12689 if (!LPhi && !RPhi)
12700 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12704 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12705 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12706 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12707 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12710 if (RPhi && RPhi->getParent() == LBB) {
12717 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12718 if (!ProvedEasily(L, R))
12729 auto *RLoop = RAR->
getLoop();
12730 auto *Predecessor = RLoop->getLoopPredecessor();
12731 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12733 if (!ProvedEasily(L1, RAR->
getStart()))
12735 auto *Latch = RLoop->getLoopLatch();
12736 assert(Latch &&
"Loop with AddRec with no latch?");
12757 if (
auto *Loop = LI.getLoopFor(LBB))
12760 if (!ProvedEasily(L,
RHS))
12767bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12770 const SCEV *FoundLHS,
12771 const SCEV *FoundRHS) {
12774 if (
RHS == FoundRHS) {
12779 if (
LHS != FoundLHS)
12786 Value *Shiftee, *ShiftValue;
12788 using namespace PatternMatch;
12789 if (
match(SUFoundRHS->getValue(),
12791 auto *ShifteeS =
getSCEV(Shiftee);
12809bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12811 const SCEV *FoundLHS,
12812 const SCEV *FoundRHS,
12813 const Instruction *CtxI) {
12814 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12816 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12818 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12819 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12821 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12825template <
typename MinMaxExprType>
12827 const SCEV *Candidate) {
12832 return is_contained(MinMaxExpr->operands(), Candidate);
12845 const SCEV *LStart, *RStart, *Step;
12895bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12897 const SCEV *FoundLHS,
12898 const SCEV *FoundRHS,
12902 "LHS and RHS have different sizes?");
12905 "FoundLHS and FoundRHS have different sizes?");
12939 auto GetOpFromSExt = [&](
const SCEV *S) ->
const SCEV * {
12941 return Ext->getOperand();
12948 auto *OrigLHS =
LHS;
12949 auto *OrigFoundLHS = FoundLHS;
12950 LHS = GetOpFromSExt(
LHS);
12951 FoundLHS = GetOpFromSExt(FoundLHS);
12954 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12957 FoundRHS,
Depth + 1);
12970 if (!LHSAddExpr->hasNoSignedWrap())
12973 SCEVUse LL = LHSAddExpr->getOperand(0);
12974 SCEVUse LR = LHSAddExpr->getOperand(1);
12978 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12979 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12984 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12990 using namespace llvm::PatternMatch;
13009 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
13017 auto *DTy = Denominator->getType();
13018 auto *FRHSTy = FoundRHS->
getType();
13019 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
13038 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
13049 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
13051 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
13059 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
13092bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
13096 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
13099 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
13102bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
13105 const SCEV *FoundLHS,
13106 const SCEV *FoundRHS) {
13142 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
13148bool ScalarEvolution::isImpliedCondOperandsViaRanges(
13149 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
13150 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
13164 ConstantRange FoundLHSRange =
13168 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
13175 return LHSRange.
icmp(Pred, ConstRHS);
13178bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
13191 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13199 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13202bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
13214 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13222 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13234const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13235 const SCEV *Stride,
13266 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13277 :
APIntOps::umax(MaxEnd, MinStart);
13284ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13285 const Loop *L,
bool IsSigned,
13286 bool ControlsOnlyExit,
bool AllowPredicates) {
13290 bool PredicatedIV =
false;
13295 auto canProveNUW = [&]() {
13298 if (!ControlsOnlyExit)
13319 Limit = Limit.
zext(OuterBitWidth);
13331 Type *Ty = ZExt->getType();
13342 if (!
IV && AllowPredicates) {
13347 PredicatedIV =
true;
13351 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13365 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13368 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13373 if (!PositiveStride) {
13425 auto wouldZeroStrideBeUB = [&]() {
13437 if (!wouldZeroStrideBeUB()) {
13441 }
else if (!NoWrap) {
13444 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13457 const SCEV *
Start =
IV->getStart();
13463 const SCEV *OrigStart =
Start;
13464 const SCEV *OrigRHS =
RHS;
13465 if (
Start->getType()->isPointerTy()) {
13476 const SCEV *End =
nullptr, *BECount =
nullptr,
13477 *BECountIfBackedgeTaken =
nullptr;
13480 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13481 RHSAddRec->getNoWrapFlags()) {
13494 const SCEV *RHSStart = RHSAddRec->getStart();
13495 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13507 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13516 BECountIfBackedgeTaken =
13521 if (BECount ==
nullptr) {
13526 const SCEV *MaxBECount = computeMaxBECountForLT(
13529 MaxBECount,
false , Predicates);
13536 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13563 const SCEV *Numerator =
13569 auto canProveRHSGreaterThanEqualStart = [&]() {
13588 auto *StartMinusOne =
13595 if (canProveRHSGreaterThanEqualStart()) {
13610 BECountIfBackedgeTaken =
13626 bool MayAddOverflow = [&] {
13672 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13686 if (!MayAddOverflow) {
13698 const SCEV *ConstantMaxBECount;
13699 bool MaxOrZero =
false;
13701 ConstantMaxBECount = BECount;
13702 }
else if (BECountIfBackedgeTaken &&
13707 ConstantMaxBECount = BECountIfBackedgeTaken;
13710 ConstantMaxBECount = computeMaxBECountForLT(
13718 const SCEV *SymbolicMaxBECount =
13720 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13724ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13725 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13726 bool ControlsOnlyExit,
bool AllowPredicates) {
13733 if (!
IV && AllowPredicates)
13740 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13744 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13757 if (!Stride->
isOne() && !NoWrap)
13758 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13761 const SCEV *
Start =
IV->getStart();
13762 const SCEV *End =
RHS;
13773 if (
Start->getType()->isPointerTy()) {
13808 const SCEV *ConstantMaxBECount =
13815 ConstantMaxBECount = BECount;
13816 const SCEV *SymbolicMaxBECount =
13819 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13825 if (
Range.isFullSet())
13830 if (!SC->getValue()->isZero()) {
13836 return ShiftedAddRec->getNumIterationsInRange(
13837 Range.subtract(SC->getAPInt()), SE);
13868 APInt ExitVal = (End +
A).udiv(
A);
13881 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13882 "Linear scev computation is off in a bad way!");
13913 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13938 Ty = Store->getValueOperand()->getType();
13940 Ty = Load->getType();
13953 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13955 SE->ConstantEvolutionLoopExitValue.erase(PN);
13956 SE->eraseValueFromMap(getValPtr());
13960void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13961 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13971 : CallbackVH(
V), SE(se) {}
13980 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13982 LoopDispositions(64), BlockDispositions(64) {
13994 F.getParent(), Intrinsic::experimental_guard);
13995 HasGuards = GuardDecl && !GuardDecl->use_empty();
13999 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
14000 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
14001 ValueExprMap(
std::
move(Arg.ValueExprMap)),
14002 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
14003 PendingMerges(
std::
move(Arg.PendingMerges)),
14004 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
14005 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
14006 PredicatedBackedgeTakenCounts(
14007 std::
move(Arg.PredicatedBackedgeTakenCounts)),
14008 BECountUsers(
std::
move(Arg.BECountUsers)),
14009 ConstantEvolutionLoopExitValue(
14010 std::
move(Arg.ConstantEvolutionLoopExitValue)),
14011 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
14012 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
14013 LoopDispositions(
std::
move(Arg.LoopDispositions)),
14014 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
14015 BlockDispositions(
std::
move(Arg.BlockDispositions)),
14016 SCEVUsers(
std::
move(Arg.SCEVUsers)),
14017 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
14018 SignedRanges(
std::
move(Arg.SignedRanges)),
14019 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
14020 UniquePreds(
std::
move(Arg.UniquePreds)),
14021 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
14022 LoopUsers(
std::
move(Arg.LoopUsers)),
14023 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
14024 FirstUnknown(Arg.FirstUnknown) {
14025 Arg.FirstUnknown =
nullptr;
14034 Tmp->~SCEVUnknown();
14036 FirstUnknown =
nullptr;
14038 ExprValueMap.clear();
14039 ValueExprMap.clear();
14041 BackedgeTakenCounts.clear();
14042 PredicatedBackedgeTakenCounts.clear();
14044 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
14045 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
14046 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
14047 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
14069 L->getHeader()->printAsOperand(OS,
false);
14073 L->getExitingBlocks(ExitingBlocks);
14074 if (ExitingBlocks.
size() != 1)
14075 OS <<
"<multiple exits> ";
14079 OS <<
"backedge-taken count is ";
14082 OS <<
"Unpredictable backedge-taken count.";
14085 if (ExitingBlocks.
size() > 1)
14086 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14087 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
14095 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
14098 OS <<
"\n Predicates:\n";
14099 for (
const auto *
P : Predicates)
14107 L->getHeader()->printAsOperand(OS,
false);
14112 OS <<
"constant max backedge-taken count is ";
14115 OS <<
", actual taken count either this or zero.";
14117 OS <<
"Unpredictable constant max backedge-taken count. ";
14122 L->getHeader()->printAsOperand(OS,
false);
14127 OS <<
"symbolic max backedge-taken count is ";
14130 OS <<
", actual taken count either this or zero.";
14132 OS <<
"Unpredictable symbolic max backedge-taken count. ";
14136 if (ExitingBlocks.
size() > 1)
14137 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14138 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
14148 OS <<
"\n predicated symbolic max exit count for "
14149 << ExitingBlock->
getName() <<
": ";
14151 OS <<
"\n Predicates:\n";
14152 for (
const auto *
P : Predicates)
14162 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
14164 L->getHeader()->printAsOperand(OS,
false);
14167 OS <<
"Predicated backedge-taken count is ";
14170 OS <<
"Unpredictable predicated backedge-taken count.";
14172 OS <<
" Predicates:\n";
14173 for (
const auto *
P : Preds)
14178 auto *PredConstantMax =
14180 if (PredConstantMax != ConstantBTC) {
14182 "different predicated constant max BTC but no predicates");
14184 L->getHeader()->printAsOperand(OS,
false);
14187 OS <<
"Predicated constant max backedge-taken count is ";
14190 OS <<
"Unpredictable predicated constant max backedge-taken count.";
14192 OS <<
" Predicates:\n";
14193 for (
const auto *
P : Preds)
14198 auto *PredSymbolicMax =
14200 if (SymbolicBTC != PredSymbolicMax) {
14202 "Different predicated symbolic max BTC, but no predicates");
14204 L->getHeader()->printAsOperand(OS,
false);
14207 OS <<
"Predicated symbolic max backedge-taken count is ";
14210 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
14212 OS <<
" Predicates:\n";
14213 for (
const auto *
P : Preds)
14219 L->getHeader()->printAsOperand(OS,
false);
14243 OS <<
"Computable";
14253 OS <<
"DoesNotDominate";
14259 OS <<
"ProperlyDominates";
14276 OS <<
"Classifying expressions for: ";
14277 F.printAsOperand(OS,
false);
14292 const Loop *L = LI.getLoopFor(
I.getParent());
14307 OS <<
"\t\t" "Exits: ";
14310 OS <<
"<<Unknown>>";
14316 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14318 Iter->getHeader()->printAsOperand(OS,
false);
14326 InnerL->getHeader()->printAsOperand(OS,
false);
14337 OS <<
"Determining loop execution counts for: ";
14338 F.printAsOperand(OS,
false);
14346 auto &Values = LoopDispositions[S];
14347 for (
auto &V : Values) {
14348 if (V.getPointer() == L)
14353 auto &Values2 = LoopDispositions[S];
14355 if (V.getPointer() == L) {
14364ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14383 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14384 " dominate the contained loop's header?");
14412 bool HasVarying =
false;
14446 auto &Values = BlockDispositions[S];
14447 for (
auto &V : Values) {
14448 if (V.getPointer() == BB)
14453 auto &Values2 = BlockDispositions[S];
14455 if (V.getPointer() == BB) {
14464ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14494 bool Proper =
true;
14505 if (Instruction *
I =
14507 if (
I->getParent() == BB)
14509 if (DT.properlyDominates(
I->getParent(), BB))
14532void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14535 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14536 auto It = BECounts.find(L);
14537 if (It != BECounts.end()) {
14538 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14539 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14541 auto UserIt = BECountUsers.find(S);
14542 assert(UserIt != BECountUsers.end());
14547 BECounts.erase(It);
14555 while (!Worklist.
empty()) {
14557 auto Users = SCEVUsers.find(Curr);
14558 if (
Users != SCEVUsers.end())
14559 for (
const auto *User :
Users->second)
14560 if (ToForget.
insert(User).second)
14564 for (
const auto *S : ToForget)
14565 forgetMemoizedResultsImpl(S);
14567 for (
auto I = PredicatedSCEVRewrites.begin();
14568 I != PredicatedSCEVRewrites.end();) {
14569 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14570 if (ToForget.count(
Entry.first))
14571 PredicatedSCEVRewrites.erase(
I++);
14577void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14578 LoopDispositions.erase(S);
14579 BlockDispositions.erase(S);
14580 UnsignedRanges.erase(S);
14581 SignedRanges.erase(S);
14582 HasRecMap.erase(S);
14583 ConstantMultipleCache.erase(S);
14586 UnsignedWrapViaInductionTried.erase(AR);
14587 SignedWrapViaInductionTried.erase(AR);
14590 auto ExprIt = ExprValueMap.find(S);
14591 if (ExprIt != ExprValueMap.end()) {
14592 for (
Value *V : ExprIt->second) {
14593 auto ValueIt = ValueExprMap.find_as(V);
14594 if (ValueIt != ValueExprMap.end())
14595 ValueExprMap.erase(ValueIt);
14597 ExprValueMap.erase(ExprIt);
14600 auto ScopeIt = ValuesAtScopes.find(S);
14601 if (ScopeIt != ValuesAtScopes.end()) {
14602 for (
const auto &Pair : ScopeIt->second)
14605 std::make_pair(Pair.first, S));
14606 ValuesAtScopes.erase(ScopeIt);
14609 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14610 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14611 for (
const auto &Pair : ScopeUserIt->second)
14612 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14613 ValuesAtScopesUsers.erase(ScopeUserIt);
14616 auto BEUsersIt = BECountUsers.find(S);
14617 if (BEUsersIt != BECountUsers.end()) {
14619 auto Copy = BEUsersIt->second;
14620 for (
const auto &Pair : Copy)
14621 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14622 BECountUsers.erase(BEUsersIt);
14625 auto FoldUser = FoldCacheUser.find(S);
14626 if (FoldUser != FoldCacheUser.end())
14627 for (
auto &KV : FoldUser->second)
14628 FoldCache.erase(KV);
14629 FoldCacheUser.erase(S);
14633ScalarEvolution::getUsedLoops(
const SCEV *S,
14635 struct FindUsedLoops {
14636 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14637 : LoopsUsed(LoopsUsed) {}
14638 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14639 bool follow(
const SCEV *S) {
14645 bool isDone()
const {
return false; }
14648 FindUsedLoops
F(LoopsUsed);
14649 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14652void ScalarEvolution::getReachableBlocks(
14655 Worklist.
push_back(&F.getEntryBlock());
14656 while (!Worklist.
empty()) {
14658 if (!Reachable.
insert(BB).second)
14666 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14673 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14677 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14712 SCEVMapper SCM(SE2);
14714 SE2.getReachableBlocks(ReachableBlocks, F);
14716 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14734 while (!LoopStack.
empty()) {
14740 if (!ReachableBlocks.
contains(L->getHeader()))
14745 auto It = BackedgeTakenCounts.find(L);
14746 if (It == BackedgeTakenCounts.end())
14750 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14770 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14771 if (Delta && !Delta->
isZero()) {
14772 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14773 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14774 dbgs() <<
"New: " << *NewBECount <<
"\n";
14775 dbgs() <<
"Delta: " << *Delta <<
"\n";
14783 while (!Worklist.
empty()) {
14785 if (ValidLoops.
insert(L).second)
14786 Worklist.
append(L->begin(), L->end());
14788 for (
const auto &KV : ValueExprMap) {
14793 "AddRec references invalid loop");
14798 auto It = ExprValueMap.find(KV.second);
14799 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14800 dbgs() <<
"Value " << *KV.first
14801 <<
" is in ValueExprMap but not in ExprValueMap\n";
14806 if (!ReachableBlocks.
contains(
I->getParent()))
14808 const SCEV *OldSCEV = SCM.visit(KV.second);
14810 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14811 if (Delta && !Delta->
isZero()) {
14812 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14813 <<
"Old: " << *OldSCEV <<
"\n"
14814 <<
"New: " << *NewSCEV <<
"\n"
14815 <<
"Delta: " << *Delta <<
"\n";
14821 for (
const auto &KV : ExprValueMap) {
14822 for (
Value *V : KV.second) {
14823 const SCEV *S = ValueExprMap.lookup(V);
14825 dbgs() <<
"Value " << *V
14826 <<
" is in ExprValueMap but not in ValueExprMap\n";
14829 if (S != KV.first) {
14830 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14831 << *KV.first <<
"\n";
14838 for (
const auto &S : UniqueSCEVs) {
14843 auto It = SCEVUsers.find(
Op);
14844 if (It != SCEVUsers.end() && It->second.count(&S))
14846 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14847 <<
" is not being tracked!\n";
14853 for (
const auto &ValueAndVec : ValuesAtScopes) {
14855 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14856 const Loop *L = LoopAndValueAtScope.first;
14857 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14859 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14860 if (It != ValuesAtScopesUsers.end() &&
14863 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14864 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14870 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14871 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14872 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14873 const Loop *L = LoopAndValue.first;
14874 const SCEV *
Value = LoopAndValue.second;
14876 auto It = ValuesAtScopes.find(
Value);
14877 if (It != ValuesAtScopes.end() &&
14878 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14880 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14881 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14887 auto VerifyBECountUsers = [&](
bool Predicated) {
14889 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14890 for (
const auto &LoopAndBEInfo : BECounts) {
14891 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14892 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14894 auto UserIt = BECountUsers.find(S);
14895 if (UserIt != BECountUsers.end() &&
14896 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14898 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14899 <<
" missing from BECountUsers\n";
14906 VerifyBECountUsers(
false);
14907 VerifyBECountUsers(
true);
14910 for (
auto &[S, Values] : LoopDispositions) {
14911 for (
auto [
Loop, CachedDisposition] : Values) {
14913 if (CachedDisposition != RecomputedDisposition) {
14914 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14915 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14916 << RecomputedDisposition <<
"\n";
14923 for (
auto &[S, Values] : BlockDispositions) {
14924 for (
auto [BB, CachedDisposition] : Values) {
14926 if (CachedDisposition != RecomputedDisposition) {
14927 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14928 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14929 <<
", actual " << RecomputedDisposition <<
"\n";
14936 for (
auto [
FoldID, Expr] : FoldCache) {
14937 auto I = FoldCacheUser.find(Expr);
14938 if (
I == FoldCacheUser.end()) {
14939 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14944 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14948 for (
auto [Expr, IDs] : FoldCacheUser) {
14949 for (
auto &
FoldID : IDs) {
14952 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14957 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14958 <<
" != " << *Expr <<
"!\n";
14969 for (
auto [S, Multiple] : ConstantMultipleCache) {
14971 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14972 Multiple.
urem(RecomputedMultiple) != 0 &&
14973 RecomputedMultiple.
urem(Multiple) != 0)) {
14974 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14975 << *S <<
" : Computed " << RecomputedMultiple
14976 <<
" but cache contains " << Multiple <<
"!\n";
14984 FunctionAnalysisManager::Invalidator &Inv) {
15016 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
15017 <<
F.getName() <<
"':\n";
15023 "Scalar Evolution Analysis",
false,
true)
15072 const SCEV *LHS,
const SCEV *RHS) {
15074 assert(LHS->getType() == RHS->getType() &&
15075 "Type mismatch between LHS and RHS");
15078 ID.AddInteger(Pred);
15079 ID.AddPointer(LHS);
15080 ID.AddPointer(RHS);
15081 void *IP =
nullptr;
15082 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15086 UniquePreds.InsertNode(Eq, IP);
15097 ID.AddInteger(AddedFlags);
15098 void *IP =
nullptr;
15099 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15101 auto *OF =
new (SCEVAllocator)
15103 UniquePreds.InsertNode(OF, IP);
15123 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
15124 return Rewriter.visit(S);
15130 for (
const auto *Pred : U->getPredicates())
15132 if (IPred->getLHS() == Expr &&
15134 return IPred->getRHS();
15136 if (IPred->getLHS() == Expr &&
15137 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15138 return IPred->getRHS();
15141 return convertToAddRecWithPreds(Expr);
15144 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
15160 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
15177 explicit SCEVPredicateRewriter(
15178 const Loop *L, ScalarEvolution &SE,
15179 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15180 const SCEVPredicate *Pred)
15181 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
15183 bool addOverflowAssumption(
const SCEVPredicate *
P) {
15186 return Pred && Pred->
implies(
P, SE);
15192 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
15195 return addOverflowAssumption(
A);
15204 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
15208 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15210 if (!PredicatedRewrite)
15212 for (
const auto *
P : PredicatedRewrite->second){
15215 if (L != WP->getExpr()->getLoop())
15218 if (!addOverflowAssumption(
P))
15221 return PredicatedRewrite->first;
15224 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15225 const SCEVPredicate *Pred;
15234 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15241 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15261 if (!Step->
isOne())
15286 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15287 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15300 return Op->LHS == LHS &&
Op->RHS == RHS;
15307 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15309 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15334 const SCEV *Start = AR->getStart();
15335 const SCEV *OpStart =
Op->AR->getStart();
15340 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15343 const SCEV *Step = AR->getStepRecurrence(SE);
15344 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15397 if (Step->getValue()->getValue().isNonNegative())
15401 return ImpliedFlags;
15408 for (
const auto *
P : Preds)
15421 return this->implies(I, SE);
15429 for (
const auto *Pred : Preds)
15430 Pred->print(OS,
Depth);
15435 for (
const auto *Pred : Set->Preds)
15443 bool CheckImplies = Preds.
size() < 16;
15446 if (CheckImplies &&
implies(
N, SE))
15452 for (
auto *
P : Preds) {
15453 if (CheckImplies &&
N->implies(
P, SE))
15457 Preds = std::move(PrunedPreds);
15458 Preds.push_back(
N);
15465 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15470 for (
const auto *
Op :
Ops)
15475 SCEVUsers[
Op].insert(
User);
15484 SCEVUsers[
Op].insert(
User);
15488 const SCEV *Expr = SE.getSCEV(V);
15493 RewriteEntry &Entry = RewriteMap[Expr];
15496 if (Entry.second && Generation == Entry.first)
15497 return Entry.second;
15502 Expr = Entry.second;
15504 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15505 Entry = {Generation, NewSCEV};
15511 if (!BackedgeCount) {
15513 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15514 for (
const auto *
P : Preds)
15517 return BackedgeCount;
15521 if (!SymbolicMaxBackedgeCount) {
15523 SymbolicMaxBackedgeCount =
15524 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15525 for (
const auto *
P : Preds)
15528 return SymbolicMaxBackedgeCount;
15532 if (!SmallConstantMaxTripCount) {
15534 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15535 for (
const auto *
P : Preds)
15538 return *SmallConstantMaxTripCount;
15542 if (Preds->implies(&Pred, SE))
15547 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15548 updateGeneration();
15555void PredicatedScalarEvolution::updateGeneration() {
15557 if (++Generation == 0) {
15558 for (
auto &
II : RewriteMap) {
15559 const SCEV *Rewritten =
II.second.second;
15576 auto II = FlagsMap.insert({V, Flags});
15589 auto II = FlagsMap.find(V);
15591 if (
II != FlagsMap.end())
15600 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15605 for (
const auto *
P : NewPreds)
15608 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15614 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15617 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15618 for (
auto I :
Init.FlagsMap)
15619 FlagsMap.insert(
I);
15624 for (
auto *BB : L.getBlocks())
15625 for (
auto &
I : *BB) {
15626 if (!SE.isSCEVable(
I.getType()))
15629 auto *Expr = SE.getSCEV(&
I);
15630 auto II = RewriteMap.find(Expr);
15632 if (
II == RewriteMap.end())
15636 if (
II->second.second == Expr)
15641 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15649 LoopGuards Guards(SE);
15657void ScalarEvolution::LoopGuards::collectFromPHI(
15665 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15666 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15680 auto &RewriteMap =
G->second.RewriteMap;
15681 if (RewriteMap.empty())
15683 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15684 if (S == RewriteMap.end())
15690 return {C0,
SM->getSCEVType()};
15693 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15694 MinMaxPattern
P2) -> MinMaxPattern {
15695 auto [C1,
T1] =
P1;
15696 auto [C2, T2] =
P2;
15697 if (!C1 || !C2 ||
T1 != T2)
15701 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15703 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15705 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15707 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15712 auto P = GetMinMaxConst(0);
15713 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15716 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15719 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15722 Guards.RewriteMap.insert({
LHS,
RHS});
15730 const APInt &DivisorVal,
15732 const APInt *ExprVal;
15745 const APInt &DivisorVal,
15747 const APInt *ExprVal;
15755 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15769 const SCEV *URemRHS =
nullptr;
15773 const SCEV *Multiple =
15775 DivInfo[URemLHS] = Multiple;
15777 Multiples[URemLHS] =
C->getAPInt();
15797 auto IsMinMaxSCEVWithNonNegativeConstant =
15801 if (
MinMax->getNumOperands() != 2)
15804 if (
C->getAPInt().isNegative())
15806 SCTy =
MinMax->getSCEVType();
15815 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15817 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15822 auto *DivisibleExpr =
15830void ScalarEvolution::LoopGuards::collectFromBlock(
15832 const BasicBlock *
Block,
const BasicBlock *Pred,
15840 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15851 &ExprsToRewrite]() {
15852 const SCEVConstant *C1;
15865 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15867 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15868 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15876 if (MatchRangeCheckIdiom())
15893 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15895 if (From == FromRewritten)
15897 RewriteMap[From] = To;
15903 auto GetMaybeRewritten = [&](
const SCEV *S) {
15904 return RewriteMap.lookup_or(S, S);
15907 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15909 const APInt &DividesBy =
15924 switch (Predicate) {
15953 SmallPtrSet<const SCEV *, 16> Visited;
15955 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15959 while (!Worklist.
empty()) {
15963 if (!Visited.
insert(From).second)
15965 const SCEV *FromRewritten = GetMaybeRewritten(From);
15966 const SCEV *To =
nullptr;
15968 switch (Predicate) {
15973 EnqueueOperands(
UMax);
15979 EnqueueOperands(
SMax);
15985 EnqueueOperands(
UMin);
15991 EnqueueOperands(
SMin);
15999 const SCEV *OneAlignedUp =
16001 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
16013 const SCEVConstant *
C;
16022 Guards.NotEqual.insert({
LHS,
RHS});
16031 AddRewrite(From, FromRewritten, To);
16048 SE.F.
getParent(), Intrinsic::experimental_guard);
16050 for (
const auto *GU : GuardDecl->users())
16052 if (Guard->getFunction() ==
Block->getParent() &&
16061 unsigned NumCollectedConditions = 0;
16063 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
16065 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
16067 const CondBrInst *LoopEntryPredicate =
16069 if (!LoopEntryPredicate)
16074 NumCollectedConditions++;
16078 if (
Depth > 0 && NumCollectedConditions == 2)
16086 if (Pair.second->hasNPredecessorsOrMore(2) &&
16088 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
16089 for (
auto &Phi : Pair.second->phis())
16100 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
16101 SmallVector<Value *, 8> Worklist;
16102 SmallPtrSet<Value *, 8> Visited;
16104 while (!Worklist.
empty()) {
16111 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
16135 DenseMap<const SCEV *, APInt> Multiples;
16137 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
16144 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
16145 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
16149 for (
const auto &[K, Divisor] : Multiples) {
16150 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
16151 Guards.RewriteMap[
K] =
16153 Guards.
rewrite(K), Divisor, SE),
16162 Guards.PreserveNUW =
true;
16163 Guards.PreserveNSW =
true;
16164 for (
const SCEV *Expr : ExprsToRewrite) {
16165 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16166 Guards.PreserveNUW &=
16168 Guards.PreserveNSW &=
16175 if (ExprsToRewrite.size() > 1) {
16176 for (
const SCEV *Expr : ExprsToRewrite) {
16177 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16178 Guards.RewriteMap.erase(Expr);
16179 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
16188 class SCEVLoopGuardRewriter
16199 NotEqual(Guards.NotEqual) {
16200 if (Guards.PreserveNUW)
16202 if (Guards.PreserveNSW)
16209 return Map.lookup_or(Expr, Expr);
16213 if (
const SCEV *S = Map.lookup(Expr))
16220 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16221 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16222 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16224 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16225 if (
const SCEV *S = Map.lookup(NarrowExt))
16226 return SE.getZeroExtendExpr(S, Ty);
16227 Bitwidth = Bitwidth / 2;
16235 if (
const SCEV *S = Map.lookup(Expr))
16242 if (
const SCEV *S = Map.lookup(Expr))
16248 if (
const SCEV *S = Map.lookup(Expr))
16256 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16261 if (NotEqual.contains({LHS, RHS})) {
16263 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16264 return SE.getUMaxExpr(OneAlignedUp, S);
16271 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16282 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16283 return SE.getAddExpr(
16286 if (
const SCEV *S = Map.lookup(
Add))
16287 return SE.getAddExpr(Expr->
getOperand(0), S);
16299 : SE.getAddExpr(Operands,
16315 : SE.getMulExpr(Operands,
16321 if (RewriteMap.empty() && NotEqual.empty())
16324 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16325 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
Value * getPointer(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static constexpr unsigned SM(unsigned Version)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static DominatorTree getDomTree(Function &F)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool hasHugeExpression(ArrayRef< SCEVUse > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool RangeRefPHIAllowedOperands(DominatorTree &DT, PHINode *PHI)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static BinaryOperator * getCommonInstForPHI(PHINode *PN)
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< SCEVUse > Ops, SCEV::NoWrapFlags Flags)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool CollectAddOperandsWithScales(SmallDenseMap< SCEVUse, APInt, 16 > &M, SmallVectorImpl< SCEVUse > &NewOps, APInt &AccumulatedConstant, ArrayRef< SCEVUse > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< SCEVUse > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static void GroupByComplexity(SmallVectorImpl< SCEVUse > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static bool getOperandsForSelectLikePHI(DominatorTree &DT, PHINode *PN, Value *&Cond, Value *&LHS, Value *&RHS)
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static bool BrPHIToSelect(DominatorTree &DT, CondBrInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing the numBits high bits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
Conditional Branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * getNot(Constant *C)
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This is the base class for unary cast operator classes.
SCEVUse getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
ArrayRef< SCEVUse > operands() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
SCEVUse getOperand(unsigned i) const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI void computeAndSetCanonical(ScalarEvolution &SE)
Compute and set the canonical SCEV, by constructing a SCEV with the same operands,...
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
const SCEV * CanonicalSCEV
Pointer to the canonical version of the SCEV, i.e.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getSMinExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Does the operation BinOp between LHS and RHS provably have no signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, SCEVUse &LHS, SCEVUse &RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getUMaxExpr(SCEVUse LHS, SCEVUse RHS)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getSMaxExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getUDivExactExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< SCEVUse > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential=false)
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned short computeExpressionSize(ArrayRef< SCEVUse > Args)
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the given range.
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to true.
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken