83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
343#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
363 OS <<
"(ptrto" << OpS <<
" " << *
Op->getType() <<
" " << *
Op <<
" to "
370 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
377 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
384 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
413 const char *OpStr =
nullptr;
426 OpStr =
" umin_seq ";
450 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
457 OS <<
"***COULDNOTCOMPUTE***";
535 if (!
Mul)
return false;
539 if (!SC)
return false;
557 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
559 UniqueSCEVs.InsertNode(S, IP);
574 ConstantInt::get(ITy, V,
isSigned,
true));
582 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
585 UniqueSCEVs.InsertNode(S, IP);
606 "Must be a non-bit-width-changing pointer-to-integer cast!");
613 "Must be a non-bit-width-changing pointer-to-integer cast!");
625 "Cannot truncate non-integer value!");
632 "Cannot zero extend non-integer value!");
639 "Cannot sign extend non-integer value!");
644 SE->forgetMemoizedResults({
this});
647 SE->UniqueSCEVs.RemoveNode(
this);
653void SCEVUnknown::allUsesReplacedWith(
Value *New) {
655 SE->forgetMemoizedResults({
this});
658 SE->UniqueSCEVs.RemoveNode(
this);
680 if (LIsPointer != RIsPointer)
681 return (
int)LIsPointer - (int)RIsPointer;
686 return (
int)LID - (int)RID;
691 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
692 return (
int)LArgNo - (int)RArgNo;
698 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
701 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
702 auto LT = GV->getLinkage();
709 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
710 return LGV->getName().compare(RGV->getName());
721 if (LParent != RParent) {
724 if (LDepth != RDepth)
725 return (
int)LDepth - (int)RDepth;
729 unsigned LNumOps = LInst->getNumOperands(),
730 RNumOps = RInst->getNumOperands();
731 if (LNumOps != RNumOps)
732 return (
int)LNumOps - (int)RNumOps;
734 for (
unsigned Idx :
seq(LNumOps)) {
736 RInst->getOperand(Idx),
Depth + 1);
750static std::optional<int>
760 return (
int)LType - (int)RType;
785 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
786 if (LBitWidth != RBitWidth)
787 return (
int)LBitWidth - (int)RBitWidth;
788 return LA.
ult(
RA) ? -1 : 1;
794 return LTy->getBitWidth() - RTy->getBitWidth();
805 if (LLoop != RLoop) {
807 assert(LHead != RHead &&
"Two loops share the same header?");
811 "No dominance between recurrences used by one SCEV?");
835 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
836 if (LNumOps != RNumOps)
837 return (
int)LNumOps - (int)RNumOps;
839 for (
unsigned i = 0; i != LNumOps; ++i) {
865 if (
Ops.size() < 2)
return;
870 return Complexity && *Complexity < 0;
872 if (
Ops.size() == 2) {
876 if (IsLessComplex(
RHS,
LHS))
889 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
895 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
900 if (i == e-2)
return;
922template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
926 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
928 for (
unsigned Idx = 0; Idx <
Ops.size();) {
936 Ops.erase(
Ops.begin() + Idx);
943 assert(Folded &&
"Must have folded value");
947 if (Folded && IsAbsorber(Folded->
getAPInt()))
951 if (Folded && !IsIdentity(Folded->
getAPInt()))
952 Ops.insert(
Ops.begin(), Folded);
954 return Ops.size() == 1 ?
Ops[0] :
nullptr;
1029 APInt OddFactorial(W, 1);
1031 for (
unsigned i = 3; i <= K; ++i) {
1034 OddFactorial *= (i >> TwoFactors);
1038 unsigned CalculationBits = W +
T;
1052 for (
unsigned i = 1; i != K; ++i) {
1085 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1114 ConversionFn CreatePtrCast;
1118 ConversionFn CreatePtrCast)
1119 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1122 Type *TargetTy, ConversionFn CreatePtrCast) {
1124 return Rewriter.visit(Scev);
1160 "Should only reach pointer-typed SCEVUnknown's.");
1165 return SE.getZero(TargetTy);
1166 return CreatePtrCast(Expr);
1171 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1190 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1195 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1197 SCEV *S =
new (SCEVAllocator)
1199 UniqueSCEVs.InsertNode(S, IP);
1202 return static_cast<const SCEV *
>(S);
1205 "We must have succeeded in sinking the cast, "
1206 "and ending up with an integer-typed expression!");
1211 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1215 if (DL.hasUnstableRepresentation(
Op->getType()))
1218 Type *Ty = DL.getAddressType(
Op->getType());
1229 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1231 SCEV *S =
new (SCEVAllocator)
1233 UniqueSCEVs.InsertNode(S, IP);
1236 return static_cast<const SCEV *
>(S);
1239 "We must have succeeded in sinking the cast, "
1240 "and ending up with an integer-typed expression!");
1245 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1257 "This is not a truncating conversion!");
1259 "This is not a conversion to a SCEVable type!");
1260 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1268 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1290 UniqueSCEVs.InsertNode(S, IP);
1303 unsigned numTruncs = 0;
1304 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1312 if (numTruncs < 2) {
1322 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1329 for (
const SCEV *
Op : AddRec->operands())
1344 UniqueSCEVs.InsertNode(S, IP);
1385struct ExtendOpTraitsBase {
1386 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1391template <
typename ExtendOp>
struct ExtendOpTraits {
1407 static const GetExtendExprTy GetExtendExpr;
1409 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1410 ICmpInst::Predicate *Pred,
1411 ScalarEvolution *SE) {
1416const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1423 static const GetExtendExprTy GetExtendExpr;
1425 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1426 ICmpInst::Predicate *Pred,
1427 ScalarEvolution *SE) {
1432const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1444template <
typename ExtendOpTy>
1447 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1448 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1464 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1477 auto PreStartFlags =
1495 const SCEV *OperandExtendedStart =
1497 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1498 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1510 const SCEV *OverflowLimit =
1511 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1513 if (OverflowLimit &&
1521template <
typename ExtendOpTy>
1525 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1533 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1568template <
typename ExtendOpTy>
1569bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1572 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1582 APInt StartAI = StartC->
getAPInt();
1584 for (
unsigned Delta : {-2, -1, 1, 2}) {
1585 const SCEV *PreStart =
getConstant(StartAI - Delta);
1587 FoldingSetNodeID
ID;
1589 ID.AddPointer(PreStart);
1590 ID.AddPointer(Step);
1594 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1598 if (PreAR &&
any(PreAR->getNoWrapFlags(WrapType))) {
1601 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1602 DeltaS, &Pred,
this);
1620 const unsigned BitWidth =
C.getBitWidth();
1638 const APInt &ConstantStart,
1657 auto &UserIDs = FoldCacheUser[
I.first->second];
1658 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1659 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1660 if (UserIDs[
I] ==
ID) {
1665 I.first->second = S;
1667 FoldCacheUser[S].push_back(
ID);
1673 "This is not an extending conversion!");
1675 "This is not a conversion to a SCEVable type!");
1676 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1680 if (
const SCEV *S = FoldCache.lookup(
ID))
1692 "This is not an extending conversion!");
1694 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1711 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1715 UniqueSCEVs.InsertNode(S, IP);
1725 const SCEV *
X = ST->getOperand();
1739 if (AR->isAffine()) {
1740 const SCEV *Start = AR->getStart();
1741 const SCEV *Step = AR->getStepRecurrence(*
this);
1743 const Loop *L = AR->getLoop();
1747 if (AR->hasNoUnsignedWrap()) {
1768 const SCEV *CastedMaxBECount =
1772 if (MaxBECount == RecastedMaxBECount) {
1782 const SCEV *WideMaxBECount =
1784 const SCEV *OperandExtendedAdd =
1790 if (ZAdd == OperandExtendedAdd) {
1801 OperandExtendedAdd =
1807 if (ZAdd == OperandExtendedAdd) {
1828 !AC.assumptions().empty()) {
1830 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1832 if (AR->hasNoUnsignedWrap()) {
1867 const APInt &
C = SC->getAPInt();
1871 const SCEV *SResidual =
1879 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1904 if (SA->hasNoUnsignedWrap()) {
1925 const SCEV *SResidual =
1936 if (
SM->hasNoUnsignedWrap()) {
1958 const SCEV *TruncRHS;
1995 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1998 UniqueSCEVs.InsertNode(S, IP);
2007 "This is not an extending conversion!");
2009 "This is not a conversion to a SCEVable type!");
2010 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2014 if (
const SCEV *S = FoldCache.lookup(
ID))
2026 "This is not an extending conversion!");
2028 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2050 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2055 UniqueSCEVs.InsertNode(S, IP);
2065 const SCEV *
X = ST->getOperand();
2076 if (SA->hasNoSignedWrap()) {
2098 const SCEV *SResidual =
2111 if (AR->isAffine()) {
2112 const SCEV *Start = AR->getStart();
2113 const SCEV *Step = AR->getStepRecurrence(*
this);
2115 const Loop *L = AR->getLoop();
2119 if (AR->hasNoSignedWrap()) {
2141 const SCEV *CastedMaxBECount =
2145 if (MaxBECount == RecastedMaxBECount) {
2155 const SCEV *WideMaxBECount =
2157 const SCEV *OperandExtendedAdd =
2163 if (SAdd == OperandExtendedAdd) {
2174 OperandExtendedAdd =
2180 if (SAdd == OperandExtendedAdd) {
2200 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2202 if (AR->hasNoSignedWrap()) {
2217 const APInt &
C = SC->getAPInt();
2221 const SCEV *SResidual =
2229 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2257 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2260 UniqueSCEVs.InsertNode(S, IP);
2287 "This is not an extending conversion!");
2289 "This is not a conversion to a SCEVable type!");
2294 if (SC->getAPInt().isNegative())
2299 const SCEV *NewOp =
T->getOperand();
2318 for (
const SCEV *
Op : AR->operands())
2356 APInt &AccumulatedConstant,
2360 bool Interesting =
false;
2367 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2369 AccumulatedConstant += Scale *
C->getAPInt();
2374 for (; i !=
Ops.size(); ++i) {
2383 M, NewOps, AccumulatedConstant,
Add->operands(), NewScale, SE);
2389 auto Pair = M.insert({
Key, NewScale});
2393 Pair.first->second += NewScale;
2401 auto Pair = M.insert({
Ops[i], Scale});
2405 Pair.first->second += Scale;
2424 case Instruction::Add:
2427 case Instruction::Sub:
2430 case Instruction::Mul:
2444 const SCEV *
A = (this->*Extension)(
2446 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2447 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2455 if (BinOp == Instruction::Mul)
2461 APInt C = RHSC->getAPInt();
2462 unsigned NumBits =
C.getBitWidth();
2463 bool IsSub = (BinOp == Instruction::Sub);
2464 bool IsNegativeConst = (
Signed &&
C.isNegative());
2466 bool OverflowDown = IsSub ^ IsNegativeConst;
2468 if (IsNegativeConst) {
2481 APInt Limit = Min + Magnitude;
2487 APInt Limit = Max - Magnitude;
2492std::optional<SCEV::NoWrapFlags>
2497 return std::nullopt;
2506 bool Deduced =
false;
2508 if (OBO->
getOpcode() != Instruction::Add &&
2511 return std::nullopt;
2520 false, LHS, RHS, CtxI)) {
2527 true, LHS, RHS, CtxI)) {
2534 return std::nullopt;
2544 using namespace std::placeholders;
2551 assert(CanAnalyze &&
"don't call from other places!");
2558 auto IsKnownNonNegative = [&](
SCEVUse U) {
2567 if (SignOrUnsignWrap != SignOrUnsignMask &&
2574 return Instruction::Add;
2576 return Instruction::Mul;
2587 Opcode,
C, OBO::NoSignedWrap);
2595 Opcode,
C, OBO::NoUnsignedWrap);
2605 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2612 if (UDiv->getOperand(1) ==
Ops[1])
2615 if (UDiv->getOperand(1) ==
Ops[0])
2631 "only nuw or nsw allowed");
2632 assert(!
Ops.empty() &&
"Cannot get empty add!");
2633 if (
Ops.size() == 1)
return Ops[0];
2636 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2638 "SCEVAddExpr operand types don't match!");
2640 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2641 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2646 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2647 [](
const APInt &
C) {
return C.isZero(); },
2648 [](
const APInt &
C) {
return false; });
2661 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2666 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2667 Add->setNoWrapFlags(ComputeFlags(
Ops));
2675 bool FoundMatch =
false;
2676 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2677 if (
Ops[i] ==
Ops[i+1]) {
2689 --i; e -=
Count - 1;
2699 auto FindTruncSrcType = [&]() ->
Type * {
2705 return T->getOperand()->getType();
2707 SCEVUse LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2709 return T->getOperand()->getType();
2713 if (
auto *SrcType = FindTruncSrcType()) {
2720 if (
T->getOperand()->getType() != SrcType) {
2729 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2732 if (
T->getOperand()->getType() != SrcType) {
2760 if (
Ops.size() == 2) {
2770 auto C2 =
C->getAPInt();
2773 APInt ConstAdd = C1 + C2;
2774 auto AddFlags = AddExpr->getNoWrapFlags();
2815 if (
Ops.size() == 2 &&
2826 if (Idx <
Ops.size()) {
2827 bool DeletedAdd =
false;
2838 Ops.erase(
Ops.begin()+Idx);
2841 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2864 struct APIntCompare {
2865 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2866 return LHS.ult(RHS);
2873 std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
2874 for (
const SCEV *NewOp : NewOps)
2875 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2878 if (AccumulatedConstant != 0)
2880 for (
auto &MulOp : MulOpLists) {
2881 if (MulOp.first == 1) {
2883 }
else if (MulOp.first != 0) {
2892 if (
Ops.size() == 1)
2903 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2904 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2907 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2908 if (MulOpSCEV ==
Ops[AddOp]) {
2910 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2911 if (
Mul->getNumOperands() != 2) {
2918 const SCEV *AddOne =
2922 if (
Ops.size() == 2)
return OuterMul;
2924 Ops.erase(
Ops.begin()+AddOp);
2925 Ops.erase(
Ops.begin()+Idx-1);
2927 Ops.erase(
Ops.begin()+Idx);
2928 Ops.erase(
Ops.begin()+AddOp-1);
2930 Ops.push_back(OuterMul);
2935 for (
unsigned OtherMulIdx = Idx+1;
2942 OMulOp != e; ++OMulOp)
2943 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2945 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2946 if (
Mul->getNumOperands() != 2) {
2954 OtherMul->
operands().take_front(OMulOp));
2958 const SCEV *InnerMulSum =
2962 if (
Ops.size() == 2)
return OuterMul;
2963 Ops.erase(
Ops.begin()+Idx);
2964 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2965 Ops.push_back(OuterMul);
2985 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2988 Ops.erase(
Ops.begin()+i);
2993 if (!LIOps.
empty()) {
3018 auto *DefI = getDefiningScopeBound(LIOps);
3020 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
3032 if (
Ops.size() == 1)
return NewRec;
3035 for (
unsigned i = 0;; ++i)
3036 if (
Ops[i] == AddRec) {
3046 for (
unsigned OtherIdx = Idx+1;
3054 "AddRecExprs are not sorted in reverse dominance order?");
3061 if (OtherAddRec->getLoop() == AddRecLoop) {
3062 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
3064 if (i >= AddRecOps.
size()) {
3065 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3069 getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
3072 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3087 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
3098 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3102 S =
new (SCEVAllocator)
3104 UniqueSCEVs.InsertNode(S, IP);
3115 FoldingSetNodeID
ID;
3117 for (
const SCEV *
Op :
Ops)
3122 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3126 S =
new (SCEVAllocator)
3127 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3128 UniqueSCEVs.InsertNode(S, IP);
3130 LoopUsers[
L].push_back(S);
3139 FoldingSetNodeID
ID;
3141 for (
const SCEV *
Op :
Ops)
3145 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3149 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3151 UniqueSCEVs.InsertNode(S, IP);
3161 if (j > 1 && k / j != i) Overflow =
true;
3177 if (n == 0 || n == k)
return 1;
3178 if (k > n)
return 0;
3184 for (
uint64_t i = 1; i <= k; ++i) {
3185 r =
umul_ov(r, n-(i-1), Overflow);
3194 struct FindConstantInAddMulChain {
3195 bool FoundConstant =
false;
3197 bool follow(
const SCEV *S) {
3202 bool isDone()
const {
3203 return FoundConstant;
3207 FindConstantInAddMulChain
F;
3209 ST.visitAll(StartExpr);
3210 return F.FoundConstant;
3218 "only nuw or nsw allowed");
3219 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3220 if (
Ops.size() == 1)
return Ops[0];
3222 Type *ETy =
Ops[0]->getType();
3224 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3226 "SCEVMulExpr operand types don't match!");
3231 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3232 [](
const APInt &
C) {
return C.isOne(); },
3233 [](
const APInt &
C) {
return C.isZero(); });
3244 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3249 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3250 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3255 if (
Ops.size() == 2) {
3263 const SCEV *Op0, *Op1;
3271 if (
Ops[0]->isAllOnesValue()) {
3276 bool AnyFolded =
false;
3277 for (
const SCEV *AddOp :
Add->operands()) {
3297 if (AddRec->hasNoSignedWrap()) {
3304 AddRec->getNoWrapFlags(FlagsMask));
3327 APInt C1V = LHSC->getAPInt();
3337 const SCEV *NewMul =
nullptr;
3341 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3356 if (Idx <
Ops.size()) {
3357 bool DeletedMul =
false;
3363 Ops.erase(
Ops.begin()+Idx);
3387 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3390 Ops.erase(
Ops.begin()+i);
3395 if (!LIOps.
empty()) {
3408 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3424 if (
Ops.size() == 1)
return NewRec;
3427 for (
unsigned i = 0;; ++i)
3428 if (
Ops[i] == AddRec) {
3449 bool OpsModified =
false;
3450 for (
unsigned OtherIdx = Idx+1;
3464 bool Overflow =
false;
3471 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3475 z < ze && !Overflow; ++z) {
3478 if (LargerThan64Bits)
3479 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3481 Coeff = Coeff1*Coeff2;
3496 if (
Ops.size() == 2)
return NewAddRec;
3497 Ops[Idx] = NewAddRec;
3498 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3514 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3521 "SCEVURemExpr operand types don't match!");
3526 if (RHSC->getValue()->isOne())
3527 return getZero(LHS->getType());
3530 if (RHSC->getAPInt().isPowerOf2()) {
3531 Type *FullTy = LHS->getType();
3547 assert(!LHS->getType()->isPointerTy() &&
3548 "SCEVUDivExpr operand can't be pointer!");
3549 assert(LHS->getType() == RHS->getType() &&
3550 "SCEVUDivExpr operand types don't match!");
3557 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3565 if (RHSC->getValue()->isOne())
3570 if (!RHSC->getValue()->isZero()) {
3574 Type *Ty = LHS->getType();
3575 unsigned LZ = RHSC->getAPInt().countl_zero();
3579 if (!RHSC->getAPInt().isPowerOf2())
3587 const APInt &StepInt = Step->getAPInt();
3588 const APInt &DivInt = RHSC->getAPInt();
3589 if (!StepInt.
urem(DivInt) &&
3595 for (
const SCEV *
Op : AR->operands())
3601 const APInt *StartRem;
3614 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3618 const SCEV *NewStart =
3620 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3622 const SCEV *NewLHS =
3625 if (LHS != NewLHS) {
3635 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3644 for (
const SCEV *
Op : M->operands())
3648 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3649 const SCEV *
Op = M->getOperand(i);
3676 if (
auto *DivisorConstant =
3678 bool Overflow =
false;
3680 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3691 for (
const SCEV *
Op :
A->operands())
3695 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3702 if (Operands.
size() ==
A->getNumOperands())
3709 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3719 return getZero(LHS->getType());
3723 if (
Mul &&
Mul->hasNoUnsignedWrap()) {
3724 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3725 if (
Mul->getOperand(i) == RHS) {
3736 const SCEV *NewLHS, *NewRHS;
3744 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3747 UniqueSCEVs.InsertNode(S, IP);
3784 if (StepChrec->getLoop() == L) {
3798 if (Operands.
size() == 1)
return Operands[0];
3803 "SCEVAddRecExpr operand types don't match!");
3804 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3806 for (
const SCEV *
Op : Operands)
3808 "SCEVAddRecExpr operand is not available at loop entry!");
3811 if (Operands.
back()->isZero()) {
3826 const Loop *NestedLoop = NestedAR->getLoop();
3827 if (L->contains(NestedLoop)
3830 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3832 Operands[0] = NestedAR->getStart();
3836 bool AllInvariant =
all_of(
3848 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3859 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3863 Operands[0] = NestedAR;
3869 return getOrCreateAddRecExpr(Operands, L, Flags);
3885 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3889 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3903 bool FirstIter =
true;
3905 for (
SCEVUse IndexExpr : IndexExprs) {
3912 Offsets.push_back(FieldOffset);
3915 CurTy = STy->getTypeAtIndex(Index);
3920 "The first index of a GEP indexes a pointer");
3921 CurTy = SrcElementTy;
3932 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3933 Offsets.push_back(LocalOffset);
3938 if (Offsets.empty())
3951 "GEP should not change type mid-flight.");
3955SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3958 ID.AddInteger(SCEVType);
3962 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3965SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3968 ID.AddInteger(SCEVType);
3972 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3982 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3983 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3984 if (
Ops.size() == 1)
return Ops[0];
3987 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3989 "Operand types don't match!");
3992 "min/max should be consistently pointerish");
4018 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4020 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4025 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4027 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4033 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
4039 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
4044 if (Idx <
Ops.size()) {
4045 bool DeletedAny =
false;
4046 while (
Ops[Idx]->getSCEVType() == Kind) {
4048 Ops.erase(
Ops.begin()+Idx);
4066 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
4067 if (
Ops[i] ==
Ops[i + 1] ||
4068 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
4071 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
4074 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
4077 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
4083 if (
Ops.size() == 1)
return Ops[0];
4085 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
4090 ID.AddInteger(Kind);
4094 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4096 return ExistingSCEV;
4099 SCEV *S =
new (SCEVAllocator)
4102 UniqueSCEVs.InsertNode(S, IP);
4110class SCEVSequentialMinMaxDeduplicatingVisitor final
4111 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4112 std::optional<const SCEV *>> {
4113 using RetVal = std::optional<const SCEV *>;
4121 bool canRecurseInto(
SCEVTypes Kind)
const {
4124 return RootKind == Kind || NonSequentialRootKind == Kind;
4127 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4129 "Only for min/max expressions.");
4132 if (!canRecurseInto(Kind))
4142 return std::nullopt;
4149 RetVal
visit(
const SCEV *S) {
4151 if (!SeenOps.
insert(S).second)
4152 return std::nullopt;
4153 return Base::visit(S);
4157 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4159 : SE(SE), RootKind(RootKind),
4160 NonSequentialRootKind(
4161 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4165 SmallVectorImpl<SCEVUse> &NewOps) {
4170 for (
const SCEV *
Op : OrigOps) {
4175 Ops.emplace_back(*NewOp);
4179 NewOps = std::move(
Ops);
4183 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4185 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4187 RetVal visitPtrToAddrExpr(
const SCEVPtrToAddrExpr *Expr) {
return Expr; }
4189 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4191 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4193 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4195 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4197 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4199 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4201 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4203 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4205 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4206 return visitAnyMinMaxExpr(Expr);
4209 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4210 return visitAnyMinMaxExpr(Expr);
4213 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4214 return visitAnyMinMaxExpr(Expr);
4217 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4218 return visitAnyMinMaxExpr(Expr);
4221 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4222 return visitAnyMinMaxExpr(Expr);
4225 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4227 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4270struct SCEVPoisonCollector {
4271 bool LookThroughMaybePoisonBlocking;
4272 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4273 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4274 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4276 bool follow(
const SCEV *S) {
4277 if (!LookThroughMaybePoisonBlocking &&
4287 bool isDone()
const {
return false; }
4297 SCEVPoisonCollector PC1(
true);
4302 if (PC1.MaybePoison.empty())
4308 SCEVPoisonCollector PC2(
false);
4318 SCEVPoisonCollector PC(
false);
4341 while (!Worklist.
empty()) {
4343 if (!Visited.
insert(V).second)
4347 if (Visited.
size() > 16)
4352 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4363 if (PDI->isDisjoint())
4370 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4377 if (
I->hasPoisonGeneratingAnnotations())
4388 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4389 "Not a SCEVSequentialMinMaxExpr!");
4390 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4391 if (
Ops.size() == 1)
4395 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4397 "Operand types don't match!");
4400 "min/max should be consistently pointerish");
4408 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4415 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4425 bool DeletedAny =
false;
4426 while (Idx <
Ops.size()) {
4427 if (
Ops[Idx]->getSCEVType() != Kind) {
4432 Ops.erase(
Ops.begin() + Idx);
4433 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4434 SMME->operands().end());
4442 const SCEV *SaturationPoint;
4453 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4454 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4466 Ops.erase(
Ops.begin() + i);
4471 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4472 Ops.erase(
Ops.begin() + i);
4480 ID.AddInteger(Kind);
4484 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4486 return ExistingSCEV;
4490 SCEV *S =
new (SCEVAllocator)
4493 UniqueSCEVs.InsertNode(S, IP);
4541 if (
Size.isScalable())
4562 "Cannot get offset for structure containing scalable vector types");
4576 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4578 "Stale SCEVUnknown in uniquing map!");
4584 UniqueSCEVs.InsertNode(S, IP);
4599 return Ty->isIntOrPtrTy();
4606 if (Ty->isPointerTy())
4617 if (Ty->isIntegerTy())
4621 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4633 bool PreciseA, PreciseB;
4634 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4635 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4636 if (!PreciseA || !PreciseB)
4639 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4640 DT.dominates(ScopeB, ScopeA);
4644 return CouldNotCompute.get();
4647bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4650 return SU && SU->getValue() ==
nullptr;
4653 return !ContainsNulls;
4658 if (
I != HasRecMap.end())
4663 HasRecMap.insert({S, FoundAddRec});
4671 if (
SI == ExprValueMap.
end())
4673 return SI->second.getArrayRef();
4679void ScalarEvolution::eraseValueFromMap(
Value *V) {
4681 if (
I != ValueExprMap.end()) {
4682 auto EVIt = ExprValueMap.find(
I->second);
4683 bool Removed = EVIt->second.remove(V);
4685 assert(Removed &&
"Value not in ExprValueMap?");
4686 ValueExprMap.erase(
I);
4690void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4694 auto It = ValueExprMap.find_as(V);
4695 if (It == ValueExprMap.end()) {
4697 ExprValueMap[S].insert(V);
4708 return createSCEVIter(V);
4715 if (
I != ValueExprMap.end()) {
4716 const SCEV *S =
I->second;
4717 assert(checkValidity(S) &&
4718 "existing SCEV has not been properly invalidated");
4731 Type *Ty = V->getType();
4747 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4760 return (
const SCEV *)
nullptr;
4766 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4770 Type *Ty = V->getType();
4776 assert(
P->getType()->isPointerTy());
4791 if (AddOp->getType()->isPointerTy()) {
4792 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4810 return getZero(LHS->getType());
4815 if (RHS->getType()->isPointerTy()) {
4816 if (!LHS->getType()->isPointerTy() ||
4826 const bool RHSIsNotMinSigned =
4857 Type *SrcTy = V->getType();
4858 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4859 "Cannot truncate or zero extend with non-integer arguments!");
4869 Type *SrcTy = V->getType();
4870 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4871 "Cannot truncate or zero extend with non-integer arguments!");
4881 Type *SrcTy = V->getType();
4882 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4883 "Cannot noop or zero extend with non-integer arguments!");
4885 "getNoopOrZeroExtend cannot truncate!");
4893 Type *SrcTy = V->getType();
4894 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4895 "Cannot noop or sign extend with non-integer arguments!");
4897 "getNoopOrSignExtend cannot truncate!");
4905 Type *SrcTy = V->getType();
4906 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4907 "Cannot noop or any extend with non-integer arguments!");
4909 "getNoopOrAnyExtend cannot truncate!");
4917 Type *SrcTy = V->getType();
4918 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4919 "Cannot truncate or noop with non-integer arguments!");
4921 "getTruncateOrNoop cannot extend!");
4929 const SCEV *PromotedLHS = LHS;
4930 const SCEV *PromotedRHS = RHS;
4950 assert(!
Ops.empty() &&
"At least one operand must be!");
4952 if (
Ops.size() == 1)
4956 Type *MaxType =
nullptr;
4962 assert(MaxType &&
"Failed to find maximum type!");
4975 if (!V->getType()->isPointerTy())
4980 V = AddRec->getStart();
4982 const SCEV *PtrOp =
nullptr;
4983 for (
const SCEV *AddOp :
Add->operands()) {
4984 if (AddOp->getType()->isPointerTy()) {
4985 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4989 assert(PtrOp &&
"Must have pointer op");
5001 for (
User *U :
I->users()) {
5003 if (Visited.
insert(UserInsn).second)
5017 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
5018 bool IgnoreOtherLoops =
true) {
5021 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
5023 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
5028 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5030 SeenLoopVariantSCEVUnknown =
true;
5034 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5038 SeenOtherLoops =
true;
5042 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5044 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5047 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
5048 : SCEVRewriteVisitor(SE),
L(
L) {}
5051 bool SeenLoopVariantSCEVUnknown =
false;
5052 bool SeenOtherLoops =
false;
5061 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
5062 SCEVPostIncRewriter
Rewriter(L, SE);
5064 return Rewriter.hasSeenLoopVariantSCEVUnknown()
5069 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5071 SeenLoopVariantSCEVUnknown =
true;
5075 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5079 SeenOtherLoops =
true;
5083 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5085 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5088 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
5089 : SCEVRewriteVisitor(SE),
L(
L) {}
5092 bool SeenLoopVariantSCEVUnknown =
false;
5093 bool SeenOtherLoops =
false;
5099class SCEVBackedgeConditionFolder
5102 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5103 ScalarEvolution &SE) {
5104 bool IsPosBECond =
false;
5105 Value *BECond =
nullptr;
5106 if (BasicBlock *Latch =
L->getLoopLatch()) {
5108 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
5109 "Both outgoing branches should not target same header!");
5110 BECond = BI->getCondition();
5111 IsPosBECond = BI->getSuccessor(0) ==
L->getHeader();
5116 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5120 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5121 const SCEV *
Result = Expr;
5126 switch (
I->getOpcode()) {
5127 case Instruction::Select: {
5129 std::optional<const SCEV *> Res =
5130 compareWithBackedgeCondition(
SI->getCondition());
5138 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5149 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5150 bool IsPosBECond, ScalarEvolution &SE)
5151 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5152 IsPositiveBECond(IsPosBECond) {}
5154 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5158 Value *BackedgeCond =
nullptr;
5160 bool IsPositiveBECond;
5163std::optional<const SCEV *>
5164SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5169 if (BackedgeCond == IC)
5172 return std::nullopt;
5177 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5178 ScalarEvolution &SE) {
5184 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5191 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5201 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5202 : SCEVRewriteVisitor(SE),
L(
L) {}
5211ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5215 using OBO = OverflowingBinaryOperator;
5223 const APInt &BECountAP = BECountMax->getAPInt();
5224 unsigned NoOverflowBitWidth =
5236 Instruction::Add, IncRange, OBO::NoSignedWrap);
5237 if (NSWRegion.contains(AddRecRange))
5246 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5247 if (NUWRegion.contains(AddRecRange))
5255ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5265 if (!SignedWrapViaInductionTried.insert(AR).second)
5290 AC.assumptions().empty())
5298 const SCEV *OverflowLimit =
5300 if (OverflowLimit &&
5308ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5318 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5344 AC.assumptions().empty())
5379 explicit BinaryOp(Operator *
Op)
5380 : Opcode(
Op->getOpcode()),
LHS(
Op->getOperand(0)),
RHS(
Op->getOperand(1)),
5383 IsNSW = OBO->hasNoSignedWrap();
5384 IsNUW = OBO->hasNoUnsignedWrap();
5388 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5390 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5402 return std::nullopt;
5408 switch (
Op->getOpcode()) {
5409 case Instruction::Add:
5410 case Instruction::Sub:
5411 case Instruction::Mul:
5412 case Instruction::UDiv:
5413 case Instruction::URem:
5414 case Instruction::And:
5415 case Instruction::AShr:
5416 case Instruction::Shl:
5417 return BinaryOp(
Op);
5419 case Instruction::Or: {
5422 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5424 return BinaryOp(
Op);
5427 case Instruction::Xor:
5431 if (RHSC->getValue().isSignMask())
5432 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5434 if (V->getType()->isIntegerTy(1))
5435 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5436 return BinaryOp(
Op);
5438 case Instruction::LShr:
5447 if (SA->getValue().ult(
BitWidth)) {
5449 ConstantInt::get(SA->getContext(),
5451 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5454 return BinaryOp(
Op);
5456 case Instruction::ExtractValue: {
5458 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5466 bool Signed = WO->isSigned();
5469 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5474 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5485 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5486 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5488 return std::nullopt;
5514 if (
Op == SymbolicPHI)
5519 if (SourceBits != NewBits)
5537 if (!L || L->getHeader() != PN->
getParent())
5595std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5596ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5604 assert(L &&
"Expecting an integer loop header phi");
5609 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5610 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5611 Value *
V = PN->getIncomingValue(i);
5612 if (
L->contains(PN->getIncomingBlock(i))) {
5615 }
else if (BEValueV != V) {
5619 }
else if (!StartValueV) {
5621 }
else if (StartValueV != V) {
5622 StartValueV =
nullptr;
5626 if (!BEValueV || !StartValueV)
5627 return std::nullopt;
5629 const SCEV *BEValue =
getSCEV(BEValueV);
5636 return std::nullopt;
5640 unsigned FoundIndex =
Add->getNumOperands();
5641 Type *TruncTy =
nullptr;
5643 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5646 if (FoundIndex == e) {
5651 if (FoundIndex ==
Add->getNumOperands())
5652 return std::nullopt;
5656 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5657 if (i != FoundIndex)
5658 Ops.push_back(
Add->getOperand(i));
5664 return std::nullopt;
5717 const SCEV *StartVal =
getSCEV(StartValueV);
5718 const SCEV *PHISCEV =
5745 auto getExtendedExpr = [&](
const SCEV *Expr,
5746 bool CreateSignExtend) ->
const SCEV * {
5749 const SCEV *ExtendedExpr =
5752 return ExtendedExpr;
5760 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5761 const SCEV *ExtendedExpr) ->
bool {
5762 return Expr != ExtendedExpr &&
5766 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5767 if (PredIsKnownFalse(StartVal, StartExtended)) {
5769 return std::nullopt;
5774 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5775 if (PredIsKnownFalse(Accum, AccumExtended)) {
5777 return std::nullopt;
5780 auto AppendPredicate = [&](
const SCEV *Expr,
5781 const SCEV *ExtendedExpr) ->
void {
5782 if (Expr != ExtendedExpr &&
5790 AppendPredicate(StartVal, StartExtended);
5791 AppendPredicate(Accum, AccumExtended);
5799 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5800 std::make_pair(NewAR, Predicates);
5802 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5806std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5811 return std::nullopt;
5814 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5815 if (
I != PredicatedSCEVRewrites.end()) {
5816 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5819 if (Rewrite.first == SymbolicPHI)
5820 return std::nullopt;
5824 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5828 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5829 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5834 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5835 return std::nullopt;
5852 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5853 if (Expr1 != Expr2 &&
5854 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5855 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5872const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5874 Value *StartValueV) {
5877 assert(BEValueV && StartValueV);
5883 if (BO->Opcode != Instruction::Add)
5886 const SCEV *Accum =
nullptr;
5887 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5889 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5903 insertValueToMap(PN, PHISCEV);
5915 "Accum is defined outside L, but is not invariant?");
5916 if (isAddRecNeverPoison(BEInst, L))
5923const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5924 const Loop *
L = LI.getLoopFor(PN->
getParent());
5931 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5937 }
else if (BEValueV != V) {
5941 }
else if (!StartValueV) {
5943 }
else if (StartValueV != V) {
5944 StartValueV =
nullptr;
5948 if (!BEValueV || !StartValueV)
5951 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5952 "PHI node already processed?");
5956 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5961 insertValueToMap(PN, SymbolicName);
5965 const SCEV *BEValue =
getSCEV(BEValueV);
5975 unsigned FoundIndex =
Add->getNumOperands();
5976 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5977 if (
Add->getOperand(i) == SymbolicName)
5978 if (FoundIndex == e) {
5983 if (FoundIndex !=
Add->getNumOperands()) {
5986 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5987 if (i != FoundIndex)
5988 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
6000 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
6007 if (
GEP->getOperand(0) == PN) {
6008 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
6026 const SCEV *StartVal =
getSCEV(StartValueV);
6027 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
6032 forgetMemoizedResults({SymbolicName});
6033 insertValueToMap(PN, PHISCEV);
6037 const_cast<SCEVAddRecExpr *
>(AR),
6063 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
6064 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
6066 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
6067 const SCEV *StartVal =
getSCEV(StartValueV);
6068 if (Start == StartVal) {
6072 forgetMemoizedResults({SymbolicName});
6073 insertValueToMap(PN, Shifted);
6083 eraseValueFromMap(PN);
6098 Use &LeftUse =
Merge->getOperandUse(0);
6099 Use &RightUse =
Merge->getOperandUse(1);
6135 assert(IDom &&
"At least the entry block should dominate PN");
6143const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6148 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6165 CommonInst = IncomingInst;
6181ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6187 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6188 bool SCEVExprsIdentical =
6190 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6191 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6194const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6195 if (
const SCEV *S = createAddRecFromPHI(PN))
6205 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6208 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6217 struct FindClosure {
6218 const SCEV *OperandToFind;
6224 bool canRecurseInto(
SCEVTypes Kind)
const {
6227 return RootKind == Kind || NonSequentialRootKind == Kind ||
6232 : OperandToFind(OperandToFind), RootKind(RootKind),
6233 NonSequentialRootKind(
6237 bool follow(
const SCEV *S) {
6238 Found = S == OperandToFind;
6240 return !isDone() && canRecurseInto(S->
getSCEVType());
6243 bool isDone()
const {
return Found; }
6246 FindClosure FC(OperandToFind, RootKind);
6251std::optional<const SCEV *>
6252ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6262 switch (ICI->getPredicate()) {
6276 bool Signed = ICI->isSigned();
6277 const SCEV *LA =
getSCEV(TrueVal);
6285 if (LA == LS &&
RA == RS)
6287 if (LA == RS &&
RA == LS)
6290 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6291 if (
Op->getType()->isPointerTy()) {
6302 LS = CoerceOperand(LS);
6303 RS = CoerceOperand(RS);
6327 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6328 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6342 X = ZExt->getOperand();
6344 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6355 return std::nullopt;
6358static std::optional<const SCEV *>
6360 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6364 "Unexpected operands of a select.");
6376 return std::nullopt;
6391static std::optional<const SCEV *>
6395 return std::nullopt;
6398 const auto *SETrue = SE->
getSCEV(TrueVal);
6399 const auto *SEFalse = SE->
getSCEV(FalseVal);
6403const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6405 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6407 V->getType() ==
TrueVal->getType() &&
6408 "Types of select hands and of the result must match.");
6411 if (!
V->getType()->isIntegerTy(1))
6414 if (std::optional<const SCEV *> S =
6427 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6431 if (std::optional<const SCEV *> S =
6432 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6438 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6444 assert(
GEP->getSourceElementType()->isSized() &&
6445 "GEP source element type must be sized");
6448 for (
Value *Index :
GEP->indices())
6453APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6456 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6459 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6461 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6464 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6483 return GetShiftedByZeros(TZ);
6493 return GetShiftedByZeros(TZ);
6497 if (
M->hasNoUnsignedWrap()) {
6500 for (
const SCEV *Operand :
M->operands().drop_front())
6508 for (
const SCEV *Operand :
M->operands())
6510 return GetShiftedByZeros(TZ);
6515 if (
N->hasNoUnsignedWrap())
6516 return GetGCDMultiple(
N);
6519 for (
const SCEV *Operand :
N->operands().drop_front())
6521 return GetShiftedByZeros(TZ);
6538 CtxI = &*F.getEntryBlock().begin();
6545 .allowEphemerals(
true))
6546 .countMinTrailingZeros();
6547 return GetShiftedByZeros(Known);
6560 return getConstantMultipleImpl(S, CtxI);
6562 auto I = ConstantMultipleCache.find(S);
6563 if (
I != ConstantMultipleCache.end())
6566 APInt Result = getConstantMultipleImpl(S, CtxI);
6567 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6568 assert(InsertPair.second &&
"Should insert a new key");
6569 return InsertPair.first->second;
6586 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6589 if (std::optional<ConstantRange>
Range = CB->getRange())
6593 if (std::optional<ConstantRange>
Range =
A->getRange())
6596 return std::nullopt;
6603 UnsignedRanges.erase(AddRec);
6604 SignedRanges.erase(AddRec);
6605 ConstantMultipleCache.erase(AddRec);
6610getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6636 Value *Start, *Step;
6643 assert(L && L->getHeader() ==
P->getParent());
6656 case Instruction::AShr:
6657 case Instruction::LShr:
6658 case Instruction::Shl:
6673 KnownStep.getBitWidth() ==
BitWidth);
6676 auto MaxShiftAmt = KnownStep.getMaxValue();
6678 bool Overflow =
false;
6679 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6686 case Instruction::AShr: {
6694 if (KnownStart.isNonNegative())
6697 KnownStart.getMaxValue() + 1);
6698 if (KnownStart.isNegative())
6701 KnownEnd.getMaxValue() + 1);
6704 case Instruction::LShr: {
6713 KnownStart.getMaxValue() + 1);
6715 case Instruction::Shl: {
6719 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6720 return ConstantRange(KnownStart.getMinValue(),
6721 KnownEnd.getMaxValue() + 1);
6746 [&](
Value *Operand) { return DT.dominates(Operand, PHI); }))
6753ScalarEvolution::getRangeRefIter(
const SCEV *S,
6754 ScalarEvolution::RangeSignHint SignHint) {
6755 DenseMap<const SCEV *, ConstantRange> &Cache =
6756 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6759 SmallPtrSet<const SCEV *, 8> Seen;
6763 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6764 if (!Seen.
insert(Expr).second)
6798 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6799 const SCEV *
P = WorkList[
I];
6803 for (
const SCEV *
Op :
P->operands())
6816 if (!WorkList.
empty()) {
6821 getRangeRef(
P, SignHint);
6825 return getRangeRef(S, SignHint, 0);
6832 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6833 DenseMap<const SCEV *, ConstantRange> &Cache =
6834 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6841 auto I = Cache.
find(S);
6842 if (
I != Cache.
end())
6846 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6851 return getRangeRefIter(S, SignHint);
6854 ConstantRange ConservativeResult(
BitWidth,
true);
6855 using OBO = OverflowingBinaryOperator;
6859 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6863 ConservativeResult =
6870 ConservativeResult = ConstantRange(
6886 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6893 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6900 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6906 return setRange(Cast, SignHint,
X);
6911 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6912 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6914 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6915 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6916 ConservativeResult =
6917 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6919 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6920 unsigned WrapType = OBO::AnyWrap;
6921 if (
Add->hasNoSignedWrap())
6922 WrapType |= OBO::NoSignedWrap;
6923 if (
Add->hasNoUnsignedWrap())
6924 WrapType |= OBO::NoUnsignedWrap;
6926 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6928 return setRange(
Add, SignHint,
6929 ConservativeResult.intersectWith(
X, RangeType));
6933 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6935 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6936 return setRange(
Mul, SignHint,
6937 ConservativeResult.intersectWith(
X, RangeType));
6941 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6942 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6943 return setRange(UDiv, SignHint,
6944 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6952 if (!UnsignedMinValue.
isZero())
6953 ConservativeResult = ConservativeResult.intersectWith(
6954 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6963 bool AllNonNeg =
true;
6964 bool AllNonPos =
true;
6965 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6972 ConservativeResult = ConservativeResult.intersectWith(
6977 ConservativeResult = ConservativeResult.intersectWith(
6986 const SCEV *MaxBEScev =
7000 auto RangeFromAffine = getRangeForAffineAR(
7002 ConservativeResult =
7003 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
7005 auto RangeFromFactoring = getRangeViaFactoring(
7007 ConservativeResult =
7008 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
7014 const SCEV *SymbolicMaxBECount =
7019 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
7020 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
7021 ConservativeResult =
7022 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
7027 return setRange(AddRec, SignHint, std::move(ConservativeResult));
7037 ID = Intrinsic::umax;
7040 ID = Intrinsic::smax;
7044 ID = Intrinsic::umin;
7047 ID = Intrinsic::smin;
7054 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
7055 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
7057 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
7058 return setRange(S, SignHint,
7059 ConservativeResult.intersectWith(
X, RangeType));
7068 ConservativeResult =
7069 ConservativeResult.intersectWith(*MDRange, RangeType);
7074 auto CR = getRangeForUnknownRecurrence(U);
7075 ConservativeResult = ConservativeResult.intersectWith(CR);
7086 if (
U->getType()->isPointerTy()) {
7089 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
7090 int ptrIdxDiff = ptrSize -
BitWidth;
7091 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
7104 ConservativeResult = ConservativeResult.intersectWith(
7108 ConservativeResult = ConservativeResult.intersectWith(
7113 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
7116 bool CanBeNull, CanBeFreed;
7117 uint64_t DerefBytes =
7118 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
7128 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
7129 uint64_t Rem = MaxVal.
urem(Align);
7134 ConservativeResult = ConservativeResult.intersectWith(
7144 return getRangeRef(AR, SignHint,
Depth + 1);
7148 ConstantRange RangeFromOps(
BitWidth,
false);
7150 for (
const auto &
Op :
Phi->operands()) {
7152 RangeFromOps = RangeFromOps.unionWith(OpRange);
7154 if (RangeFromOps.isFullSet())
7157 ConservativeResult =
7158 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7164 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7166 ConservativeResult = ConservativeResult.difference(Disallowed);
7169 return setRange(U, SignHint, std::move(ConservativeResult));
7175 return setRange(S, SignHint, std::move(ConservativeResult));
7184 const APInt &MaxBECount,
7191 if (Step == 0 || MaxBECount == 0)
7198 return ConstantRange::getFull(
BitWidth);
7214 return ConstantRange::getFull(
BitWidth);
7226 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7227 : (StartUpper + std::move(
Offset));
7232 if (StartRange.
contains(MovedBoundary))
7233 return ConstantRange::getFull(
BitWidth);
7236 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7238 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7247 const APInt &MaxBECount) {
7251 "mismatched bit widths");
7260 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7262 StartSRange, MaxBECount,
7274ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7276 ScalarEvolution::RangeSignHint SignHint) {
7277 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7279 "This only works for non-self-wrapping AddRecs!");
7280 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7284 return ConstantRange::getFull(
BitWidth);
7292 return ConstantRange::getFull(
BitWidth);
7296 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7298 MaxItersWithoutWrap))
7299 return ConstantRange::getFull(
BitWidth);
7320 ConstantRange StartRange = getRangeRef(Start, SignHint);
7321 ConstantRange EndRange = getRangeRef(End, SignHint);
7322 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7326 return RangeBetween;
7331 return ConstantRange::getFull(
BitWidth);
7334 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7335 return RangeBetween;
7337 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7338 return RangeBetween;
7339 return ConstantRange::getFull(
BitWidth);
7344 const APInt &MaxBECount) {
7351 "mismatched bit widths");
7353 struct SelectPattern {
7354 Value *Condition =
nullptr;
7358 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7360 std::optional<unsigned> CastOp;
7374 CastOp = SCast->getSCEVType();
7375 S = SCast->getOperand();
7378 using namespace llvm::PatternMatch;
7385 Condition =
nullptr;
7417 bool isRecognized() {
return Condition !=
nullptr; }
7420 SelectPattern StartPattern(*
this,
BitWidth, Start);
7421 if (!StartPattern.isRecognized())
7422 return ConstantRange::getFull(
BitWidth);
7424 SelectPattern StepPattern(*
this,
BitWidth, Step);
7425 if (!StepPattern.isRecognized())
7426 return ConstantRange::getFull(
BitWidth);
7428 if (StartPattern.Condition != StepPattern.Condition) {
7432 return ConstantRange::getFull(
BitWidth);
7443 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7444 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7445 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7446 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7448 ConstantRange TrueRange =
7449 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7450 ConstantRange FalseRange =
7451 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7473ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7486 SmallPtrSet<const SCEV *, 16> Visited;
7488 auto pushOp = [&](
const SCEV *S) {
7489 if (!Visited.
insert(S).second)
7492 if (Visited.
size() > 30) {
7503 while (!Worklist.
empty()) {
7505 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7506 if (!Bound || DT.dominates(Bound, DefI))
7513 return Bound ? Bound : &*F.getEntryBlock().begin();
7519 return getDefiningScopeBound(
Ops, Discard);
7522bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7524 if (
A->getParent() ==
B->getParent() &&
7529 auto *BLoop = LI.getLoopFor(
B->getParent());
7530 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7531 BLoop->getLoopPreheader() ==
A->getParent() &&
7533 A->getParent()->end()) &&
7540bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7541 SCEVPoisonCollector PC(
true);
7543 return PC.MaybePoison.empty();
7546bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7552 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7556bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7573 for (
const Use &
Op :
I->operands()) {
7579 auto *DefI = getDefiningScopeBound(SCEVOps);
7580 return isGuaranteedToTransferExecutionTo(DefI,
I);
7583bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7585 if (isSCEVExprNeverPoison(
I))
7596 auto *ExitingBB =
L->getExitingBlock();
7600 SmallPtrSet<const Value *, 16> KnownPoison;
7609 while (!Worklist.
empty()) {
7612 for (
const Use &U :
Poison->uses()) {
7615 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7619 if (KnownPoison.
insert(PoisonUser).second)
7627ScalarEvolution::LoopProperties
7628ScalarEvolution::getLoopProperties(
const Loop *L) {
7629 using LoopProperties = ScalarEvolution::LoopProperties;
7631 auto Itr = LoopPropertiesCache.find(L);
7632 if (Itr == LoopPropertiesCache.end()) {
7635 return !
SI->isSimple();
7645 return I->mayWriteToMemory();
7648 LoopProperties LP = {
true,
7651 for (
auto *BB :
L->getBlocks())
7652 for (
auto &
I : *BB) {
7654 LP.HasNoAbnormalExits =
false;
7655 if (HasSideEffects(&
I))
7656 LP.HasNoSideEffects =
false;
7657 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7661 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7662 assert(InsertPair.second &&
"We just checked!");
7663 Itr = InsertPair.first;
7676const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7682 Stack.emplace_back(V,
true);
7683 Stack.emplace_back(V,
false);
7684 while (!Stack.empty()) {
7685 auto E = Stack.pop_back_val();
7686 Value *CurV = E.getPointer();
7692 const SCEV *CreatedSCEV =
nullptr;
7695 CreatedSCEV = createSCEV(CurV);
7700 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7704 insertValueToMap(CurV, CreatedSCEV);
7708 Stack.emplace_back(CurV,
true);
7710 Stack.emplace_back(
Op,
false);
7727 if (!DT.isReachableFromEntry(
I->getParent()))
7740 switch (BO->Opcode) {
7741 case Instruction::Add:
7742 case Instruction::Mul: {
7749 Ops.push_back(BO->
Op);
7753 Ops.push_back(BO->RHS);
7757 (BO->Opcode == Instruction::Add &&
7758 (NewBO->Opcode != Instruction::Add &&
7759 NewBO->Opcode != Instruction::Sub)) ||
7760 (BO->Opcode == Instruction::Mul &&
7761 NewBO->Opcode != Instruction::Mul)) {
7762 Ops.push_back(BO->LHS);
7767 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7770 Ops.push_back(BO->LHS);
7778 case Instruction::Sub:
7779 case Instruction::UDiv:
7780 case Instruction::URem:
7782 case Instruction::AShr:
7783 case Instruction::Shl:
7784 case Instruction::Xor:
7788 case Instruction::And:
7789 case Instruction::Or:
7793 case Instruction::LShr:
7800 Ops.push_back(BO->LHS);
7801 Ops.push_back(BO->RHS);
7805 switch (
U->getOpcode()) {
7806 case Instruction::Trunc:
7807 case Instruction::ZExt:
7808 case Instruction::SExt:
7809 case Instruction::PtrToAddr:
7810 case Instruction::PtrToInt:
7811 Ops.push_back(
U->getOperand(0));
7814 case Instruction::BitCast:
7816 Ops.push_back(
U->getOperand(0));
7821 case Instruction::SDiv:
7822 case Instruction::SRem:
7823 Ops.push_back(
U->getOperand(0));
7824 Ops.push_back(
U->getOperand(1));
7827 case Instruction::GetElementPtr:
7829 "GEP source element type must be sized");
7833 case Instruction::IntToPtr:
7836 case Instruction::PHI:
7867 Ops.push_back(CondICmp->getOperand(0));
7868 Ops.push_back(CondICmp->getOperand(1));
7888 case Instruction::Select: {
7890 auto CanSimplifyToUnknown = [
this,
U]() {
7908 if (CanSimplifyToUnknown())
7915 case Instruction::Call:
7916 case Instruction::Invoke:
7923 switch (
II->getIntrinsicID()) {
7924 case Intrinsic::abs:
7925 Ops.push_back(
II->getArgOperand(0));
7927 case Intrinsic::umax:
7928 case Intrinsic::umin:
7929 case Intrinsic::smax:
7930 case Intrinsic::smin:
7931 case Intrinsic::usub_sat:
7932 case Intrinsic::uadd_sat:
7933 Ops.push_back(
II->getArgOperand(0));
7934 Ops.push_back(
II->getArgOperand(1));
7936 case Intrinsic::start_loop_iterations:
7937 case Intrinsic::annotation:
7938 case Intrinsic::ptr_annotation:
7939 Ops.push_back(
II->getArgOperand(0));
7951const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7960 if (!DT.isReachableFromEntry(
I->getParent()))
7975 switch (BO->Opcode) {
7976 case Instruction::Add: {
8002 if (BO->Opcode == Instruction::Sub)
8010 if (BO->Opcode == Instruction::Sub)
8017 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
8018 NewBO->Opcode != Instruction::Sub)) {
8028 case Instruction::Mul: {
8049 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
8058 case Instruction::UDiv:
8062 case Instruction::URem:
8066 case Instruction::Sub: {
8069 Flags = getNoWrapFlagsFromUB(BO->
Op);
8074 case Instruction::And:
8080 if (CI->isMinusOne())
8082 const APInt &
A = CI->getValue();
8088 unsigned LZ =
A.countl_zero();
8089 unsigned TZ =
A.countr_zero();
8094 APInt EffectiveMask =
8096 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
8099 const SCEV *ShiftedLHS =
nullptr;
8103 unsigned MulZeros = OpC->getAPInt().countr_zero();
8104 unsigned GCD = std::min(MulZeros, TZ);
8109 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
8131 case Instruction::Or:
8140 case Instruction::Xor:
8143 if (CI->isMinusOne())
8152 if (LBO->getOpcode() == Instruction::And &&
8153 LCI->getValue() == CI->getValue())
8154 if (
const SCEVZeroExtendExpr *Z =
8157 const SCEV *Z0 =
Z->getOperand();
8164 if (CI->getValue().isMask(Z0TySize))
8170 APInt Trunc = CI->getValue().trunc(Z0TySize);
8179 case Instruction::Shl:
8197 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8206 ConstantInt *
X = ConstantInt::get(
8212 case Instruction::AShr:
8234 const SCEV *AddTruncateExpr =
nullptr;
8235 ConstantInt *ShlAmtCI =
nullptr;
8236 const SCEV *AddConstant =
nullptr;
8238 if (L &&
L->getOpcode() == Instruction::Add) {
8246 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8253 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8261 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8266 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8271 if (AddTruncateExpr && ShlAmtCI) {
8283 const APInt &ShlAmt = ShlAmtCI->
getValue();
8287 const SCEV *CompositeExpr =
8289 if (
L->getOpcode() != Instruction::Shl)
8290 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8299 switch (
U->getOpcode()) {
8300 case Instruction::Trunc:
8303 case Instruction::ZExt:
8306 case Instruction::SExt:
8316 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8317 Type *Ty =
U->getType();
8325 case Instruction::BitCast:
8331 case Instruction::PtrToAddr: {
8338 case Instruction::PtrToInt: {
8341 Type *DstIntTy =
U->getType();
8349 case Instruction::IntToPtr:
8353 case Instruction::SDiv:
8360 case Instruction::SRem:
8367 case Instruction::GetElementPtr:
8370 case Instruction::PHI:
8373 case Instruction::Select:
8374 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8377 case Instruction::Call:
8378 case Instruction::Invoke:
8383 switch (
II->getIntrinsicID()) {
8384 case Intrinsic::abs:
8388 case Intrinsic::umax:
8392 case Intrinsic::umin:
8396 case Intrinsic::smax:
8400 case Intrinsic::smin:
8404 case Intrinsic::usub_sat: {
8405 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8406 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8410 case Intrinsic::uadd_sat: {
8411 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8412 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8416 case Intrinsic::start_loop_iterations:
8417 case Intrinsic::annotation:
8418 case Intrinsic::ptr_annotation:
8422 case Intrinsic::vscale:
8442 auto *ExitCountType = ExitCount->
getType();
8443 assert(ExitCountType->isIntegerTy());
8445 1 + ExitCountType->getScalarSizeInBits());
8458 auto CanAddOneWithoutOverflow = [&]() {
8460 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8471 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8501 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8502 assert(L->isLoopExiting(ExitingBlock) &&
8503 "Exiting block must actually branch out of the loop!");
8512 const auto *MaxExitCount =
8520 L->getExitingBlocks(ExitingBlocks);
8522 std::optional<unsigned> Res;
8523 for (
auto *ExitingBB : ExitingBlocks) {
8527 Res = std::gcd(*Res, Multiple);
8529 return Res.value_or(1);
8533 const SCEV *ExitCount) {
8563 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8564 assert(L->isLoopExiting(ExitingBlock) &&
8565 "Exiting block must actually branch out of the loop!");
8575 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8577 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8579 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8589 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8592 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8595 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8603 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8610 return getBackedgeTakenInfo(L).getExact(L,
this);
8612 return getBackedgeTakenInfo(L).getConstantMax(
this);
8614 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8621 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8626 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8630 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8640 for (
PHINode &PN : Header->phis())
8641 if (Visited.
insert(&PN).second)
8645ScalarEvolution::BackedgeTakenInfo &
8646ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8647 auto &BTI = getBackedgeTakenInfo(L);
8648 if (BTI.hasFullInfo())
8651 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8654 return Pair.first->second;
8656 BackedgeTakenInfo
Result =
8657 computeBackedgeTakenCount(L,
true);
8659 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8662ScalarEvolution::BackedgeTakenInfo &
8663ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8669 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8670 BackedgeTakenCounts.try_emplace(L);
8672 return Pair.first->second;
8677 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8684 if (
Result.hasAnyInfo()) {
8687 auto LoopUsersIt = LoopUsers.find(L);
8688 if (LoopUsersIt != LoopUsers.end())
8690 forgetMemoizedResults(ToForget);
8693 for (PHINode &PN :
L->getHeader()->phis())
8694 ConstantEvolutionLoopExitValue.erase(&PN);
8702 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8711 BackedgeTakenCounts.clear();
8712 PredicatedBackedgeTakenCounts.clear();
8713 BECountUsers.clear();
8714 LoopPropertiesCache.clear();
8715 ConstantEvolutionLoopExitValue.clear();
8716 ValueExprMap.clear();
8717 ValuesAtScopes.clear();
8718 ValuesAtScopesUsers.clear();
8719 LoopDispositions.clear();
8720 BlockDispositions.clear();
8721 UnsignedRanges.clear();
8722 SignedRanges.clear();
8723 ExprValueMap.clear();
8725 ConstantMultipleCache.clear();
8726 PredicatedSCEVRewrites.clear();
8728 FoldCacheUser.clear();
8730void ScalarEvolution::visitAndClearUsers(
8734 while (!Worklist.
empty()) {
8741 if (It != ValueExprMap.
end()) {
8742 eraseValueFromMap(It->first);
8745 ConstantEvolutionLoopExitValue.erase(PN);
8759 while (!LoopWorklist.
empty()) {
8763 forgetBackedgeTakenCounts(CurrL,
false);
8764 forgetBackedgeTakenCounts(CurrL,
true);
8767 for (
auto I = PredicatedSCEVRewrites.begin();
8768 I != PredicatedSCEVRewrites.end();) {
8769 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8770 if (Entry.second == CurrL)
8771 PredicatedSCEVRewrites.erase(
I++);
8776 auto LoopUsersItr = LoopUsers.find(CurrL);
8777 if (LoopUsersItr != LoopUsers.end())
8782 visitAndClearUsers(Worklist, Visited, ToForget);
8784 LoopPropertiesCache.erase(CurrL);
8787 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8789 forgetMemoizedResults(ToForget);
8806 visitAndClearUsers(Worklist, Visited, ToForget);
8808 forgetMemoizedResults(ToForget);
8820 struct InvalidationRootCollector {
8824 InvalidationRootCollector(
Loop *L) : L(L) {}
8826 bool follow(
const SCEV *S) {
8832 if (L->contains(AddRec->
getLoop()))
8837 bool isDone()
const {
return false; }
8840 InvalidationRootCollector
C(L);
8842 forgetMemoizedResults(
C.Roots);
8855 BlockDispositions.clear();
8856 LoopDispositions.clear();
8873 while (!Worklist.
empty()) {
8875 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8876 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8877 if (!LoopDispoRemoved && !BlockDispoRemoved)
8879 auto Users = SCEVUsers.find(Curr);
8880 if (
Users != SCEVUsers.end())
8893const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8897 if (!isComplete() || ExitNotTaken.
empty())
8908 for (
const auto &ENT : ExitNotTaken) {
8909 const SCEV *BECount = ENT.ExactNotTaken;
8912 "We should only have known counts for exiting blocks that dominate "
8915 Ops.push_back(BECount);
8920 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8921 "Predicate should be always true!");
8930const ScalarEvolution::ExitNotTakenInfo *
8931ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8932 const BasicBlock *ExitingBlock,
8933 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8934 for (
const auto &ENT : ExitNotTaken)
8935 if (ENT.ExitingBlock == ExitingBlock) {
8936 if (ENT.hasAlwaysTruePredicate())
8938 else if (Predicates) {
8948const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8950 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8951 if (!getConstantMax())
8954 for (
const auto &ENT : ExitNotTaken)
8955 if (!ENT.hasAlwaysTruePredicate()) {
8963 "No point in having a non-constant max backedge taken count!");
8964 return getConstantMax();
8967const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8969 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8977 for (
const auto &ENT : ExitNotTaken) {
8978 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8981 "We should only have known counts for exiting blocks that "
8987 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8988 "Predicate should be always true!");
8991 if (ExitCounts.
empty())
9000bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
9002 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
9003 return !ENT.hasAlwaysTruePredicate();
9005 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
9021 this->ExactNotTaken = E = ConstantMaxNotTaken;
9022 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
9027 "Exact is not allowed to be less precise than Constant Max");
9030 "Exact is not allowed to be less precise than Symbolic Max");
9033 "Symbolic Max is not allowed to be less precise than Constant Max");
9036 "No point in having a non-constant max backedge taken count!");
9038 for (
const auto PredList : PredLists)
9039 for (
const auto *
P : PredList) {
9047 "Backedge count should be int");
9050 "Max backedge count should be int");
9063ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
9065 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
9066 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
9067 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9069 ExitNotTaken.reserve(ExitCounts.
size());
9070 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
9071 std::back_inserter(ExitNotTaken),
9072 [&](
const EdgeExitInfo &EEI) {
9073 BasicBlock *ExitBB = EEI.first;
9074 const ExitLimit &EL = EEI.second;
9075 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
9076 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
9081 "No point in having a non-constant max backedge taken count!");
9085ScalarEvolution::BackedgeTakenInfo
9086ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
9087 bool AllowPredicates) {
9089 L->getExitingBlocks(ExitingBlocks);
9091 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9094 bool CouldComputeBECount =
true;
9096 const SCEV *MustExitMaxBECount =
nullptr;
9097 const SCEV *MayExitMaxBECount =
nullptr;
9098 bool MustExitMaxOrZero =
false;
9099 bool IsOnlyExit = ExitingBlocks.
size() == 1;
9110 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9111 if (ExitIfTrue == CI->
isZero())
9115 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
9117 assert((AllowPredicates || EL.Predicates.empty()) &&
9118 "Predicated exit limit when predicates are not allowed!");
9123 ++NumExitCountsComputed;
9127 CouldComputeBECount =
false;
9134 "Exact is known but symbolic isn't?");
9135 ++NumExitCountsNotComputed;
9150 DT.dominates(ExitBB, Latch)) {
9151 if (!MustExitMaxBECount) {
9152 MustExitMaxBECount = EL.ConstantMaxNotTaken;
9153 MustExitMaxOrZero = EL.MaxOrZero;
9156 EL.ConstantMaxNotTaken);
9160 MayExitMaxBECount = EL.ConstantMaxNotTaken;
9163 EL.ConstantMaxNotTaken);
9167 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9171 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9177 for (
const auto &Pair : ExitCounts) {
9179 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
9181 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9182 {
L, AllowPredicates});
9184 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9185 MaxBECount, MaxOrZero);
9188ScalarEvolution::ExitLimit
9189ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
9190 bool IsOnlyExit,
bool AllowPredicates) {
9191 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
9195 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9200 bool ExitIfTrue = !
L->contains(BI->getSuccessor(0));
9201 assert(ExitIfTrue ==
L->contains(BI->getSuccessor(1)) &&
9202 "It should have one successor in loop and one exit block!");
9213 if (!
L->contains(SBB)) {
9218 assert(Exit &&
"Exiting block must have at least one exit");
9219 return computeExitLimitFromSingleExitSwitch(
9220 L, SI, Exit, IsOnlyExit);
9227 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9228 bool AllowPredicates) {
9229 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9230 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9231 ControlsOnlyExit, AllowPredicates);
9234std::optional<ScalarEvolution::ExitLimit>
9235ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9236 bool ExitIfTrue,
bool ControlsOnlyExit,
9237 bool AllowPredicates) {
9239 (void)this->ExitIfTrue;
9240 (void)this->AllowPredicates;
9242 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9243 this->AllowPredicates == AllowPredicates &&
9244 "Variance in assumed invariant key components!");
9245 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9246 if (Itr == TripCountMap.end())
9247 return std::nullopt;
9251void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9253 bool ControlsOnlyExit,
9254 bool AllowPredicates,
9256 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9257 this->AllowPredicates == AllowPredicates &&
9258 "Variance in assumed invariant key components!");
9260 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9261 assert(InsertResult.second &&
"Expected successful insertion!");
9266ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9267 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9268 bool ControlsOnlyExit,
bool AllowPredicates) {
9270 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9274 ExitLimit EL = computeExitLimitFromCondImpl(
9275 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9276 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9280ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9281 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9282 bool ControlsOnlyExit,
bool AllowPredicates) {
9284 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9285 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9286 return *LimitFromBinOp;
9292 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9293 if (EL.hasFullInfo() || !AllowPredicates)
9297 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9317 const WithOverflowInst *WO;
9332 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9333 ControlsOnlyExit, AllowPredicates);
9334 if (EL.hasAnyInfo())
9339 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9342std::optional<ScalarEvolution::ExitLimit>
9343ScalarEvolution::computeExitLimitFromCondFromBinOp(
9344 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9345 bool ControlsOnlyExit,
bool AllowPredicates) {
9354 return std::nullopt;
9357 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9358 if (Op0 == NeutralElement)
9360 if (Op1 == NeutralElement)
9361 return computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
9362 ControlsOnlyExit, AllowPredicates);
9366 ExitLimit EL0 = computeExitLimitFromCondCached(
9367 Cache, L, Op0, ExitIfTrue,
false, AllowPredicates);
9368 ExitLimit EL1 = computeExitLimitFromCondCached(
9369 Cache, L, Op1, ExitIfTrue,
false, AllowPredicates);
9374 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9379 if (EitherMayExit) {
9389 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9391 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9394 EL1.ConstantMaxNotTaken);
9396 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9398 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9401 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9405 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9406 BECount = EL0.ExactNotTaken;
9419 SymbolicMaxBECount =
9421 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9425ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9426 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9427 bool AllowPredicates) {
9439 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9441 if (EL.hasAnyInfo())
9444 auto *ExhaustiveCount =
9445 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9448 return ExhaustiveCount;
9450 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9453ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9455 bool ControlsOnlyExit,
bool AllowPredicates) {
9480 ConstantRange CompRange =
9498 InnerLHS = ZExt->getOperand();
9545 if (EL.hasAnyInfo())
9562 if (EL.hasAnyInfo())
return EL;
9594 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9596 if (EL.hasAnyInfo())
9612 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9614 if (EL.hasAnyInfo())
9625ScalarEvolution::ExitLimit
9626ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9628 BasicBlock *ExitingBlock,
9629 bool ControlsOnlyExit) {
9630 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9633 if (
Switch->getDefaultDest() == ExitingBlock)
9637 "Default case must not exit the loop!");
9643 if (EL.hasAnyInfo())
9655 "Evaluation of SCEV at constant didn't fold correctly?");
9659ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9669 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9675 auto MatchPositiveShift =
9678 using namespace PatternMatch;
9680 ConstantInt *ShiftAmt;
9682 OutOpCode = Instruction::LShr;
9684 OutOpCode = Instruction::AShr;
9686 OutOpCode = Instruction::Shl;
9701 auto MatchShiftRecurrence =
9703 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9718 if (MatchPositiveShift(
LHS, V, OpC)) {
9719 PostShiftOpCode = OpC;
9725 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9728 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9734 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9741 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9746 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9758 ConstantInt *StableValue =
nullptr;
9763 case Instruction::AShr: {
9771 StableValue = ConstantInt::get(Ty, 0);
9773 StableValue = ConstantInt::get(Ty, -1,
true);
9779 case Instruction::LShr:
9780 case Instruction::Shl:
9790 "Otherwise cannot be an operand to a branch instruction");
9792 if (
Result->isNullValue()) {
9794 const SCEV *UpperBound =
9811 if (
const Function *
F = CI->getCalledFunction())
9820 if (!L->contains(
I))
return false;
9825 return L->getHeader() ==
I->getParent();
9901 if (!
I)
return nullptr;
9914 std::vector<Constant*> Operands(
I->getNumOperands());
9916 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9920 if (!Operands[i])
return nullptr;
9925 if (!
C)
return nullptr;
9947 if (IncomingVal != CurrentVal) {
9950 IncomingVal = CurrentVal;
9962ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9965 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9974 DenseMap<Instruction *, Constant *> CurrentIterVals;
9976 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9982 for (PHINode &
PHI : Header->phis()) {
9984 CurrentIterVals[&
PHI] = StartCST;
9986 if (!CurrentIterVals.
count(PN))
9987 return RetVal =
nullptr;
9993 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9996 unsigned IterationNum = 0;
9998 for (; ; ++IterationNum) {
9999 if (IterationNum == NumIterations)
10000 return RetVal = CurrentIterVals[PN];
10004 DenseMap<Instruction *, Constant *> NextIterVals;
10009 NextIterVals[PN] = NextPHI;
10011 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
10017 for (
const auto &
I : CurrentIterVals) {
10019 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
10024 for (
const auto &
I : PHIsToCompute) {
10025 PHINode *
PHI =
I.first;
10028 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10031 if (NextPHI !=
I.second)
10032 StoppedEvolving =
false;
10037 if (StoppedEvolving)
10038 return RetVal = CurrentIterVals[PN];
10040 CurrentIterVals.swap(NextIterVals);
10044const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
10054 DenseMap<Instruction *, Constant *> CurrentIterVals;
10056 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
10059 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
10061 for (PHINode &
PHI : Header->phis()) {
10063 CurrentIterVals[&
PHI] = StartCST;
10065 if (!CurrentIterVals.
count(PN))
10073 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
10080 if (CondVal->getValue() == uint64_t(ExitWhen)) {
10081 ++NumBruteForceTripCountsComputed;
10086 DenseMap<Instruction *, Constant *> NextIterVals;
10092 for (
const auto &
I : CurrentIterVals) {
10094 if (!
PHI ||
PHI->getParent() != Header)
continue;
10097 for (PHINode *
PHI : PHIsToCompute) {
10099 if (NextPHI)
continue;
10101 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10104 CurrentIterVals.
swap(NextIterVals);
10115 for (
auto &LS : Values)
10117 return LS.second ? LS.second : V;
10122 const SCEV *
C = computeSCEVAtScope(V, L);
10123 for (
auto &LS :
reverse(ValuesAtScopes[V]))
10124 if (LS.first == L) {
10127 ValuesAtScopesUsers[
C].push_back({L, V});
10138 switch (V->getSCEVType()) {
10178 assert(!
C->getType()->isPointerTy() &&
10179 "Can only have one pointer, and it must be last");
10204const SCEV *ScalarEvolution::getWithOperands(
const SCEV *S,
10205 SmallVectorImpl<SCEVUse> &NewOps) {
10240const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10241 switch (
V->getSCEVType()) {
10252 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10263 for (++i; i !=
e; ++i)
10308 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10318 for (++i; i !=
e; ++i) {
10323 return getWithOperands(V, NewOps);
10338 const Loop *CurrLoop = this->LI[
I->getParent()];
10349 if (BackedgeTakenCount->
isZero()) {
10350 Value *InitValue =
nullptr;
10351 bool MultipleInitValues =
false;
10357 MultipleInitValues =
true;
10362 if (!MultipleInitValues && InitValue)
10371 unsigned InLoopPred =
10382 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10396 SmallVector<Constant *, 4> Operands;
10397 Operands.
reserve(
I->getNumOperands());
10398 bool MadeImprovement =
false;
10413 MadeImprovement |= OrigV != OpV;
10418 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10423 if (!MadeImprovement)
10444const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10446 return stripInjectiveFunctions(ZExt->getOperand());
10448 return stripInjectiveFunctions(SExt->getOperand());
10466 assert(
A != 0 &&
"A must be non-zero.");
10482 if (MinTZ < Mult2 && L->getLoopPredecessor())
10484 if (MinTZ < Mult2) {
10507 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10527static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10533 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10534 << *AddRec <<
'\n');
10537 if (!LC || !MC || !
NC) {
10538 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10539 return std::nullopt;
10545 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10553 N =
N.sext(NewWidth);
10554 M = M.sext(NewWidth);
10555 L = L.sext(NewWidth);
10572 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10573 <<
", multiplied by " <<
T <<
'\n');
10582 std::optional<APInt>
Y) {
10584 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10587 return XW.
slt(YW) ? *
X : *
Y;
10590 return std::nullopt;
10591 return X ? *
X : *
Y;
10608 return std::nullopt;
10609 unsigned W =
X->getBitWidth();
10629static std::optional<APInt>
10635 return std::nullopt;
10638 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10639 std::optional<APInt>
X =
10642 return std::nullopt;
10647 return std::nullopt;
10662static std::optional<APInt>
10666 "Starting value of addrec should be 0");
10667 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10668 <<
Range <<
", addrec " << *AddRec <<
'\n');
10672 "Addrec's initial value should be in range");
10678 return std::nullopt;
10688 auto SolveForBoundary =
10689 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10692 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10693 << Bound <<
" (before multiplying by " << M <<
")\n");
10696 std::optional<APInt> SO;
10699 "signed overflow\n");
10703 "unsigned overflow\n");
10704 std::optional<APInt> UO =
10707 auto LeavesRange = [&] (
const APInt &
X) {
10724 return {std::nullopt,
false};
10729 if (LeavesRange(*Min))
10730 return { Min,
true };
10731 std::optional<APInt> Max = Min == SO ? UO : SO;
10732 if (LeavesRange(*Max))
10733 return { Max,
true };
10736 return {std::nullopt,
true};
10743 auto SL = SolveForBoundary(
Lower);
10744 auto SU = SolveForBoundary(
Upper);
10747 if (!SL.second || !SU.second)
10748 return std::nullopt;
10791ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10793 bool ControlsOnlyExit,
10794 bool AllowPredicates) {
10805 if (
C->getValue()->isZero())
return C;
10809 const SCEVAddRecExpr *AddRec =
10812 if (!AddRec && AllowPredicates)
10818 if (!AddRec || AddRec->
getLoop() != L)
10829 return ExitLimit(R, R, R,
false, Predicates);
10887 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10913 const SCEV *
Exact =
10921 const SCEV *SymbolicMax =
10923 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10932 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10940 return ExitLimit(
E, M, S,
false, Predicates);
10943ScalarEvolution::ExitLimit
10944ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10952 if (!
C->getValue()->isZero())
10962std::pair<const BasicBlock *, const BasicBlock *>
10963ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10974 if (
const Loop *L = LI.getLoopFor(BB))
10975 return {
L->getLoopPredecessor(),
L->getHeader()};
10977 return {
nullptr, BB};
10986 if (
A ==
B)
return true;
11001 if (ComputesEqualValues(AI, BI))
11009 const SCEV *Op0, *Op1;
11028 auto TrivialCase = [&](
bool TriviallyTrue) {
11037 const SCEV *NewLHS, *NewRHS;
11061 return TrivialCase(
false);
11062 return TrivialCase(
true);
11085 const APInt &
RA = RC->getAPInt();
11087 bool SimplifiedByConstantRange =
false;
11092 return TrivialCase(
true);
11094 return TrivialCase(
false);
11103 Changed = SimplifiedByConstantRange =
true;
11107 if (!SimplifiedByConstantRange) {
11124 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
11130 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
11136 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
11142 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
11154 return TrivialCase(
true);
11156 return TrivialCase(
false);
11261 auto NonRecursive = [OrNegative](
const SCEV *S) {
11263 return C->getAPInt().isPowerOf2() ||
11264 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11270 if (NonRecursive(S))
11296 APInt C = Cst->getAPInt();
11297 return C.urem(M) == 0;
11305 const SCEV *SmodM =
11320 for (
auto *
A : Assumptions)
11321 if (
A->implies(
P, *
this))
11334std::pair<const SCEV *, const SCEV *>
11337 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11339 return { Start, Start };
11341 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11350 getUsedLoops(LHS, LoopsUsed);
11351 getUsedLoops(RHS, LoopsUsed);
11353 if (LoopsUsed.
empty())
11358 for (
const auto *L1 : LoopsUsed)
11359 for (
const auto *L2 : LoopsUsed)
11360 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11361 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11362 "Domination relationship is not a linear order");
11392 SplitRHS.second) &&
11404 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11408 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11418 return std::nullopt;
11433 if (KnownWithoutContext)
11434 return KnownWithoutContext;
11441 return std::nullopt;
11447 const Loop *L = LHS->getLoop();
11452std::optional<ScalarEvolution::MonotonicPredicateType>
11455 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11461 auto ResultSwapped =
11464 assert(*ResultSwapped != *Result &&
11465 "monotonicity should flip as we flip the predicate");
11472std::optional<ScalarEvolution::MonotonicPredicateType>
11473ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11487 return std::nullopt;
11491 "Should be greater or less!");
11495 if (!LHS->hasNoUnsignedWrap())
11496 return std::nullopt;
11500 "Relational predicate is either signed or unsigned!");
11501 if (!
LHS->hasNoSignedWrap())
11502 return std::nullopt;
11504 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11512 return std::nullopt;
11515std::optional<ScalarEvolution::LoopInvariantPredicate>
11522 return std::nullopt;
11529 if (!ArLHS || ArLHS->
getLoop() != L)
11530 return std::nullopt;
11534 return std::nullopt;
11560 return std::nullopt;
11597 return std::nullopt;
11600std::optional<ScalarEvolution::LoopInvariantPredicate>
11605 Pred, LHS, RHS, L, CtxI, MaxIter))
11615 Pred, LHS, RHS, L, CtxI,
Op))
11617 return std::nullopt;
11620std::optional<ScalarEvolution::LoopInvariantPredicate>
11635 return std::nullopt;
11642 if (!AR || AR->
getLoop() != L)
11643 return std::nullopt;
11648 Pred = Pred.dropSameSign();
11652 return std::nullopt;
11658 if (Step != One && Step != MinusOne)
11659 return std::nullopt;
11665 return std::nullopt;
11671 return std::nullopt;
11679 if (Step == MinusOne)
11683 return std::nullopt;
11689bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11695 auto CheckRange = [&](
bool IsSigned) {
11698 return RangeLHS.
icmp(Pred, RangeRHS);
11707 if (CheckRange(
true) || CheckRange(
false))
11716bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11725 SCEVUse XNonConstOp, XConstOp;
11726 SCEVUse YNonConstOp, YConstOp;
11730 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11733 XFlagsPresent = ExpectedFlags;
11738 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11741 YFlagsPresent = ExpectedFlags;
11744 if (YNonConstOp != XNonConstOp)
11752 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11755 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11815bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11836bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11837 const SCEV *
LHS,
const SCEV *
RHS) {
11842 return any_of(*BB, [&](
const Instruction &
I) {
11843 using namespace llvm::PatternMatch;
11848 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11862 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11867 "This cannot be done on broken IR!");
11870 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11879 if (LoopContinuePredicate &&
11880 isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->
getCondition(),
11881 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11886 if (WalkingBEDominatingConds)
11892 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11893 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11900 const SCEV *LoopCounter =
11908 for (
auto &AssumeVH : AC.assumptions()) {
11915 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11919 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11922 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11923 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11924 assert(DTN &&
"should reach the loop header before reaching the root!");
11927 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11945 if (isImpliedCond(Pred, LHS, RHS, ContBr->
getCondition(),
11958 if (!DT.isReachableFromEntry(BB))
11962 "This cannot be done on broken IR!");
11970 const bool ProvingStrictComparison =
11972 bool ProvedNonStrictComparison =
false;
11973 bool ProvedNonEquality =
false;
11976 if (!ProvedNonStrictComparison)
11977 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11978 if (!ProvedNonEquality)
11980 if (ProvedNonStrictComparison && ProvedNonEquality)
11985 if (ProvingStrictComparison) {
11987 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11989 if (SplitAndProve(ProofFn))
11994 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11996 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
11998 if (ProvingStrictComparison) {
12000 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
12002 if (SplitAndProve(ProofFn))
12011 const Loop *ContainingLoop = LI.getLoopFor(BB);
12013 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
12017 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
12018 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
12021 if (!BlockEntryPredicate)
12030 for (
auto &AssumeVH : AC.assumptions()) {
12034 if (!DT.dominates(CI, BB))
12037 if (ProveViaCond(CI->getArgOperand(0),
false))
12043 F.getParent(), Intrinsic::experimental_guard);
12045 for (
const auto *GU : GuardDecl->users())
12047 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
12048 if (ProveViaCond(Guard->getArgOperand(0),
false))
12063 "LHS is not available at Loop Entry");
12065 "RHS is not available at Loop Entry");
12067 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
12078 if (FoundCondValue ==
12082 if (!PendingLoopPredicates.insert(FoundCondValue).second)
12086 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
12089 const Value *Op0, *Op1;
12092 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
12096 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
12097 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
12101 if (!ICI)
return false;
12105 CmpPredicate FoundPred;
12114 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
12117bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
12118 const SCEV *
RHS, CmpPredicate FoundPred,
12119 const SCEV *FoundLHS,
const SCEV *FoundRHS,
12120 const Instruction *CtxI) {
12130 auto *WideType = FoundLHS->
getType();
12142 TruncFoundLHS, TruncFoundRHS, CtxI))
12168 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
12172bool ScalarEvolution::isImpliedCondBalancedTypes(
12177 "Types should be balanced!");
12184 if (FoundLHS == FoundRHS)
12188 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
12200 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12217 LHS, FoundLHS, FoundRHS, CtxI);
12219 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12241 assert(P1 != P2 &&
"Handled earlier!");
12245 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12249 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12252 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12253 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12254 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12259 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12270 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12271 CanonicalRHS, CanonicalFoundLHS,
12272 CanonicalFoundRHS);
12277 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12278 CanonicalRHS, CanonicalFoundLHS,
12279 CanonicalFoundRHS);
12286 const SCEVConstant *
C =
nullptr;
12287 const SCEV *
V =
nullptr;
12305 if (Min ==
C->getAPInt()) {
12310 APInt SharperMin = Min + 1;
12313 case ICmpInst::ICMP_SGE:
12314 case ICmpInst::ICMP_UGE:
12317 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12322 case ICmpInst::ICMP_SGT:
12323 case ICmpInst::ICMP_UGT:
12333 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12338 case ICmpInst::ICMP_SLE:
12339 case ICmpInst::ICMP_ULE:
12340 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12341 LHS, V, getConstant(SharperMin), CtxI))
12345 case ICmpInst::ICMP_SLT:
12346 case ICmpInst::ICMP_ULT:
12347 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12348 LHS, V, getConstant(Min), CtxI))
12362 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12366 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12369 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12385std::optional<APInt>
12392 APInt DiffMul(BW, 1);
12395 for (
unsigned I = 0;
I < 8; ++
I) {
12404 if (LAR->getLoop() != MAR->getLoop())
12405 return std::nullopt;
12409 if (!LAR->isAffine() || !MAR->isAffine())
12410 return std::nullopt;
12412 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12413 return std::nullopt;
12415 Less = LAR->getStart();
12416 More = MAR->getStart();
12421 auto MatchConstMul =
12422 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12427 return std::nullopt;
12429 if (
auto MatchedMore = MatchConstMul(More)) {
12430 if (
auto MatchedLess = MatchConstMul(
Less)) {
12431 if (MatchedMore->second == MatchedLess->second) {
12432 More = MatchedMore->first;
12433 Less = MatchedLess->first;
12434 DiffMul *= MatchedMore->second;
12445 Diff +=
C->getAPInt() * DiffMul;
12448 Diff -=
C->getAPInt() * DiffMul;
12451 Multiplicity[S] +=
Mul;
12453 auto Decompose = [&](
const SCEV *S,
int Mul) {
12460 Decompose(More, 1);
12461 Decompose(
Less, -1);
12465 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12466 for (
const auto &[S,
Mul] : Multiplicity) {
12471 return std::nullopt;
12473 }
else if (
Mul == -1) {
12475 return std::nullopt;
12478 return std::nullopt;
12482 if (NewMore == More || NewLess ==
Less)
12483 return std::nullopt;
12489 if (!More && !
Less)
12493 if (!More || !
Less)
12494 return std::nullopt;
12498 return std::nullopt;
12501bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12523 const auto *Latch = L->getLoopLatch();
12526 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12535 const auto *Latch = L->getLoopLatch();
12538 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12548bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12551 const SCEV *FoundLHS,
12552 const SCEV *FoundRHS) {
12561 if (!AddRecFoundLHS)
12568 const Loop *
L = AddRecFoundLHS->getLoop();
12569 if (L != AddRecLHS->getLoop())
12608 if (!RDiff || *LDiff != *RDiff)
12611 if (LDiff->isMinValue())
12614 APInt FoundRHSLimit;
12617 FoundRHSLimit = -(*RDiff);
12629bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12630 const SCEV *
RHS,
const SCEV *FoundLHS,
12631 const SCEV *FoundRHS,
unsigned Depth) {
12632 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12636 bool Erased = PendingMerges.erase(LPhi);
12637 assert(Erased &&
"Failed to erase LPhi!");
12641 bool Erased = PendingMerges.erase(RPhi);
12642 assert(Erased &&
"Failed to erase RPhi!");
12650 if (!PendingMerges.insert(Phi).second)
12664 if (!PendingMerges.insert(Phi).second)
12670 if (!LPhi && !RPhi)
12681 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12685 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12686 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12687 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12688 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12691 if (RPhi && RPhi->getParent() == LBB) {
12698 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12699 if (!ProvedEasily(L, R))
12710 auto *RLoop = RAR->
getLoop();
12711 auto *Predecessor = RLoop->getLoopPredecessor();
12712 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12714 if (!ProvedEasily(L1, RAR->
getStart()))
12716 auto *Latch = RLoop->getLoopLatch();
12717 assert(Latch &&
"Loop with AddRec with no latch?");
12738 if (
auto *Loop = LI.getLoopFor(LBB))
12741 if (!ProvedEasily(L,
RHS))
12748bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12751 const SCEV *FoundLHS,
12752 const SCEV *FoundRHS) {
12755 if (
RHS == FoundRHS) {
12760 if (
LHS != FoundLHS)
12767 Value *Shiftee, *ShiftValue;
12769 using namespace PatternMatch;
12770 if (
match(SUFoundRHS->getValue(),
12772 auto *ShifteeS =
getSCEV(Shiftee);
12790bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12792 const SCEV *FoundLHS,
12793 const SCEV *FoundRHS,
12794 const Instruction *CtxI) {
12795 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12797 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12799 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12800 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12802 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12806template <
typename MinMaxExprType>
12808 const SCEV *Candidate) {
12813 return is_contained(MinMaxExpr->operands(), Candidate);
12826 const SCEV *LStart, *RStart, *Step;
12876bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12878 const SCEV *FoundLHS,
12879 const SCEV *FoundRHS,
12883 "LHS and RHS have different sizes?");
12886 "FoundLHS and FoundRHS have different sizes?");
12920 auto GetOpFromSExt = [&](
const SCEV *S) ->
const SCEV * {
12922 return Ext->getOperand();
12929 auto *OrigLHS =
LHS;
12930 auto *OrigFoundLHS = FoundLHS;
12931 LHS = GetOpFromSExt(
LHS);
12932 FoundLHS = GetOpFromSExt(FoundLHS);
12935 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12938 FoundRHS,
Depth + 1);
12951 if (!LHSAddExpr->hasNoSignedWrap())
12954 SCEVUse LL = LHSAddExpr->getOperand(0);
12955 SCEVUse LR = LHSAddExpr->getOperand(1);
12959 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12960 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12965 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12971 using namespace llvm::PatternMatch;
12990 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12998 auto *DTy = Denominator->getType();
12999 auto *FRHSTy = FoundRHS->
getType();
13000 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
13019 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
13030 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
13032 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
13040 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
13073bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
13077 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
13080 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
13083bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
13086 const SCEV *FoundLHS,
13087 const SCEV *FoundRHS) {
13123 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
13129bool ScalarEvolution::isImpliedCondOperandsViaRanges(
13130 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
13131 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
13145 ConstantRange FoundLHSRange =
13149 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
13156 return LHSRange.
icmp(Pred, ConstRHS);
13159bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
13172 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13180 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13183bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
13195 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13203 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13215const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13216 const SCEV *Stride,
13247 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13258 :
APIntOps::umax(MaxEnd, MinStart);
13265ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13266 const Loop *L,
bool IsSigned,
13267 bool ControlsOnlyExit,
bool AllowPredicates) {
13271 bool PredicatedIV =
false;
13276 auto canProveNUW = [&]() {
13279 if (!ControlsOnlyExit)
13300 Limit = Limit.
zext(OuterBitWidth);
13312 Type *Ty = ZExt->getType();
13323 if (!
IV && AllowPredicates) {
13328 PredicatedIV =
true;
13332 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13346 bool NoWrap = ControlsOnlyExit &&
any(
IV->getNoWrapFlags(WrapType));
13349 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13354 if (!PositiveStride) {
13406 auto wouldZeroStrideBeUB = [&]() {
13418 if (!wouldZeroStrideBeUB()) {
13422 }
else if (!NoWrap) {
13425 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13438 const SCEV *
Start =
IV->getStart();
13444 const SCEV *OrigStart =
Start;
13445 const SCEV *OrigRHS =
RHS;
13446 if (
Start->getType()->isPointerTy()) {
13457 const SCEV *End =
nullptr, *BECount =
nullptr,
13458 *BECountIfBackedgeTaken =
nullptr;
13461 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13462 any(RHSAddRec->getNoWrapFlags())) {
13475 const SCEV *RHSStart = RHSAddRec->getStart();
13476 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13488 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13497 BECountIfBackedgeTaken =
13502 if (BECount ==
nullptr) {
13507 const SCEV *MaxBECount = computeMaxBECountForLT(
13510 MaxBECount,
false , Predicates);
13517 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13544 const SCEV *Numerator =
13550 auto canProveRHSGreaterThanEqualStart = [&]() {
13569 auto *StartMinusOne =
13576 if (canProveRHSGreaterThanEqualStart()) {
13591 BECountIfBackedgeTaken =
13607 bool MayAddOverflow = [&] {
13653 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13667 if (!MayAddOverflow) {
13679 const SCEV *ConstantMaxBECount;
13680 bool MaxOrZero =
false;
13682 ConstantMaxBECount = BECount;
13683 }
else if (BECountIfBackedgeTaken &&
13688 ConstantMaxBECount = BECountIfBackedgeTaken;
13691 ConstantMaxBECount = computeMaxBECountForLT(
13699 const SCEV *SymbolicMaxBECount =
13701 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13705ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13706 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13707 bool ControlsOnlyExit,
bool AllowPredicates) {
13714 if (!
IV && AllowPredicates)
13721 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13725 bool NoWrap = ControlsOnlyExit &&
any(
IV->getNoWrapFlags(WrapType));
13738 if (!Stride->
isOne() && !NoWrap)
13739 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13742 const SCEV *
Start =
IV->getStart();
13743 const SCEV *End =
RHS;
13754 if (
Start->getType()->isPointerTy()) {
13789 const SCEV *ConstantMaxBECount =
13796 ConstantMaxBECount = BECount;
13797 const SCEV *SymbolicMaxBECount =
13800 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13806 if (
Range.isFullSet())
13811 if (!SC->getValue()->isZero()) {
13817 return ShiftedAddRec->getNumIterationsInRange(
13818 Range.subtract(SC->getAPInt()), SE);
13849 APInt ExitVal = (End +
A).udiv(
A);
13862 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13863 "Linear scev computation is off in a bad way!");
13894 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13919 Ty = Store->getValueOperand()->getType();
13921 Ty = Load->getType();
13934 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13936 SE->ConstantEvolutionLoopExitValue.erase(PN);
13937 SE->eraseValueFromMap(getValPtr());
13941void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13942 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13952 : CallbackVH(
V), SE(se) {}
13961 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13963 LoopDispositions(64), BlockDispositions(64) {
13975 F.getParent(), Intrinsic::experimental_guard);
13976 HasGuards = GuardDecl && !GuardDecl->use_empty();
13980 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13981 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13982 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13983 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13984 PendingMerges(
std::
move(Arg.PendingMerges)),
13985 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13986 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13987 PredicatedBackedgeTakenCounts(
13988 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13989 BECountUsers(
std::
move(Arg.BECountUsers)),
13990 ConstantEvolutionLoopExitValue(
13991 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13992 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13993 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13994 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13995 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13996 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13997 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13998 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13999 SignedRanges(
std::
move(Arg.SignedRanges)),
14000 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
14001 UniquePreds(
std::
move(Arg.UniquePreds)),
14002 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
14003 LoopUsers(
std::
move(Arg.LoopUsers)),
14004 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
14005 FirstUnknown(Arg.FirstUnknown) {
14006 Arg.FirstUnknown =
nullptr;
14015 Tmp->~SCEVUnknown();
14017 FirstUnknown =
nullptr;
14019 ExprValueMap.clear();
14020 ValueExprMap.clear();
14022 BackedgeTakenCounts.clear();
14023 PredicatedBackedgeTakenCounts.clear();
14025 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
14026 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
14027 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
14028 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
14050 L->getHeader()->printAsOperand(OS,
false);
14054 L->getExitingBlocks(ExitingBlocks);
14055 if (ExitingBlocks.
size() != 1)
14056 OS <<
"<multiple exits> ";
14060 OS <<
"backedge-taken count is ";
14063 OS <<
"Unpredictable backedge-taken count.";
14066 if (ExitingBlocks.
size() > 1)
14067 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14068 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
14076 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
14079 OS <<
"\n Predicates:\n";
14080 for (
const auto *
P : Predicates)
14088 L->getHeader()->printAsOperand(OS,
false);
14093 OS <<
"constant max backedge-taken count is ";
14096 OS <<
", actual taken count either this or zero.";
14098 OS <<
"Unpredictable constant max backedge-taken count. ";
14103 L->getHeader()->printAsOperand(OS,
false);
14108 OS <<
"symbolic max backedge-taken count is ";
14111 OS <<
", actual taken count either this or zero.";
14113 OS <<
"Unpredictable symbolic max backedge-taken count. ";
14117 if (ExitingBlocks.
size() > 1)
14118 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14119 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
14129 OS <<
"\n predicated symbolic max exit count for "
14130 << ExitingBlock->
getName() <<
": ";
14132 OS <<
"\n Predicates:\n";
14133 for (
const auto *
P : Predicates)
14143 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
14145 L->getHeader()->printAsOperand(OS,
false);
14148 OS <<
"Predicated backedge-taken count is ";
14151 OS <<
"Unpredictable predicated backedge-taken count.";
14153 OS <<
" Predicates:\n";
14154 for (
const auto *
P : Preds)
14159 auto *PredConstantMax =
14161 if (PredConstantMax != ConstantBTC) {
14163 "different predicated constant max BTC but no predicates");
14165 L->getHeader()->printAsOperand(OS,
false);
14168 OS <<
"Predicated constant max backedge-taken count is ";
14171 OS <<
"Unpredictable predicated constant max backedge-taken count.";
14173 OS <<
" Predicates:\n";
14174 for (
const auto *
P : Preds)
14179 auto *PredSymbolicMax =
14181 if (SymbolicBTC != PredSymbolicMax) {
14183 "Different predicated symbolic max BTC, but no predicates");
14185 L->getHeader()->printAsOperand(OS,
false);
14188 OS <<
"Predicated symbolic max backedge-taken count is ";
14191 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
14193 OS <<
" Predicates:\n";
14194 for (
const auto *
P : Preds)
14200 L->getHeader()->printAsOperand(OS,
false);
14224 OS <<
"Computable";
14234 OS <<
"DoesNotDominate";
14240 OS <<
"ProperlyDominates";
14257 OS <<
"Classifying expressions for: ";
14258 F.printAsOperand(OS,
false);
14273 const Loop *L = LI.getLoopFor(
I.getParent());
14288 OS <<
"\t\t" "Exits: ";
14291 OS <<
"<<Unknown>>";
14297 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14299 Iter->getHeader()->printAsOperand(OS,
false);
14307 InnerL->getHeader()->printAsOperand(OS,
false);
14318 OS <<
"Determining loop execution counts for: ";
14319 F.printAsOperand(OS,
false);
14327 auto &Values = LoopDispositions[S];
14328 for (
auto &V : Values) {
14329 if (V.getPointer() == L)
14334 auto &Values2 = LoopDispositions[S];
14336 if (V.getPointer() == L) {
14345ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14364 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14365 " dominate the contained loop's header?");
14393 bool HasVarying =
false;
14427 auto &Values = BlockDispositions[S];
14428 for (
auto &V : Values) {
14429 if (V.getPointer() == BB)
14434 auto &Values2 = BlockDispositions[S];
14436 if (V.getPointer() == BB) {
14445ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14475 bool Proper =
true;
14486 if (Instruction *
I =
14488 if (
I->getParent() == BB)
14490 if (DT.properlyDominates(
I->getParent(), BB))
14513void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14516 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14517 auto It = BECounts.find(L);
14518 if (It != BECounts.end()) {
14519 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14520 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14522 auto UserIt = BECountUsers.find(S);
14523 assert(UserIt != BECountUsers.end());
14528 BECounts.erase(It);
14536 while (!Worklist.
empty()) {
14538 auto Users = SCEVUsers.find(Curr);
14539 if (
Users != SCEVUsers.end())
14540 for (
const auto *User :
Users->second)
14541 if (ToForget.
insert(User).second)
14545 for (
const auto *S : ToForget)
14546 forgetMemoizedResultsImpl(S);
14548 for (
auto I = PredicatedSCEVRewrites.begin();
14549 I != PredicatedSCEVRewrites.end();) {
14550 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14551 if (ToForget.count(
Entry.first))
14552 PredicatedSCEVRewrites.erase(
I++);
14558void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14559 LoopDispositions.erase(S);
14560 BlockDispositions.erase(S);
14561 UnsignedRanges.erase(S);
14562 SignedRanges.erase(S);
14563 HasRecMap.erase(S);
14564 ConstantMultipleCache.erase(S);
14567 UnsignedWrapViaInductionTried.erase(AR);
14568 SignedWrapViaInductionTried.erase(AR);
14571 auto ExprIt = ExprValueMap.find(S);
14572 if (ExprIt != ExprValueMap.end()) {
14573 for (
Value *V : ExprIt->second) {
14574 auto ValueIt = ValueExprMap.find_as(V);
14575 if (ValueIt != ValueExprMap.end())
14576 ValueExprMap.erase(ValueIt);
14578 ExprValueMap.erase(ExprIt);
14581 auto ScopeIt = ValuesAtScopes.find(S);
14582 if (ScopeIt != ValuesAtScopes.end()) {
14583 for (
const auto &Pair : ScopeIt->second)
14586 std::make_pair(Pair.first, S));
14587 ValuesAtScopes.erase(ScopeIt);
14590 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14591 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14592 for (
const auto &Pair : ScopeUserIt->second)
14593 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14594 ValuesAtScopesUsers.erase(ScopeUserIt);
14597 auto BEUsersIt = BECountUsers.find(S);
14598 if (BEUsersIt != BECountUsers.end()) {
14600 auto Copy = BEUsersIt->second;
14601 for (
const auto &Pair : Copy)
14602 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14603 BECountUsers.erase(BEUsersIt);
14606 auto FoldUser = FoldCacheUser.find(S);
14607 if (FoldUser != FoldCacheUser.end())
14608 for (
auto &KV : FoldUser->second)
14609 FoldCache.erase(KV);
14610 FoldCacheUser.erase(S);
14614ScalarEvolution::getUsedLoops(
const SCEV *S,
14616 struct FindUsedLoops {
14617 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14618 : LoopsUsed(LoopsUsed) {}
14619 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14620 bool follow(
const SCEV *S) {
14626 bool isDone()
const {
return false; }
14629 FindUsedLoops
F(LoopsUsed);
14630 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14633void ScalarEvolution::getReachableBlocks(
14636 Worklist.
push_back(&F.getEntryBlock());
14637 while (!Worklist.
empty()) {
14639 if (!Reachable.
insert(BB).second)
14647 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14654 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14658 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14693 SCEVMapper SCM(SE2);
14695 SE2.getReachableBlocks(ReachableBlocks, F);
14697 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14715 while (!LoopStack.
empty()) {
14721 if (!ReachableBlocks.
contains(L->getHeader()))
14726 auto It = BackedgeTakenCounts.find(L);
14727 if (It == BackedgeTakenCounts.end())
14731 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14751 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14752 if (Delta && !Delta->
isZero()) {
14753 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14754 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14755 dbgs() <<
"New: " << *NewBECount <<
"\n";
14756 dbgs() <<
"Delta: " << *Delta <<
"\n";
14764 while (!Worklist.
empty()) {
14766 if (ValidLoops.
insert(L).second)
14767 Worklist.
append(L->begin(), L->end());
14769 for (
const auto &KV : ValueExprMap) {
14774 "AddRec references invalid loop");
14779 auto It = ExprValueMap.find(KV.second);
14780 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14781 dbgs() <<
"Value " << *KV.first
14782 <<
" is in ValueExprMap but not in ExprValueMap\n";
14787 if (!ReachableBlocks.
contains(
I->getParent()))
14789 const SCEV *OldSCEV = SCM.visit(KV.second);
14791 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14792 if (Delta && !Delta->
isZero()) {
14793 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14794 <<
"Old: " << *OldSCEV <<
"\n"
14795 <<
"New: " << *NewSCEV <<
"\n"
14796 <<
"Delta: " << *Delta <<
"\n";
14802 for (
const auto &KV : ExprValueMap) {
14803 for (
Value *V : KV.second) {
14804 const SCEV *S = ValueExprMap.lookup(V);
14806 dbgs() <<
"Value " << *V
14807 <<
" is in ExprValueMap but not in ValueExprMap\n";
14810 if (S != KV.first) {
14811 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14812 << *KV.first <<
"\n";
14819 for (
const auto &S : UniqueSCEVs) {
14824 auto It = SCEVUsers.find(
Op);
14825 if (It != SCEVUsers.end() && It->second.count(&S))
14827 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14828 <<
" is not being tracked!\n";
14834 for (
const auto &ValueAndVec : ValuesAtScopes) {
14836 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14837 const Loop *L = LoopAndValueAtScope.first;
14838 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14840 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14841 if (It != ValuesAtScopesUsers.end() &&
14844 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14845 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14851 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14852 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14853 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14854 const Loop *L = LoopAndValue.first;
14855 const SCEV *
Value = LoopAndValue.second;
14857 auto It = ValuesAtScopes.find(
Value);
14858 if (It != ValuesAtScopes.end() &&
14859 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14861 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14862 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14868 auto VerifyBECountUsers = [&](
bool Predicated) {
14870 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14871 for (
const auto &LoopAndBEInfo : BECounts) {
14872 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14873 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14875 auto UserIt = BECountUsers.find(S);
14876 if (UserIt != BECountUsers.end() &&
14877 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14879 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14880 <<
" missing from BECountUsers\n";
14887 VerifyBECountUsers(
false);
14888 VerifyBECountUsers(
true);
14891 for (
auto &[S, Values] : LoopDispositions) {
14892 for (
auto [
Loop, CachedDisposition] : Values) {
14894 if (CachedDisposition != RecomputedDisposition) {
14895 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14896 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14897 << RecomputedDisposition <<
"\n";
14904 for (
auto &[S, Values] : BlockDispositions) {
14905 for (
auto [BB, CachedDisposition] : Values) {
14907 if (CachedDisposition != RecomputedDisposition) {
14908 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14909 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14910 <<
", actual " << RecomputedDisposition <<
"\n";
14917 for (
auto [
FoldID, Expr] : FoldCache) {
14918 auto I = FoldCacheUser.find(Expr);
14919 if (
I == FoldCacheUser.end()) {
14920 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14925 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14929 for (
auto [Expr, IDs] : FoldCacheUser) {
14930 for (
auto &
FoldID : IDs) {
14933 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14938 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14939 <<
" != " << *Expr <<
"!\n";
14950 for (
auto [S, Multiple] : ConstantMultipleCache) {
14952 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14953 Multiple.
urem(RecomputedMultiple) != 0 &&
14954 RecomputedMultiple.
urem(Multiple) != 0)) {
14955 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14956 << *S <<
" : Computed " << RecomputedMultiple
14957 <<
" but cache contains " << Multiple <<
"!\n";
14965 FunctionAnalysisManager::Invalidator &Inv) {
14997 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14998 <<
F.getName() <<
"':\n";
15004 "Scalar Evolution Analysis",
false,
true)
15053 const SCEV *LHS,
const SCEV *RHS) {
15055 assert(LHS->getType() == RHS->getType() &&
15056 "Type mismatch between LHS and RHS");
15059 ID.AddInteger(Pred);
15060 ID.AddPointer(LHS);
15061 ID.AddPointer(RHS);
15062 void *IP =
nullptr;
15063 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15067 UniquePreds.InsertNode(Eq, IP);
15078 ID.AddInteger(AddedFlags);
15079 void *IP =
nullptr;
15080 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15082 auto *OF =
new (SCEVAllocator)
15084 UniquePreds.InsertNode(OF, IP);
15104 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
15105 return Rewriter.visit(S);
15111 for (
const auto *Pred : U->getPredicates())
15113 if (IPred->getLHS() == Expr &&
15115 return IPred->getRHS();
15117 if (IPred->getLHS() == Expr &&
15118 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15119 return IPred->getRHS();
15122 return convertToAddRecWithPreds(Expr);
15125 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
15141 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
15158 explicit SCEVPredicateRewriter(
15159 const Loop *L, ScalarEvolution &SE,
15160 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15161 const SCEVPredicate *Pred)
15162 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
15164 bool addOverflowAssumption(
const SCEVPredicate *
P) {
15167 return Pred && Pred->
implies(
P, SE);
15173 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
15176 return addOverflowAssumption(
A);
15185 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
15189 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15191 if (!PredicatedRewrite)
15193 for (
const auto *
P : PredicatedRewrite->second){
15196 if (L != WP->getExpr()->getLoop())
15199 if (!addOverflowAssumption(
P))
15202 return PredicatedRewrite->first;
15205 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15206 const SCEVPredicate *Pred;
15215 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15222 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15242 if (!Step->
isOne())
15267 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15268 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15281 return Op->LHS == LHS &&
Op->RHS == RHS;
15288 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15290 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15315 const SCEV *Start = AR->getStart();
15316 const SCEV *OpStart =
Op->AR->getStart();
15321 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15330 const SCEV *Step = AR->getStepRecurrence(SE);
15331 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15384 if (Step->getValue()->getValue().isNonNegative())
15388 return ImpliedFlags;
15395 for (
const auto *
P : Preds)
15408 return this->implies(I, SE);
15416 for (
const auto *Pred : Preds)
15417 Pred->print(OS,
Depth);
15422 for (
const auto *Pred : Set->Preds)
15430 bool CheckImplies = Preds.
size() < 16;
15433 if (CheckImplies &&
implies(
N, SE))
15439 for (
auto *
P : Preds) {
15440 if (CheckImplies &&
N->implies(
P, SE))
15444 Preds = std::move(PrunedPreds);
15445 Preds.push_back(
N);
15452 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15457 for (
const auto *
Op :
Ops)
15462 SCEVUsers[
Op].insert(
User);
15471 SCEVUsers[
Op].insert(
User);
15475 const SCEV *Expr = SE.getSCEV(V);
15480 RewriteEntry &Entry = RewriteMap[Expr];
15483 if (Entry.second && Generation == Entry.first)
15484 return Entry.second;
15489 Expr = Entry.second;
15491 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15492 Entry = {Generation, NewSCEV};
15498 if (!BackedgeCount) {
15500 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15501 for (
const auto *
P : Preds)
15504 return BackedgeCount;
15508 if (!SymbolicMaxBackedgeCount) {
15510 SymbolicMaxBackedgeCount =
15511 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15512 for (
const auto *
P : Preds)
15515 return SymbolicMaxBackedgeCount;
15519 if (!SmallConstantMaxTripCount) {
15521 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15522 for (
const auto *
P : Preds)
15525 return *SmallConstantMaxTripCount;
15529 if (Preds->implies(&Pred, SE))
15534 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15535 updateGeneration();
15542void PredicatedScalarEvolution::updateGeneration() {
15544 if (++Generation == 0) {
15545 for (
auto &
II : RewriteMap) {
15546 const SCEV *Rewritten =
II.second.second;
15563 auto II = FlagsMap.insert({V, Flags});
15576 auto II = FlagsMap.find(V);
15578 if (
II != FlagsMap.end())
15587 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15592 for (
const auto *
P : NewPreds)
15595 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15601 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15604 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15605 for (
auto I :
Init.FlagsMap)
15606 FlagsMap.insert(
I);
15611 for (
auto *BB : L.getBlocks())
15612 for (
auto &
I : *BB) {
15613 if (!SE.isSCEVable(
I.getType()))
15616 auto *Expr = SE.getSCEV(&
I);
15617 auto II = RewriteMap.find(Expr);
15619 if (
II == RewriteMap.end())
15623 if (
II->second.second == Expr)
15628 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15636 LoopGuards Guards(SE);
15644void ScalarEvolution::LoopGuards::collectFromPHI(
15652 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15653 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15667 auto &RewriteMap =
G->second.RewriteMap;
15668 if (RewriteMap.empty())
15670 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15671 if (S == RewriteMap.end())
15677 return {C0,
SM->getSCEVType()};
15680 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15681 MinMaxPattern
P2) -> MinMaxPattern {
15682 auto [C1,
T1] =
P1;
15683 auto [C2, T2] =
P2;
15684 if (!C1 || !C2 ||
T1 != T2)
15688 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15690 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15692 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15694 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15699 auto P = GetMinMaxConst(0);
15700 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15703 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15706 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15709 Guards.RewriteMap.insert({
LHS,
RHS});
15717 const APInt &DivisorVal,
15719 const APInt *ExprVal;
15732 const APInt &DivisorVal,
15734 const APInt *ExprVal;
15742 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15756 const SCEV *URemRHS =
nullptr;
15760 const SCEV *Multiple =
15762 DivInfo[URemLHS] = Multiple;
15764 Multiples[URemLHS] =
C->getAPInt();
15784 auto IsMinMaxSCEVWithNonNegativeConstant =
15788 if (
MinMax->getNumOperands() != 2)
15791 if (
C->getAPInt().isNegative())
15793 SCTy =
MinMax->getSCEVType();
15802 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15804 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15809 auto *DivisibleExpr =
15817void ScalarEvolution::LoopGuards::collectFromBlock(
15819 const BasicBlock *
Block,
const BasicBlock *Pred,
15827 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15838 &ExprsToRewrite]() {
15839 const SCEVConstant *C1;
15852 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15854 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15855 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15863 if (MatchRangeCheckIdiom())
15880 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15882 if (From == FromRewritten)
15884 RewriteMap[From] = To;
15890 auto GetMaybeRewritten = [&](
const SCEV *S) {
15891 return RewriteMap.lookup_or(S, S);
15894 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15896 const APInt &DividesBy =
15911 switch (Predicate) {
15940 SmallPtrSet<const SCEV *, 16> Visited;
15942 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15946 while (!Worklist.
empty()) {
15950 if (!Visited.
insert(From).second)
15952 const SCEV *FromRewritten = GetMaybeRewritten(From);
15953 const SCEV *To =
nullptr;
15955 switch (Predicate) {
15960 EnqueueOperands(
UMax);
15966 EnqueueOperands(
SMax);
15972 EnqueueOperands(
UMin);
15978 EnqueueOperands(
SMin);
15986 const SCEV *OneAlignedUp =
15988 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
16000 const SCEVConstant *
C;
16009 Guards.NotEqual.insert({
LHS,
RHS});
16018 AddRewrite(From, FromRewritten, To);
16035 SE.F.
getParent(), Intrinsic::experimental_guard);
16037 for (
const auto *GU : GuardDecl->users())
16039 if (Guard->getFunction() ==
Block->getParent() &&
16048 unsigned NumCollectedConditions = 0;
16050 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
16052 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
16054 const CondBrInst *LoopEntryPredicate =
16056 if (!LoopEntryPredicate)
16061 NumCollectedConditions++;
16065 if (
Depth > 0 && NumCollectedConditions == 2)
16073 if (Pair.second->hasNPredecessorsOrMore(2) &&
16075 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
16076 for (
auto &Phi : Pair.second->phis())
16087 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
16088 SmallVector<Value *, 8> Worklist;
16089 SmallPtrSet<Value *, 8> Visited;
16091 while (!Worklist.
empty()) {
16098 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
16122 DenseMap<const SCEV *, APInt> Multiples;
16124 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
16131 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
16132 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
16136 for (
const auto &[K, Divisor] : Multiples) {
16137 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
16138 Guards.RewriteMap[
K] =
16140 Guards.
rewrite(K), Divisor, SE),
16149 Guards.PreserveNUW =
true;
16150 Guards.PreserveNSW =
true;
16151 for (
const SCEV *Expr : ExprsToRewrite) {
16152 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16153 Guards.PreserveNUW &=
16155 Guards.PreserveNSW &=
16162 if (ExprsToRewrite.size() > 1) {
16163 for (
const SCEV *Expr : ExprsToRewrite) {
16164 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16165 Guards.RewriteMap.erase(Expr);
16166 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
16175 class SCEVLoopGuardRewriter
16186 NotEqual(Guards.NotEqual) {
16187 if (Guards.PreserveNUW)
16189 if (Guards.PreserveNSW)
16196 return Map.lookup_or(Expr, Expr);
16200 if (
const SCEV *S = Map.lookup(Expr))
16207 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16208 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16209 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16211 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16212 if (
const SCEV *S = Map.lookup(NarrowExt))
16213 return SE.getZeroExtendExpr(S, Ty);
16214 Bitwidth = Bitwidth / 2;
16222 if (
const SCEV *S = Map.lookup(Expr))
16229 if (
const SCEV *S = Map.lookup(Expr))
16235 if (
const SCEV *S = Map.lookup(Expr))
16243 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16248 if (NotEqual.contains({LHS, RHS})) {
16250 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16251 return SE.getUMaxExpr(OneAlignedUp, S);
16258 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16269 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16270 return SE.getAddExpr(
16273 if (
const SCEV *S = Map.lookup(
Add))
16274 return SE.getAddExpr(Expr->
getOperand(0), S);
16286 : SE.getAddExpr(Operands,
16302 : SE.getMulExpr(Operands,
16308 if (RewriteMap.empty() && NotEqual.empty())
16311 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16312 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool isSigned(unsigned Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
Value * getPointer(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static constexpr unsigned SM(unsigned Version)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static DominatorTree getDomTree(Function &F)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool hasHugeExpression(ArrayRef< SCEVUse > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool RangeRefPHIAllowedOperands(DominatorTree &DT, PHINode *PHI)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static BinaryOperator * getCommonInstForPHI(PHINode *PN)
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< SCEVUse > Ops, SCEV::NoWrapFlags Flags)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool CollectAddOperandsWithScales(SmallDenseMap< SCEVUse, APInt, 16 > &M, SmallVectorImpl< SCEVUse > &NewOps, APInt &AccumulatedConstant, ArrayRef< SCEVUse > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< SCEVUse > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static void GroupByComplexity(SmallVectorImpl< SCEVUse > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static bool getOperandsForSelectLikePHI(DominatorTree &DT, PHINode *PN, Value *&Cond, Value *&LHS, Value *&RHS)
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static bool BrPHIToSelect(DominatorTree &DT, CondBrInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing the numBits high bits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
Conditional Branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * getNot(Constant *C)
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This class describes a reference to an interned FoldingSetNodeID, which can be a useful to store node...
This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This is the base class for unary cast operator classes.
SCEVUse getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
ArrayRef< SCEVUse > operands() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
SCEVUse getOperand(unsigned i) const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
unsigned short getExpressionSize() const
SCEVNoWrapFlags NoWrapFlags
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
static constexpr auto FlagNUW
LLVM_ABI void computeAndSetCanonical(ScalarEvolution &SE)
Compute and set the canonical SCEV, by constructing a SCEV with the same operands,...
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
const SCEV * CanonicalSCEV
Pointer to the canonical version of the SCEV, i.e.
static constexpr auto FlagAnyWrap
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
static constexpr auto FlagNSW
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
static constexpr auto FlagNW
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getSMinExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, SCEVUse &LHS, SCEVUse &RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getUMaxExpr(SCEVUse LHS, SCEVUse RHS)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags Mask)
Convenient NoWrapFlags manipulation.
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getSMaxExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getUDivExactExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< SCEVUse > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential=false)
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
constexpr bool any(E Val)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_bind< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
match_bind< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
match_bind< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned short computeExpressionSize(ArrayRef< SCEVUse > Args)
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken