#include "llvm/Config/llvm-config.h"

using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

          "Number of loop exits with predictable exit counts");
          "Number of loop exits without predictable exit counts");
          "Number of loops with trip counts computed by force");

#ifdef EXPENSIVE_CHECKS
    cl::desc("Maximum number of iterations SCEV will "
             "symbolically execute a constant "

    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
    cl::desc("Enable stricter verification when -verify-scev is passed"));
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),

    cl::desc("Threshold for inlining multiplication operands into a SCEV"),

    cl::desc("Threshold for inlining addition operands into a SCEV"),

    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),

    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),

    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),

    cl::desc("Maximum depth of recursive arithmetics"),

    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,

    cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),

    cl::desc("Max coefficients in AddRec during evolving"),

    cl::desc("Size of the expression which is considered huge"),

    cl::desc("Threshold for switching to iteratively computing SCEV ranges"),

    cl::desc("When printing analysis, include information on every instruction"));

    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),

    cl::desc("Handle <= and >= in finite loops"),

    "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
    cl::desc("Infer nuw/nsw flags using context where suitable"),
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
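// Only SCEV::dump() is guarded by the !NDEBUG / LLVM_ENABLE_DUMP check above;
// SCEV::print() below is always built and renders each expression kind in the
// textual form used by -analyze output, e.g. "(zext i8 %x to i32)".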
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);

    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";

    OS << "(trunc " << *Op->getType() << " " << *Op << " to "

    OS << "(zext " << *Op->getType() << " " << *Op << " to "

    OS << "(sext " << *Op->getType() << " " << *Op << " to "

    const char *OpStr = nullptr;
      OpStr = " umin_seq ";
    ListSeparator LS(OpStr);

    cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);

    OS << "***COULDNOTCOMPUTE***";
    return cast<SCEVConstant>(this)->getType();
    return cast<SCEVVScale>(this)->getType();
    return cast<SCEVCastExpr>(this)->getType();
    return cast<SCEVAddRecExpr>(this)->getType();
    return cast<SCEVMulExpr>(this)->getType();
    return cast<SCEVMinMaxExpr>(this)->getType();
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
    return cast<SCEVAddExpr>(this)->getType();
    return cast<SCEVUDivExpr>(this)->getType();
    return cast<SCEVUnknown>(this)->getType();

    return cast<SCEVCastExpr>(this)->operands();
    return cast<SCEVNAryExpr>(this)->operands();
    return cast<SCEVUDivExpr>(this)->operands();
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();

  if (!Mul)
    return false;
  if (!SC)
    return false;
  return SC->getAPInt().isNegative();
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);

         "Must be a non-bit-width-changing pointer-to-integer cast!");
         "Cannot truncate non-integer value!");
         "Cannot zero extend non-integer value!");
         "Cannot sign extend non-integer value!");
void SCEVUnknown::deleted() {
  SE->forgetMemoizedResults(this);
  SE->UniqueSCEVs.RemoveNode(this);

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  SE->forgetMemoizedResults(this);
  SE->UniqueSCEVs.RemoveNode(this);
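// SCEVUnknown is a CallbackVH: when the wrapped IR Value is deleted or RAUW'd,
// the callbacks above drop memoized results and remove the now-stale node from
// the uniquing set before the handle is updated.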
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

    return (int)LID - (int)RID;

  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();

    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());

  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    if (LParent != RParent) {
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;

    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(LNumOps)) {
static std::optional<int>

    return (int)LType - (int)RType;

    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;

    const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
    const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
    return LTy->getBitWidth() - RTy->getBitWidth();

    if (LLoop != RLoop) {
      assert(LHead != RHead && "Two loops share the same header?");
             "No dominance between recurrences used by one SCEV?");

    unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
                                     ROps[i], DT, Depth + 1);
  if (Ops.size() < 2)
    return;

    return Complexity && *Complexity < 0;

  if (Ops.size() == 2) {
    if (IsLessComplex(RHS, LHS))
    return IsLessComplex(LHS, RHS);

  for (unsigned i = 0, e = Ops.size(); i != e - 2; ++i) {
    const SCEV *S = Ops[i];

    for (unsigned j = i + 1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {

    if (i == e - 2)
      return;
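// GroupByComplexity sorts operands by expression complexity so that
// structurally identical subexpressions become adjacent, letting the n-ary
// folders (getAddExpr, getMulExpr, ...) find duplicates in a single linear scan.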
  APInt OddFactorial(W, 1);

  for (unsigned i = 3; i <= K; ++i) {
    unsigned TwoFactors = Mult.countr_zero();
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;

  unsigned CalculationBits = W + T;

  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.trunc(W);

  for (unsigned i = 1; i != K; ++i) {

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    if (isa<SCEVCouldNotCompute>(Coeff))
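// BinomialCoefficient computes BC(It, K) in a wide enough APInt to avoid
// intermediate overflow; evaluateAtIteration then evaluates an add recurrence
// {A,+,B,+,C,...} at iteration It as the sum of BC(It, k) * operand(k).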
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  if (!Op->getType()->isPointerTy())

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))

  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    if (isa<ConstantPointerNull>(U->getValue()))

    SCEV *S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  class SCEVPtrToIntSinkingRewriter

      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Base::visit(S);

      bool Changed = false;

      bool Changed = false;

             "Should only reach pointer-typed SCEVUnknown's.");

  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");

  if (isa<SCEVCouldNotCompute>(IntOp))
         "This is not a truncating conversion!");
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  UniqueSCEVs.InsertNode(S, IP);

  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
      if (isa<SCEVMulExpr>(Op))

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))

    for (const SCEV *Op : AddRec->operands())

  UniqueSCEVs.InsertNode(S, IP);
struct ExtendOpTraitsBase {

template <typename ExtendOp> struct ExtendOpTraits {

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
template <typename ExtendOpTy>
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);

  auto PreStartFlags =

  const SCEV *OperandExtendedStart =
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {

  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&

template <typename ExtendOpTy>
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);

                    (SE->*GetExtendExpr)(PreStart, Ty, Depth));
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,

  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);

  for (unsigned Delta : {-2, -1, 1, 2}) {

    ID.AddPointer(PreStart);
    ID.AddPointer(Step);

    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {

      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
  const unsigned BitWidth = C.getBitWidth();

                                            const APInt &ConstantStart,

    auto &UserIDs = FoldCacheUser[I.first->second];
    assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
    for (unsigned I = 0; I != UserIDs.size(); ++I)
      if (UserIDs[I] == ID) {

  I.first->second = S;
  auto R = FoldCacheUser.insert({S, {}});
  R.first->second.push_back(ID);
         "This is not an extending conversion!");
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  auto Iter = FoldCache.find(ID);
  if (Iter != FoldCache.end())
    return Iter->second;

  if (!isa<SCEVZeroExtendExpr>(S))

         "This is not an extending conversion!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  UniqueSCEVs.InsertNode(S, IP);

    const SCEV *X = ST->getOperand();
  if (AR->isAffine()) {
    const SCEV *Start = AR->getStart();
    const SCEV *Step = AR->getStepRecurrence(*this);
    const Loop *L = AR->getLoop();

    if (AR->hasNoUnsignedWrap()) {
          getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

    if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
      const SCEV *CastedMaxBECount =
      if (MaxBECount == RecastedMaxBECount) {
        const SCEV *WideMaxBECount =
        const SCEV *OperandExtendedAdd =
        if (ZAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

        OperandExtendedAdd =
        if (ZAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

    if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
      auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
      if (AR->hasNoUnsignedWrap()) {
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

        Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

    if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
      const APInt &C = SC->getAPInt();
      const SCEV *SResidual =

    if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
          getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->hasNoUnsignedWrap()) {
      for (const auto *Op : SA->operands())

    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const SCEV *SResidual =

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    if (SM->hasNoUnsignedWrap()) {
      for (const auto *Op : SM->operands())

    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
                MulLHS->getAPInt().logBase2();

  if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    for (auto *Operand : MinMax->operands())
    if (isa<SCEVUMinExpr>(MinMax))

  if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
    assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
    for (auto *Operand : MinMax->operands())

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);
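// For an affine {Start,+,Step}, the zero-extension is pushed into the
// recurrence (producing {zext Start,+,zext Step}) only when <nuw> is already
// known or can be established above via the backedge-taken count or the
// induction-based proofs; otherwise the zext stays wrapped around the addrec.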
         "This is not an extending conversion!");
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  auto Iter = FoldCache.find(ID);
  if (Iter != FoldCache.end())
    return Iter->second;

  if (!isa<SCEVSignExtendExpr>(S))

         "This is not an extending conversion!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);

    const SCEV *X = ST->getOperand();
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->hasNoSignedWrap()) {
      for (const auto *Op : SA->operands())

    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const SCEV *SResidual =

  if (AR->isAffine()) {
    const SCEV *Start = AR->getStart();
    const SCEV *Step = AR->getStepRecurrence(*this);
    const Loop *L = AR->getLoop();

    if (AR->hasNoSignedWrap()) {
          getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);

    if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
      const SCEV *CastedMaxBECount =
      if (MaxBECount == RecastedMaxBECount) {
        const SCEV *WideMaxBECount =
        const SCEV *OperandExtendedAdd =
        if (SAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,

        OperandExtendedAdd =
        if (SAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,

    auto NewFlags = proveNoSignedWrapViaInduction(AR);
    if (AR->hasNoSignedWrap()) {
          getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);

    if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
      const APInt &C = SC->getAPInt();
      const SCEV *SResidual =

    if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
          getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);

  if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    for (auto *Operand : MinMax->operands())
    if (isa<SCEVSMinExpr>(MinMax))

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);
         "This is not an extending conversion!");
         "This is not a conversion to a SCEVable type!");

    if (SC->getAPInt().isNegative())

    const SCEV *NewOp = T->getOperand();

  if (!isa<SCEVZeroExtendExpr>(ZExt))
  if (!isa<SCEVSignExtendExpr>(SExt))

    for (const SCEV *Op : AR->operands())

  if (isa<SCEVSMaxExpr>(Op))
                              APInt &AccumulatedConstant,

  bool Interesting = false;

  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
    AccumulatedConstant += Scale * C->getAPInt();

  for (; i != Ops.size(); ++i) {
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
            Add->operands(), NewScale, SE);

        auto Pair = M.insert({Key, NewScale});
          Pair.first->second += NewScale;

      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
        Pair.first->second += Scale;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:

  auto *NarrowTy = cast<IntegerType>(LHS->getType());

  const SCEV *A = (this->*Extension)(
  const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
  const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);

  if (BinOp == Instruction::Mul)
  auto *RHSC = dyn_cast<SCEVConstant>(RHS);

  APInt C = RHSC->getAPInt();
  unsigned NumBits = C.getBitWidth();
  bool IsSub = (BinOp == Instruction::Sub);
  bool IsNegativeConst = (Signed && C.isNegative());
  bool OverflowDown = IsSub ^ IsNegativeConst;

  if (IsNegativeConst) {

    APInt Limit = Min + Magnitude;
    APInt Limit = Max - Magnitude;
std::optional<SCEV::NoWrapFlags>
    return std::nullopt;

  bool Deduced = false;

  if (OBO->getOpcode() != Instruction::Add &&
    return std::nullopt;

                      false, LHS, RHS, CtxI)) {

    return std::nullopt;

  using namespace std::placeholders;

  assert(CanAnalyze && "don't call from other places!");

  auto IsKnownNonNegative = [&](const SCEV *S) {

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      isa<SCEVConstant>(Ops[0])) {

        return Instruction::Add;
        return Instruction::Mul;

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

            Opcode, C, OBO::NoSignedWrap);
            Opcode, C, OBO::NoUnsignedWrap);

      Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))

    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
      if (UDiv->getOperand(1) == Ops[1])
    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
      if (UDiv->getOperand(1) == Ops[0])
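// getStrengthenedNoWrapFlagsFromBinOp reuses nuw/nsw from the defining
// OverflowingBinaryOperator and may additionally prove them with willNotOverflow
// at the context instruction, so SCEV flags can be stronger than the IR flags.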
         "only nuw or nsw allowed");
  if (Ops.size() == 1)
    return Ops[0];

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
           "SCEVAddExpr operand types don't match!");

      Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
  assert(NumPtrs <= 1 && "add has at most one pointer operand");

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2)
        return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);

    if (LHSC->getValue()->isZero()) {
    if (Ops.size() == 1)
      return Ops[0];

    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));

  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e - 1; ++i)
    if (Ops[i] == Ops[i+1]) {
      while (i+Count != e && Ops[i+Count] == Ops[i])
      if (Ops.size() == Count)
      --i; e -= Count - 1;
  auto FindTruncSrcType = [&]() -> Type * {
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();

  if (auto *SrcType = FindTruncSrcType()) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
        if (T->getOperand()->getType() != SrcType) {
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {

    if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
  if (Ops.size() == 2) {
    const SCEV *A = Ops[0];
    const SCEV *B = Ops[1];
    auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
    auto *C = dyn_cast<SCEVConstant>(A);
    if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
      auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
      auto C2 = C->getAPInt();

      APInt ConstAdd = C1 + C2;
      auto AddFlags = AddExpr->getNoWrapFlags();

          ConstAdd.abs().ule(C1.abs())) {

  if (Ops.size() == 2) {
    if (Mul && Mul->getNumOperands() == 2 &&
        Mul->getOperand(0)->isAllOnesValue()) {
      if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
  bool DeletedAdd = false;
      CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());

  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    struct APIntCompare {

    std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
    for (const SCEV *NewOp : NewOps)
      MulOpLists[M.find(NewOp)->second].push_back(NewOp);

    if (AccumulatedConstant != 0)
    for (auto &MulOp : MulOpLists) {
      if (MulOp.first == 1) {
      } else if (MulOp.first != 0) {

    if (Ops.size() == 1)

  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
                Mul->operands().take_front(MulOp));
          if (Ops.size() == 2)
            return OuterMul;
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
                  Mul->operands().take_front(MulOp));
                  OtherMul->operands().take_front(OMulOp));
            const SCEV *InnerMulSum =
            if (Ops.size() == 2)
              return OuterMul;

  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)

    if (!LIOps.empty()) {
      auto *DefI = getDefiningScopeBound(LIOps);
      if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))

      if (Ops.size() == 1)
        return NewRec;

      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {

    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
              if (i >= AddRecOps.size()) {
                append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
                  AddRecOps[i], OtherAddRec->getOperand(i)};
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;

  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
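// The central getAddExpr canonicalization: a loop-invariant addend is folded
// into the addrec start, i.e. X + {A,+,B}<L> becomes {X+A,+,B}<L> when X is
// invariant in L and execution is guaranteed to reach the addrec's scope, and
// addrecs over the same loop are merged operand-wise.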
  for (const SCEV *Op : Ops)
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);

  for (const SCEV *Op : Ops)
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);
    LoopUsers[L].push_back(S);

  for (const SCEV *Op : Ops)
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
    UniqueSCEVs.InsertNode(S, IP);
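// All SCEV nodes are interned in the UniqueSCEVs folding set and allocated from
// SCEVAllocator, so structurally equal expressions share one node and can be
// compared by pointer equality.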
  if (j > 1 && k / j != i) Overflow = true;

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);

  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);

    bool isDone() const {
      return FoundConstant;

  FindConstantInAddMulChain F;
  ST.visitAll(StartExpr);
  return F.FoundConstant;
         "only nuw or nsw allowed");
  if (Ops.size() == 1)
    return Ops[0];

  Type *ETy = Ops[0]->getType();
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
           "SCEVMulExpr operand types don't match!");

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
      Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
      if (Ops.size() == 2)
        return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);

    if (LHSC->getValue()->isZero())
    if (LHSC->getValue()->isOne()) {

    if (Ops.size() == 1)

    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      if (Ops[0]->isAllOnesValue()) {
        bool AnyFolded = false;
        for (const SCEV *AddOp : Add->operands()) {
          if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
      } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
                              AddRec->getNoWrapFlags(FlagsMask));
  bool DeletedMul = false;

  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)

    if (!LIOps.empty()) {
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {

      if (Ops.size() == 1)
        return NewRec;

      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {

    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);

      bool Overflow = false;
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
               z < ze && !Overflow; ++z) {
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
              Coeff = Coeff1*Coeff2;

      if (Ops.size() == 2)
        return NewAddRec;
      Ops[Idx] = NewAddRec;
      Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;

      AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);

  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
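// When two addrecs over the same loop are multiplied, the coefficients of the
// resulting addrec are built from the binomial (Choose) convolution computed
// above, with umul_ov flagging any overflow of the 64-bit coefficient math.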
         "SCEVURemExpr operand types don't match!");

    if (RHSC->getValue()->isOne())
    if (RHSC->getAPInt().isPowerOf2()) {

         "SCEVUDivExpr operand can't be pointer!");
         "SCEVUDivExpr operand types don't match!");

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))

    if (LHSC->getValue()->isZero())
    if (RHSC->getValue()->isOne())
    if (!RHSC->getValue()->isZero()) {
      unsigned LZ = RHSC->getAPInt().countl_zero();
      if (!RHSC->getAPInt().isPowerOf2())
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
        const APInt &StepInt = Step->getAPInt();
        const APInt &DivInt = RHSC->getAPInt();
        if (!StepInt.urem(DivInt) &&
          for (const SCEV *Op : AR->operands())

        const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
        if (StartC && !DivInt.urem(StepInt) &&
          const APInt &StartRem = StartInt.urem(StepInt);
          if (StartRem != 0) {
            const SCEV *NewLHS =
            if (LHS != NewLHS) {

              if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))

        for (const SCEV *Op : M->operands())
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
          const SCEV *Op = M->getOperand(i);
          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {

        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);

        for (const SCEV *Op : A->operands())
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
          if (isa<SCEVUDivExpr>(Op) ||
        if (Operands.size() == A->getNumOperands())

      return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);
  if (!Mul || !Mul->hasNoUnsignedWrap())

  if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
    if (LHSCst == RHSCst) {

      APInt Factor = gcd(LHSCst, RHSCst);
          cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
          cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));

        Mul = dyn_cast<SCEVMulExpr>(LHS);

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
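// Illustrative case of the exact-division fold: with a <nuw> multiply,
// (8 * X) /u 4 divides both constants by their common factor and yields 2 * X.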
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
           "SCEVAddRecExpr operand is not loop-invariant!");

    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
      Operands[0] = NestedAR->getStart();

      bool AllInvariant = all_of(

        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {

          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);

  return getOrCreateAddRecExpr(Operands, L, Flags);
  const bool AssumeInBoundsFlags = [&]() {
    if (!GEP->isInBounds())

    auto *GEPI = dyn_cast<Instruction>(GEP);
    return GEPI && isSCEVExprNeverPoison(GEPI);

  bool FirstIter = true;
  for (const SCEV *IndexExpr : IndexExprs) {
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      Offsets.push_back(FieldOffset);
      CurTy = STy->getTypeAtIndex(Index);

        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();

      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
      Offsets.push_back(LocalOffset);

  if (Offsets.empty())

         "GEP should not change type mid-flight.");
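// A GEP is modeled as Base + sum(Index_i * ElementSize_i) plus constant field
// offsets for struct members; an inbounds GEP whose result cannot be poison
// lets the offset arithmetic carry no-wrap flags.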
SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
  ID.AddInteger(SCEVType);
  for (const SCEV *Op : Ops)
  return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
  assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)
    return Ops[0];

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
           "Operand types don't match!");
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");

  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      if (Ops.size() == 1)
        return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
    } else if (IsMax ? IsMaxV : IsMinV) {

    if (Ops.size() == 1)
      return Ops[0];

  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)

    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {

  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],

  if (Ops.size() == 1)
    return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  ID.AddInteger(Kind);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
    return ExistingSCEV;
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
  UniqueSCEVs.InsertNode(S, IP);
class SCEVSequentialMinMaxDeduplicatingVisitor final
    : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
                         std::optional<const SCEV *>> {
  using RetVal = std::optional<const SCEV *>;

  bool canRecurseInto(SCEVTypes Kind) const {
    return RootKind == Kind || NonSequentialRootKind == Kind;

  RetVal visitAnyMinMaxExpr(const SCEV *S) {
    assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
           "Only for min/max expressions.");

    if (!canRecurseInto(Kind))

    auto *NAry = cast<SCEVNAryExpr>(S);
    bool Changed = visit(Kind, NAry->operands(), NewOps);

      return std::nullopt;

    return isa<SCEVSequentialMinMaxExpr>(S)

  RetVal visit(const SCEV *S) {
    if (!SeenOps.insert(S).second)
      return std::nullopt;
    return Base::visit(S);

      : SE(SE), RootKind(RootKind),
        NonSequentialRootKind(

    bool Changed = false;
    for (const SCEV *Op : OrigOps) {
      RetVal NewOp = visit(Op);

    NewOps = std::move(Ops);

  RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }

  RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }

  RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }

  RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }

  RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);

  RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }
struct SCEVPoisonCollector {
  bool LookThroughMaybePoisonBlocking;
  SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
      : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}

  bool follow(const SCEV *S) {
    if (!LookThroughMaybePoisonBlocking &&

    if (auto *SU = dyn_cast<SCEVUnknown>(S)) {

  bool isDone() const { return false; }

  SCEVPoisonCollector PC1(true);
  if (PC1.MaybePoison.empty())

  SCEVPoisonCollector PC2(false);
  return PC2.MaybePoison.contains(S);

  SCEVPoisonCollector PC(false);
  assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
         "Not a SCEVSequentialMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
           "Operand types don't match!");
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");

  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))

  SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
  bool Changed = Deduplicator.visit(Kind, Ops, Ops);

    bool DeletedAny = false;
      if (Ops[Idx]->getSCEVType() != Kind) {
      const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
                 SMME->operands().end());

  const SCEV *SaturationPoint;

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {

  ID.AddInteger(Kind);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
    return ExistingSCEV;

  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
  UniqueSCEVs.InsertNode(S, IP);
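// umin_seq is the short-circuiting ("sequential") form of umin: evaluation
// stops at the first operand equal to zero, so poison in later operands must
// not be propagated. That is why deduplication and reordering here are more
// restricted than for the plain min/max expressions.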
  if (Size.isScalable())

         "Cannot get offset for structure containing scalable vector types");

  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");

  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);

  bool PreciseA, PreciseB;
  auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
  auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
  if (!PreciseA || !PreciseB)
  return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||

  return CouldNotCompute.get();
bool ScalarEvolution::checkValidity(const SCEV *S) const {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;

  return !ContainsNulls;

  if (I != HasRecMap.end())

  HasRecMap.insert({S, FoundAddRec});

  if (SI == ExprValueMap.end())
    return std::nullopt;
  return SI->second.getArrayRef();

void ScalarEvolution::eraseValueFromMap(Value *V) {
  if (I != ValueExprMap.end()) {
    auto EVIt = ExprValueMap.find(I->second);
    bool Removed = EVIt->second.remove(V);
    assert(Removed && "Value not in ExprValueMap?");

void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
  auto It = ValueExprMap.find_as(V);
  if (It == ValueExprMap.end()) {
    ExprValueMap[S].insert(V);

  switch (I->getOpcode()) {
  case Instruction::Load:
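// ValueExprMap (Value -> SCEV) and ExprValueMap (SCEV -> Values) are kept in
// sync by insertValueToMap/eraseValueFromMap so invalidation can be driven from
// either direction.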