#include "llvm/Config/llvm-config.h"

using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

          "Number of loop exits with predictable exit counts");
          "Number of loop exits without predictable exit counts");
          "Number of loops with trip counts computed by force");

#ifdef EXPENSIVE_CHECKS
    cl::desc("Maximum number of iterations SCEV will "
             "symbolically execute a constant "

    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

    cl::desc("Enable stricter verification when -verify-scev is passed"));

    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),

    cl::desc("Threshold for inlining multiplication operands into a SCEV"),

    cl::desc("Threshold for inlining addition operands into a SCEV"),

    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),

    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),

    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),

    cl::desc("Maximum depth of recursive arithmetics"),

    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,

    cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),

    cl::desc("Max coefficients in AddRec during evolving"),

    cl::desc("Size of the expression which is considered huge"),

    cl::desc("Threshold for switching to iteratively computing SCEV ranges"),

    cl::desc("When printing analysis, include information on every instruction"));

    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum number of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),

    cl::desc("Handle <= and >= in finite loops"),

    "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
    cl::desc("Infer nuw/nsw flags using context where suitable"),
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);

    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";

    OS << "(trunc " << *Op->getType() << " " << *Op << " to "

    OS << "(zext " << *Op->getType() << " " << *Op << " to "

    OS << "(sext " << *Op->getType() << " " << *Op << " to "

    const char *OpStr = nullptr;
      OpStr = " umin_seq ";
    ListSeparator LS(OpStr);

    cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);

    OS << "***COULDNOTCOMPUTE***";
    return cast<SCEVConstant>(this)->getType();
    return cast<SCEVVScale>(this)->getType();
    return cast<SCEVCastExpr>(this)->getType();
    return cast<SCEVAddRecExpr>(this)->getType();
    return cast<SCEVMulExpr>(this)->getType();
    return cast<SCEVMinMaxExpr>(this)->getType();
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
    return cast<SCEVAddExpr>(this)->getType();
    return cast<SCEVUDivExpr>(this)->getType();
    return cast<SCEVUnknown>(this)->getType();

    return cast<SCEVCastExpr>(this)->operands();
    return cast<SCEVNAryExpr>(this)->operands();
    return cast<SCEVUDivExpr>(this)->operands();
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();

  if (!Mul)
    return false;
  if (!SC)
    return false;
  return SC->getAPInt().isNegative();
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
  UniqueSCEVs.InsertNode(S, IP);

         "Must be a non-bit-width-changing pointer-to-integer cast!");
         "Cannot truncate non-integer value!");
         "Cannot zero extend non-integer value!");
         "Cannot sign extend non-integer value!");
void SCEVUnknown::deleted() {
  SE->forgetMemoizedResults(this);
  SE->UniqueSCEVs.RemoveNode(this);

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  SE->forgetMemoizedResults(this);
  SE->UniqueSCEVs.RemoveNode(this);
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

    return (int)LID - (int)RID;

  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();

    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());

  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    if (LParent != RParent) {
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;

    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(LNumOps)) {
static std::optional<int>

    return (int)LType - (int)RType;

    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;

    const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
    const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
    return LTy->getBitWidth() - RTy->getBitWidth();

    if (LLoop != RLoop) {
      assert(LHead != RHead && "Two loops share the same header?");
             "No dominance between recurrences used by one SCEV?");

    unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {

  if (Ops.size() < 2)
    return;

    return Complexity && *Complexity < 0;

  if (Ops.size() == 2) {
    if (IsLessComplex(RHS, LHS))
    return IsLessComplex(LHS, RHS);

  for (unsigned i = 0, e = Ops.size(); i != e - 2; ++i) {
    const SCEV *S = Ops[i];

    for (unsigned j = i + 1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {

    if (i == e - 2)
      return;
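  // Note (informal): GroupByComplexity orders operands by increasing
  // SCEVTypes complexity and places equal operands next to each other, so
  // the n-ary builders below (getAddExpr, getMulExpr, ...) can fold
  // duplicates by scanning only adjacent elements, e.g. turning X + Y + X
  // into 2*X + Y without a quadratic search.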
template <typename FoldT, typename IsIdentityT, typename IsAbsorberT>
                                     IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {

  if (const auto *C = dyn_cast<SCEVConstant>(Op)) {
      Folded = cast<SCEVConstant>(

  assert(Folded && "Must have folded value");

  if (Folded && IsAbsorber(Folded->getAPInt()))

  if (Folded && !IsIdentity(Folded->getAPInt()))

  return Ops.size() == 1 ? Ops[0] : nullptr;
  APInt OddFactorial(W, 1);
  for (unsigned i = 3; i <= K; ++i) {
    OddFactorial *= (i >> TwoFactors);

  unsigned CalculationBits = W + T;

  for (unsigned i = 1; i != K; ++i) {

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    if (isa<SCEVCouldNotCompute>(Coeff))
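  // Context for the OddFactorial computation above (informal): evaluating an
  // add recurrence {A,+,B,+,C,...} at iteration It uses binomial coefficients,
  //   A + B*BC(It,1) + C*BC(It,2) + ...,  BC(It,K) = It*(It-1)*...*(It-K+1)/K!.
  // K! is split into 2^T * OddFactorial; the power of two is handled by
  // working at a wider bit width and shifting, and the division by the odd
  // part becomes a multiplication by its multiplicative inverse modulo 2^W,
  // which exists because OddFactorial is odd.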
1025 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1029 if (!
Op->getType()->isPointerTy())
1040 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1059 if (
auto *U = dyn_cast<SCEVUnknown>(
Op)) {
1064 if (isa<ConstantPointerNull>(U->getValue()))
1070 SCEV *S =
new (SCEVAllocator)
1072 UniqueSCEVs.InsertNode(S, IP);
1077 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1078 "non-SCEVUnknown's.");
1090 class SCEVPtrToIntSinkingRewriter
1098 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1108 return Base::visit(S);
1113 bool Changed =
false;
1123 bool Changed =
false;
1133 "Should only reach pointer-typed SCEVUnknown's.");
1139 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1141 "We must have succeeded in sinking the cast, "
1142 "and ending up with an integer-typed expression!");
1150 if (isa<SCEVCouldNotCompute>(IntOp))
1159 "This is not a truncating conversion!");
1161 "This is not a conversion to a SCEVable type!");
1162 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1170 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1192 UniqueSCEVs.InsertNode(S, IP);
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
      if (isa<SCEVMulExpr>(Op))

    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))

    for (const SCEV *Op : AddRec->operands())

  UniqueSCEVs.InsertNode(S, IP);
struct ExtendOpTraitsBase {

template <typename ExtendOp> struct ExtendOpTraits {

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<

template <typename ExtendOpTy>
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);

  for (auto It = DiffOps.begin(); It != DiffOps.end(); ++It)

  auto PreStartFlags =

  const SCEV *OperandExtendedStart =
                   (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {

  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&

template <typename ExtendOpTy>
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));

template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);

  for (unsigned Delta : {-2, -1, 1, 2}) {
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);

    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
  const unsigned BitWidth = C.getBitWidth();

                                            const APInt &ConstantStart,

  auto &UserIDs = FoldCacheUser[I.first->second];
  assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
  for (unsigned I = 0; I != UserIDs.size(); ++I)
    if (UserIDs[I] == ID) {
  I.first->second = S;

  auto R = FoldCacheUser.insert({S, {}});
  R.first->second.push_back(ID);
1574 "This is not an extending conversion!");
1576 "This is not a conversion to a SCEVable type!");
1577 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1581 auto Iter = FoldCache.find(
ID);
1582 if (Iter != FoldCache.end())
1583 return Iter->second;
1586 if (!isa<SCEVZeroExtendExpr>(S))
1594 "This is not an extending conversion!");
1596 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1613 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1617 UniqueSCEVs.InsertNode(S, IP);
    const SCEV *X = ST->getOperand();

    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      const Loop *L = AR->getLoop();

      if (AR->hasNoUnsignedWrap()) {
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        const SCEV *CastedMaxBECount =
        if (MaxBECount == RecastedMaxBECount) {
          const SCEV *WideMaxBECount =
          const SCEV *OperandExtendedAdd =
          if (ZAdd == OperandExtendedAdd) {
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
          OperandExtendedAdd =
          if (ZAdd == OperandExtendedAdd) {
            Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
        auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
        if (AR->hasNoUnsignedWrap()) {
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

          Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
          const SCEV *SResidual =

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->hasNoUnsignedWrap()) {
      for (const auto *Op : SA->operands())

    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
        const SCEV *SResidual =

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    if (SM->hasNoUnsignedWrap()) {
      for (const auto *Op : SM->operands())

    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
                MulLHS->getAPInt().logBase2();

  if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    for (auto *Operand : MinMax->operands())
    if (isa<SCEVUMinExpr>(MinMax))

  if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
    assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
    for (auto *Operand : MinMax->operands())

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);
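  // Informal summary of the affine AddRec case above: for {Start,+,Step}<L>,
  // a zero-extend only distributes over the recurrence when the addition
  // cannot wrap unsigned, i.e.
  //   zext({Start,+,Step}<L>) == {zext(Start),+,zext(Step)}<nuw><L>.
  // The code either uses an existing <nuw> flag or tries to prove it, e.g.
  // by redoing the "Start + MaxBECount * Step" computation in a wider type
  // (OperandExtendedAdd) and checking that it matches, or by peeling part of
  // a constant Start off into a residual so the remaining recurrence has
  // provable no-wrap flags.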
1909 "This is not an extending conversion!");
1911 "This is not a conversion to a SCEVable type!");
1912 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1916 auto Iter = FoldCache.find(
ID);
1917 if (Iter != FoldCache.end())
1918 return Iter->second;
1921 if (!isa<SCEVSignExtendExpr>(S))
1929 "This is not an extending conversion!");
1931 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1953 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1958 UniqueSCEVs.InsertNode(S, IP);
1967 const SCEV *
X = ST->getOperand();
1976 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1978 if (SA->hasNoSignedWrap()) {
1982 for (
const auto *
Op : SA->operands())
1996 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
2000 const SCEV *SResidual =
2014 if (AR->isAffine()) {
2015 const SCEV *Start = AR->getStart();
2016 const SCEV *Step = AR->getStepRecurrence(*
this);
2018 const Loop *L = AR->getLoop();
2022 if (AR->hasNoSignedWrap()) {
2024 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2038 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2044 const SCEV *CastedMaxBECount =
2048 if (MaxBECount == RecastedMaxBECount) {
2058 const SCEV *WideMaxBECount =
2060 const SCEV *OperandExtendedAdd =
2066 if (SAdd == OperandExtendedAdd) {
2070 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2077 OperandExtendedAdd =
2083 if (SAdd == OperandExtendedAdd) {
2095 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2103 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2105 if (AR->hasNoSignedWrap()) {
2111 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2119 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2120 const APInt &
C = SC->getAPInt();
2124 const SCEV *SResidual =
2133 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2136 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2149 if (isa<SCEVSMinExpr>(
Op) || isa<SCEVSMaxExpr>(
Op)) {
2150 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
2152 for (
auto *Operand :
MinMax->operands())
2154 if (isa<SCEVSMinExpr>(
MinMax))
2161 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2164 UniqueSCEVs.InsertNode(S, IP);
2190 "This is not an extending conversion!");
2192 "This is not a conversion to a SCEVable type!");
2197 if (SC->getAPInt().isNegative())
2202 const SCEV *NewOp =
T->getOperand();
2210 if (!isa<SCEVZeroExtendExpr>(ZExt))
2215 if (!isa<SCEVSignExtendExpr>(SExt))
2221 for (
const SCEV *
Op : AR->operands())
2227 if (isa<SCEVSMaxExpr>(
Op))
                              APInt &AccumulatedConstant,

  bool Interesting = false;

  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
    AccumulatedConstant += Scale * C->getAPInt();

  for (; i != Ops.size(); ++i) {
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
                                    Add->operands(), NewScale, SE);

        auto Pair = M.insert({Key, NewScale});
          Pair.first->second += NewScale;

      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
        Pair.first->second += Scale;
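  // Note (informal): CollectAddOperandsWithScales flattens the operands of an
  // add into a map from SCEV to an accumulated APInt scale, plus a separate
  // AccumulatedConstant for plain constants. For example 2*X + 3*X + 5 is
  // collected as {X -> 5} with AccumulatedConstant = 5, which the caller can
  // re-emit as 5*X + 5.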
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:

  auto *NarrowTy = cast<IntegerType>(LHS->getType());

  const SCEV *A = (this->*Extension)(
  const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
  const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);

  if (BinOp == Instruction::Mul)
  auto *RHSC = dyn_cast<SCEVConstant>(RHS);

  APInt C = RHSC->getAPInt();
  unsigned NumBits = C.getBitWidth();
  bool IsSub = (BinOp == Instruction::Sub);
  bool IsNegativeConst = (Signed && C.isNegative());
  bool OverflowDown = IsSub ^ IsNegativeConst;

  if (IsNegativeConst) {
    APInt Limit = Min + Magnitude;
    APInt Limit = Max - Magnitude;

std::optional<SCEV::NoWrapFlags>
    return std::nullopt;

  bool Deduced = false;

  if (OBO->getOpcode() != Instruction::Add &&
    return std::nullopt;

          false, LHS, RHS, CtxI)) {

  return std::nullopt;
  using namespace std::placeholders;

  assert(CanAnalyze && "don't call from other places!");

  auto IsKnownNonNegative = [&](const SCEV *S) {

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      isa<SCEVConstant>(Ops[0])) {
        return Instruction::Add;
        return Instruction::Mul;

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
            Opcode, C, OBO::NoSignedWrap);
            Opcode, C, OBO::NoUnsignedWrap);

      Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))

    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
      if (UDiv->getOperand(1) == Ops[1])
    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
      if (UDiv->getOperand(1) == Ops[0])
2537 "only nuw or nsw allowed");
2539 if (Ops.
size() == 1)
return Ops[0];
2542 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2544 "SCEVAddExpr operand types don't match!");
2547 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2552 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2553 [](
const APInt &
C) {
return C.isZero(); },
2554 [](
const APInt &
C) {
return false; });
2558 unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
2567 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2572 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2573 Add->setNoWrapFlags(ComputeFlags(Ops));
2580 Type *Ty = Ops[0]->getType();
2581 bool FoundMatch =
false;
2582 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2583 if (Ops[i] == Ops[i+1]) {
2586 while (i+Count != e && Ops[i+Count] == Ops[i])
2591 if (Ops.
size() == Count)
2595 --i; e -= Count - 1;
2605 auto FindTruncSrcType = [&]() ->
Type * {
2610 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[
Idx]))
2611 return T->getOperand()->getType();
2612 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[
Idx])) {
2613 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2614 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2615 return T->getOperand()->getType();
2619 if (
auto *SrcType = FindTruncSrcType()) {
2624 for (
const SCEV *
Op : Ops) {
2626 if (
T->getOperand()->getType() != SrcType) {
2633 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(
Op)) {
2635 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2637 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2638 if (
T->getOperand()->getType() != SrcType) {
2643 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2661 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2666 if (Ops.
size() == 2) {
2670 const SCEV *
A = Ops[0];
2671 const SCEV *
B = Ops[1];
2672 auto *AddExpr = dyn_cast<SCEVAddExpr>(
B);
2673 auto *
C = dyn_cast<SCEVConstant>(
A);
2674 if (AddExpr &&
C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2675 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2676 auto C2 =
C->getAPInt();
2679 APInt ConstAdd = C1 + C2;
2680 auto AddFlags = AddExpr->getNoWrapFlags();
  if (Ops.size() == 2) {
    if (Mul && Mul->getNumOperands() == 2 &&
        Mul->getOperand(0)->isAllOnesValue()) {
      if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {

    bool DeletedAdd = false;
        CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());

  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    struct APIntCompare {

    std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
    for (const SCEV *NewOp : NewOps)
      MulOpLists[M.find(NewOp)->second].push_back(NewOp);

    if (AccumulatedConstant != 0)
    for (auto &MulOp : MulOpLists) {
      if (MulOp.first == 1) {
      } else if (MulOp.first != 0) {

    if (Ops.size() == 1)

  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
                Mul->operands().take_front(MulOp));
          if (Ops.size() == 2)
            return OuterMul;

      for (unsigned OtherMulIdx = Idx + 1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
                  Mul->operands().take_front(MulOp));
                  OtherMul->operands().take_front(OMulOp));
            const SCEV *InnerMulSum =
            if (Ops.size() == 2)
              return OuterMul;

  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)

    if (!LIOps.empty()) {
      auto *DefI = getDefiningScopeBound(LIOps);
      if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))

      if (Ops.size() == 1)
        return NewRec;

      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {

    for (unsigned OtherIdx = Idx + 1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
              if (i >= AddRecOps.size()) {
                append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
                  AddRecOps[i], OtherAddRec->getOperand(i)};
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;

  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
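// Note (informal) on the add-rec folding above: operands that are invariant
// in a recurrence's loop get folded into its start value,
//   C + {A,+,B}<L>  ==>  {A+C,+,B}<L>        (C loop-invariant in L),
// and two recurrences over the same loop are added element-wise,
//   {A,+,B}<L> + {C,+,D}<L>  ==>  {A+C,+,B+D}<L>.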
  for (const SCEV *Op : Ops)
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);

  for (const SCEV *Op : Ops)
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);
    LoopUsers[L].push_back(S);

  for (const SCEV *Op : Ops)
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
    UniqueSCEVs.InsertNode(S, IP);
  if (j > 1 && k / j != i) Overflow = true;

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
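  // Note (informal): this computes C(n, k) with the multiplicative formula
  //   r = (n/1) * ((n-1)/2) * ... * ((n-k+1)/k),
  // multiplying by n-(i-1) and then dividing by i on each iteration; every
  // intermediate value stays an integer because each prefix of the product
  // is itself a binomial coefficient. umul_ov reports unsigned overflow of
  // the 64-bit multiplication.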
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);

    bool isDone() const {
      return FoundConstant;

  FindConstantInAddMulChain F;
  ST.visitAll(StartExpr);
  return F.FoundConstant;
3117 "only nuw or nsw allowed");
3119 if (Ops.
size() == 1)
return Ops[0];
3121 Type *ETy = Ops[0]->getType();
3123 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3125 "SCEVMulExpr operand types don't match!");
3130 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3131 [](
const APInt &
C) {
return C.isOne(); },
3132 [](
const APInt &
C) {
return C.isZero(); });
3143 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3148 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3149 Mul->setNoWrapFlags(ComputeFlags(Ops));
3153 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3154 if (Ops.
size() == 2) {
3171 if (Ops[0]->isAllOnesValue()) {
3176 bool AnyFolded =
false;
3177 for (
const SCEV *AddOp :
Add->operands()) {
3180 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3185 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3204 AddRec->getNoWrapFlags(FlagsMask));
3217 bool DeletedMul =
false;
3242 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
3247 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3255 if (!LIOps.
empty()) {
3268 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3284 if (Ops.
size() == 1)
return NewRec;
3287 for (
unsigned i = 0;; ++i)
3288 if (Ops[i] == AddRec) {
3309 bool OpsModified =
false;
3310 for (
unsigned OtherIdx =
Idx+1;
3311 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3314 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3324 bool Overflow =
false;
3330 SmallVector <const SCEV *, 7> SumOps;
3331 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3335 z < ze && !Overflow; ++z) {
3338 if (LargerThan64Bits)
3339 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3341 Coeff = Coeff1*Coeff2;
3356 if (Ops.
size() == 2)
return NewAddRec;
3357 Ops[
Idx] = NewAddRec;
3358 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3360 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3374 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
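// Note (informal) on the mul/add-rec interaction above: a loop-invariant
// factor distributes over every coefficient of a recurrence,
//   C * {A,+,B}<L>  ==>  {C*A,+,C*B}<L>       (C loop-invariant in L),
// and the product of two recurrences over the same loop is rebuilt as a
// higher-degree recurrence whose coefficients are sums of products of the
// original coefficients, with Choose() supplying the binomial weights and
// the Overflow flag guarding the 64-bit coefficient arithmetic.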
3382 "SCEVURemExpr operand types don't match!");
3387 if (RHSC->getValue()->isOne())
3391 if (RHSC->getAPInt().isPowerOf2()) {
3410 "SCEVUDivExpr operand can't be pointer!");
3412 "SCEVUDivExpr operand types don't match!");
3419 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3424 if (LHSC->getValue()->isZero())
3428 if (RHSC->getValue()->isOne())
3433 if (!RHSC->getValue()->isZero()) {
3438 unsigned LZ = RHSC->getAPInt().countl_zero();
3442 if (!RHSC->getAPInt().isPowerOf2())
3448 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3450 const APInt &StepInt = Step->getAPInt();
3451 const APInt &DivInt = RHSC->getAPInt();
3452 if (!StepInt.
urem(DivInt) &&
3458 for (
const SCEV *
Op : AR->operands())
3465 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3466 if (StartC && !DivInt.
urem(StepInt) &&
3472 const APInt &StartRem = StartInt.
urem(StepInt);
3473 if (StartRem != 0) {
3474 const SCEV *NewLHS =
3477 if (
LHS != NewLHS) {
3487 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3496 for (
const SCEV *
Op : M->operands())
3500 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3501 const SCEV *
Op = M->getOperand(i);
3503 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) ==
Op) {
3513 if (
auto *DivisorConstant =
3514 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3515 bool Overflow =
false;
3517 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3528 for (
const SCEV *
Op :
A->operands())
3532 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3534 if (isa<SCEVUDivExpr>(
Op) ||
3539 if (
Operands.size() ==
A->getNumOperands())
3546 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3553 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3556 UniqueSCEVs.InsertNode(S, IP);
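// Note (informal): getUDivExpr only performs folds that are exact under the
// round-toward-zero semantics of udiv, e.g. X udiv 1 == X, 0 udiv X == 0,
// constant udiv constant, and distributing a division by a power of two over
// add recurrences, adds and muls when the operands are provably divisible so
// the truncated quotient is unchanged.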
  if (!Mul || !Mul->hasNoUnsignedWrap())

  if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
    if (LHSCst == RHSCst) {

      APInt Factor = gcd(LHSCst, RHSCst);
          cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
          cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        Mul = dyn_cast<SCEVMulExpr>(LHS);

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {

           "SCEVAddRecExpr operand types don't match!");
  assert(!Op->getType()->isPointerTy() && "Step must be integer");
           "SCEVAddRecExpr operand is not available at loop entry!");

    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
      Operands[0] = NestedAR->getStart();

      bool AllInvariant = all_of(
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);

  return getOrCreateAddRecExpr(Operands, L, Flags);
  auto *GEPI = dyn_cast<Instruction>(GEP);
  if (!GEPI || !isSCEVExprNeverPoison(GEPI))

  bool FirstIter = true;
  for (const SCEV *IndexExpr : IndexExprs) {
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      Offsets.push_back(FieldOffset);
      CurTy = STy->getTypeAtIndex(Index);
        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();

      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
      Offsets.push_back(LocalOffset);

  if (Offsets.empty())

         "GEP should not change type mid-flight.");
SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
  ID.AddInteger(SCEVType);
  for (const SCEV *Op : Ops)
  return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);

  assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)
    return Ops[0];

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
           "Operand types don't match!");
           "min/max should be consistently pointerish");

      return IsSigned ? C.isMinSignedValue() : C.isMinValue();
      return IsSigned ? C.isMaxSignedValue() : C.isMaxValue();
      return IsSigned ? C.isMaxSignedValue() : C.isMaxValue();
      return IsSigned ? C.isMinSignedValue() : C.isMinValue();

  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {

  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)

    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {

  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],

  if (Ops.size() == 1)
    return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  ID.AddInteger(Kind);
  for (const SCEV *Op : Ops)
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
    return ExistingSCEV;
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
  UniqueSCEVs.InsertNode(S, IP);
class SCEVSequentialMinMaxDeduplicatingVisitor final
    : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
                         std::optional<const SCEV *>> {
  using RetVal = std::optional<const SCEV *>;

  bool canRecurseInto(SCEVTypes Kind) const {
    return RootKind == Kind || NonSequentialRootKind == Kind;

  RetVal visitAnyMinMaxExpr(const SCEV *S) {
    assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
           "Only for min/max expressions.");

    if (!canRecurseInto(Kind))

    auto *NAry = cast<SCEVNAryExpr>(S);
    bool Changed = visit(Kind, NAry->operands(), NewOps);
      return std::nullopt;

    return isa<SCEVSequentialMinMaxExpr>(S)

  RetVal visit(const SCEV *S) {
    if (!SeenOps.insert(S).second)
      return std::nullopt;
    return Base::visit(S);

      : SE(SE), RootKind(RootKind),
        NonSequentialRootKind(

    bool Changed = false;
    for (const SCEV *Op : OrigOps) {
      RetVal NewOp = visit(Op);
    NewOps = std::move(Ops);

  RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }

  RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }
  RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }
  RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }
  RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);

  RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }
struct SCEVPoisonCollector {
  bool LookThroughMaybePoisonBlocking;

  SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
      : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}

  bool follow(const SCEV *S) {
    if (!LookThroughMaybePoisonBlocking &&

    if (auto *SU = dyn_cast<SCEVUnknown>(S)) {

  bool isDone() const { return false; }

  SCEVPoisonCollector PC1(true);
  if (PC1.MaybePoison.empty())

  SCEVPoisonCollector PC2(false);

  SCEVPoisonCollector PC(false);

  while (!Worklist.empty()) {
    if (!Visited.insert(V).second)
    if (Visited.size() > 16)

    auto *I = dyn_cast<Instruction>(V);

    if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
      if (PDI->isDisjoint())

    if (auto *II = dyn_cast<IntrinsicInst>(I);
        II && II->getIntrinsicID() == Intrinsic::vscale)

    if (I->hasPoisonGeneratingAnnotations())
  assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
         "Not a SCEVSequentialMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
           "Operand types don't match!");
           "min/max should be consistently pointerish");

  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))

  SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
  bool Changed = Deduplicator.visit(Kind, Ops, Ops);

    bool DeletedAny = false;
      if (Ops[Idx]->getSCEVType() != Kind) {
      const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
                 SMME->operands().end());

  const SCEV *SaturationPoint;

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {

  ID.AddInteger(Kind);
  for (const SCEV *Op : Ops)
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
    return ExistingSCEV;

  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
  UniqueSCEVs.InsertNode(S, IP);
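// Note (informal) on "sequential" min/max (umin_seq): unlike a plain umin,
// the operands are evaluated left to right and evaluation stops at the first
// zero, i.e. "A umin_seq B" behaves like "A == 0 ? 0 : umin(A, B)". This
// matters for poison: a later operand that might be poison must not affect
// the result once an earlier operand has already saturated, which is why the
// deduplication and reordering above is more conservative than for plain
// SCEVMinMaxExpr.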
  if (Size.isScalable())

         "Cannot get offset for structure containing scalable vector types");

  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");

  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);

  bool PreciseA, PreciseB;
  auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
  auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
  if (!PreciseA || !PreciseB)
  return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||

  return CouldNotCompute.get();

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  auto *SU = dyn_cast<SCEVUnknown>(S);
  return SU && SU->getValue() == nullptr;

  return !ContainsNulls;