#include "llvm/Config/llvm-config.h"

using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

          "Number of loops with predictable loop counts");
          "Number of loops without predictable loop counts");
          "Number of loops with trip counts computed by force");
#ifdef EXPENSIVE_CHECKS

    cl::desc("Maximum number of iterations SCEV will "
             "symbolically execute a constant "

    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

    cl::desc("Enable stricter verification with -verify-scev is passed"));

    cl::desc("Verify no dangling value in ScalarEvolution's "
             "ExprValueMap (slow)"));

    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),

    cl::desc("Threshold for inlining multiplication operands into a SCEV"),

    cl::desc("Threshold for inlining addition operands into a SCEV"),

    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),

    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),

    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),

    cl::desc("Maximum depth of recursive arithmetics"),

    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,

    cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),

    cl::desc("Max coefficients in AddRec during evolving"),

    cl::desc("Size of the expression which is considered huge"),

    cl::desc("Threshold for switching to iteratively computing SCEV ranges"),

    cl::desc("When printing analysis, include information on every instruction"));

    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

    "scalar-evolution-max-scc-analysis-depth", cl::Hidden,
    cl::desc("Maximum amount of nodes to process while searching SCEVUnknown "
             "Phi strongly connected components"),

    cl::desc("Handle <= and >= in finite loops"),

    "scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden,
    cl::desc("Infer nuw/nsw flags using context where suitable"),
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);

    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";

    OS << "(trunc " << *Op->getType() << " " << *Op << " to "

    OS << "(zext " << *Op->getType() << " " << *Op << " to "

    OS << "(sext " << *Op->getType() << " " << *Op << " to "

    const char *OpStr = nullptr;

      OpStr = " umin_seq ";

    ListSeparator LS(OpStr);

    cast<SCEVUnknown>(this)->getValue()->printAsOperand(OS, false);

    OS << "***COULDNOTCOMPUTE***";
    return cast<SCEVConstant>(this)->getType();
    return cast<SCEVVScale>(this)->getType();
    return cast<SCEVCastExpr>(this)->getType();
    return cast<SCEVAddRecExpr>(this)->getType();
    return cast<SCEVMulExpr>(this)->getType();
    return cast<SCEVMinMaxExpr>(this)->getType();
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
    return cast<SCEVAddExpr>(this)->getType();
    return cast<SCEVUDivExpr>(this)->getType();
    return cast<SCEVUnknown>(this)->getType();

    return cast<SCEVCastExpr>(this)->operands();
    return cast<SCEVNAryExpr>(this)->operands();
    return cast<SCEVUDivExpr>(this)->operands();
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();

  if (!Mul)
    return false;
  if (!SC)
    return false;
  return SC->getAPInt().isNegative();

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;
  UniqueSCEVs.InsertNode(S, IP);

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
  UniqueSCEVs.InsertNode(S, IP);

         "Must be a non-bit-width-changing pointer-to-integer cast!");
         "Cannot truncate non-integer value!");
         "Cannot zero extend non-integer value!");
         "Cannot sign extend non-integer value!");

void SCEVUnknown::deleted() {
  SE->forgetMemoizedResults(this);
  SE->UniqueSCEVs.RemoveNode(this);

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  SE->forgetMemoizedResults(this);
  SE->UniqueSCEVs.RemoveNode(this);
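
// CompareValueComplexity and CompareSCEVComplexity (below) impose a
// deterministic ordering on values and SCEV expressions so that operands of
// commutative expressions can be canonicalized by sorting.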
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

    return (int)LID - (int)RID;

  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();

    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());

  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    if (LParent != RParent) {
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;

    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
static std::optional<int>

    return (int)LType - (int)RType;

    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;

    const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(LHS)->getType());
    const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(RHS)->getType());
    return LTy->getBitWidth() - RTy->getBitWidth();

    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      assert(LHead != RHead && "Two loops share the same header?");
             "No dominance between recurrences used by one SCEV?");

    unsigned LNumOps = LOps.size(), RNumOps = ROps.size();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
                                     ROps[i], DT, Depth + 1);

  if (Ops.size() < 2)
    return;

    return Complexity && *Complexity < 0;

  if (Ops.size() == 2) {
    if (IsLessComplex(RHS, LHS))

    return IsLessComplex(LHS, RHS);

  for (unsigned i = 0, e = Ops.size(); i != e - 2; ++i) {
    const SCEV *S = Ops[i];

    for (unsigned j = i + 1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {

    if (i == e - 2)
      return;
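
// An AddRec {A,+,B,+,C,...} evaluated at iteration It equals
// A*BC(It,0) + B*BC(It,1) + C*BC(It,2) + ..., where BC(It,K) is the binomial
// coefficient. The code below computes BC(It,K) in W-bit arithmetic by
// splitting the factorial into its odd part ("OddFactorial") and a power of
// two, so the odd part can be inverted modulo 2^W.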
  APInt OddFactorial(W, 1);
  for (unsigned i = 3; i <= K; ++i) {
    unsigned TwoFactors = Mult.countr_zero();
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;

  unsigned CalculationBits = W + T;

  APInt MultiplyFactor = OddFactorial.zext(W + 1);
  MultiplyFactor = MultiplyFactor.trunc(W);

  for (unsigned i = 1; i != K; ++i) {

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    if (isa<SCEVCouldNotCompute>(Coeff))
1005 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1009 if (!Op->getType()->isPointerTy())
1020 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1025 if (
getDataLayout().isNonIntegralPointerType(Op->getType()))
1039 if (
auto *U = dyn_cast<SCEVUnknown>(Op)) {
1044 if (isa<ConstantPointerNull>(U->getValue()))
1050 SCEV *S =
new (SCEVAllocator)
1052 UniqueSCEVs.InsertNode(S, IP);
1057 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1058 "non-SCEVUnknown's.");
1070 class SCEVPtrToIntSinkingRewriter
1078 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1088 return Base::visit(S);
1093 bool Changed =
false;
1094 for (
const auto *Op : Expr->
operands()) {
1103 bool Changed =
false;
1104 for (
const auto *Op : Expr->
operands()) {
1113 "Should only reach pointer-typed SCEVUnknown's.");
1119 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *
this);
1121 "We must have succeeded in sinking the cast, "
1122 "and ending up with an integer-typed expression!");
1130 if (isa<SCEVCouldNotCompute>(IntOp))
1139 "This is not a truncating conversion!");
1141 "This is not a conversion to a SCEVable type!");
1142 assert(!Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1150 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1153 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1172 UniqueSCEVs.InsertNode(S, IP);
1181 if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
1182 auto *CommOp = cast<SCEVCommutativeExpr>(Op);
1184 unsigned numTruncs = 0;
1185 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1188 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1189 isa<SCEVTruncateExpr>(S))
1193 if (numTruncs < 2) {
1194 if (isa<SCEVAddExpr>(Op))
1196 else if (isa<SCEVMulExpr>(Op))
1204 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1209 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1211 for (
const SCEV *Op : AddRec->operands())
1226 UniqueSCEVs.InsertNode(S, IP);
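
// ExtendOpTraits abstracts over zero- and sign-extension so that
// getPreStartForExtend, getExtendAddRecStart and proveNoWrapByVaryingStart can
// be written once and instantiated for both SCEVZeroExtendExpr and
// SCEVSignExtendExpr.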
struct ExtendOpTraitsBase {

template <typename ExtendOp> struct ExtendOpTraits {

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<

template <typename ExtendOpTy>
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);

  auto PreStartFlags =

  const SCEV *OperandExtendedStart =
                    (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {

  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
  if (OverflowLimit &&

template <typename ExtendOpTy>
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));

template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);

  for (unsigned Delta : {-2, -1, 1, 2}) {
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);

    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
  const unsigned BitWidth = C.getBitWidth();

                                              const APInt &ConstantStart,

    auto &UserIDs = FoldCacheUser[I.first->second];
    assert(count(UserIDs, ID) == 1 && "unexpected duplicates in UserIDs");
    for (unsigned I = 0; I != UserIDs.size(); ++I)
      if (UserIDs[I] == ID) {
  I.first->second = S;

  auto R = FoldCacheUser.insert({S, {}});
  R.first->second.push_back(ID);

         "This is not an extending conversion!");
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  auto Iter = FoldCache.find(ID);
  if (Iter != FoldCache.end())
    return Iter->second;

  if (!isa<SCEVZeroExtendExpr>(S))

         "This is not an extending conversion!");
  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  UniqueSCEVs.InsertNode(S, IP);
    const SCEV *X = ST->getOperand();

  if (AR->isAffine()) {
    const SCEV *Start = AR->getStart();
    const SCEV *Step = AR->getStepRecurrence(*this);
    const Loop *L = AR->getLoop();

    if (AR->hasNoUnsignedWrap()) {
          getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

    if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
      const SCEV *CastedMaxBECount =
      if (MaxBECount == RecastedMaxBECount) {
        const SCEV *WideMaxBECount =
        const SCEV *OperandExtendedAdd =
        if (ZAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
        OperandExtendedAdd =
        if (ZAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

    if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
      auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
      if (AR->hasNoUnsignedWrap()) {
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

        Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,

    if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
      const APInt &C = SC->getAPInt();
        const SCEV *SResidual =

    if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
          getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1);

  if (matchURem(Op, LHS, RHS))

  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->hasNoUnsignedWrap()) {
      for (const auto *Op : SA->operands())

    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const SCEV *SResidual =

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    if (SM->hasNoUnsignedWrap()) {
      for (const auto *Op : SM->operands())

    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            MulLHS->getAPInt().logBase2();

  if (isa<SCEVUMinExpr>(Op) || isa<SCEVUMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    for (auto *Operand : MinMax->operands())
    if (isa<SCEVUMinExpr>(MinMax))

  if (auto *MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(Op)) {
    assert(isa<SCEVSequentialUMinExpr>(MinMax) && "Not supported!");
    for (auto *Operand : MinMax->operands())

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  UniqueSCEVs.InsertNode(S, IP);
1892 "This is not an extending conversion!");
1894 "This is not a conversion to a SCEVable type!");
1895 assert(!Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1902 auto Iter = FoldCache.find(
ID);
1903 if (Iter != FoldCache.end())
1904 return Iter->second;
1907 if (!isa<SCEVSignExtendExpr>(S))
1915 "This is not an extending conversion!");
1917 assert(!Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1921 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1940 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1945 UniqueSCEVs.InsertNode(S, IP);
    const SCEV *X = ST->getOperand();

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->hasNoSignedWrap()) {
      for (const auto *Op : SA->operands())

    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const SCEV *SResidual =

  if (AR->isAffine()) {
    const SCEV *Start = AR->getStart();
    const SCEV *Step = AR->getStepRecurrence(*this);
    const Loop *L = AR->getLoop();

    if (AR->hasNoSignedWrap()) {
          getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);

    if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
      const SCEV *CastedMaxBECount =
      if (MaxBECount == RecastedMaxBECount) {
        const SCEV *WideMaxBECount =
        const SCEV *OperandExtendedAdd =
        if (SAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
        OperandExtendedAdd =
        if (SAdd == OperandExtendedAdd) {
          Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,

      auto NewFlags = proveNoSignedWrapViaInduction(AR);
      if (AR->hasNoSignedWrap()) {
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);

    if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
      const APInt &C = SC->getAPInt();
        const SCEV *SResidual =

    if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
          getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1);

  if (isa<SCEVSMinExpr>(Op) || isa<SCEVSMaxExpr>(Op)) {
    auto *MinMax = cast<SCEVMinMaxExpr>(Op);
    for (auto *Operand : MinMax->operands())
    if (isa<SCEVSMinExpr>(MinMax))

  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  UniqueSCEVs.InsertNode(S, IP);

         "This is not an extending conversion!");
         "This is not a conversion to a SCEVable type!");

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())

  const SCEV *NewOp = T->getOperand();

  if (!isa<SCEVZeroExtendExpr>(ZExt))
  if (!isa<SCEVSignExtendExpr>(SExt))

  for (const SCEV *Op : AR->operands())

  if (isa<SCEVSMaxExpr>(Op))
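
// CollectAddOperandsWithScales walks an add's operand list, folding constants
// into AccumulatedConstant and recording every remaining operand together with
// its multiplicative Scale in the map M, so the caller can re-associate
// nested adds and muls.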
                             APInt &AccumulatedConstant,

  bool Interesting = false;

  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
    AccumulatedConstant += Scale * C->getAPInt();

  for (; i != Ops.size(); ++i) {
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
            Add->operands(), NewScale, SE);
        auto Pair = M.insert({Key, NewScale});
          Pair.first->second += NewScale;

      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
        Pair.first->second += Scale;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:

  auto *NarrowTy = cast<IntegerType>(LHS->getType());

  const SCEV *A = (this->*Extension)(
  const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
  const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);

  if (BinOp != Instruction::Add)

  auto *RHSC = dyn_cast<SCEVConstant>(RHS);

  APInt C = RHSC->getAPInt();
  unsigned NumBits = C.getBitWidth();

std::optional<SCEV::NoWrapFlags>

    return std::nullopt;

  bool Deduced = false;

  if (OBO->getOpcode() != Instruction::Add &&
    return std::nullopt;

          false, LHS, RHS, CtxI)) {

  return std::nullopt;

  using namespace std::placeholders;

  assert(CanAnalyze && "don't call from other places!");

  auto IsKnownNonNegative = [&](const SCEV *S) {

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      isa<SCEVConstant>(Ops[0])) {
        return Instruction::Add;
        return Instruction::Mul;

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
            Opcode, C, OBO::NoSignedWrap);
            Opcode, C, OBO::NoUnsignedWrap);

      Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))

    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
      if (UDiv->getOperand(1) == Ops[1])
    if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
      if (UDiv->getOperand(1) == Ops[0])
2508 "only nuw or nsw allowed");
2510 if (Ops.
size() == 1)
return Ops[0];
2513 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2515 "SCEVAddExpr operand types don't match!");
2517 Ops, [](
const SCEV *Op) {
return Op->getType()->isPointerTy(); });
2518 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2526 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2531 Ops[0] =
getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2532 if (Ops.
size() == 2)
return Ops[0];
2534 LHSC = cast<SCEVConstant>(Ops[0]);
2538 if (LHSC->getValue()->isZero()) {
2543 if (Ops.
size() == 1)
return Ops[0];
2553 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2558 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2559 Add->setNoWrapFlags(ComputeFlags(Ops));
2566 Type *Ty = Ops[0]->getType();
2567 bool FoundMatch =
false;
2568 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2569 if (Ops[i] == Ops[i+1]) {
2572 while (i+Count != e && Ops[i+Count] == Ops[i])
2577 if (Ops.
size() == Count)
2581 --i; e -= Count - 1;
2591 auto FindTruncSrcType = [&]() ->
Type * {
2596 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[
Idx]))
2597 return T->getOperand()->getType();
2598 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[
Idx])) {
2599 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2600 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2601 return T->getOperand()->getType();
2605 if (
auto *SrcType = FindTruncSrcType()) {
2610 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
2612 if (
T->getOperand()->getType() != SrcType) {
2617 }
else if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2619 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2621 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2623 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2624 if (
T->getOperand()->getType() != SrcType) {
2629 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2647 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
  if (Ops.size() == 2) {
    const SCEV *A = Ops[0];
    const SCEV *B = Ops[1];
    auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
    auto *C = dyn_cast<SCEVConstant>(A);
    if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
      auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
      auto C2 = C->getAPInt();

      APInt ConstAdd = C1 + C2;
      auto AddFlags = AddExpr->getNoWrapFlags();
          ConstAdd.abs().ule(C1.abs())) {

  if (Ops.size() == 2) {
    if (Mul && Mul->getNumOperands() == 2 &&
        Mul->getOperand(0)->isAllOnesValue()) {
      if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {

    bool DeletedAdd = false;
        CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());

  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
      struct APIntCompare {
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);

      if (AccumulatedConstant != 0)
      for (auto &MulOp : MulOpLists) {
        if (MulOp.first == 1) {
        } else if (MulOp.first != 0) {
      if (Ops.size() == 1)

  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
                Mul->operands().take_front(MulOp));
          if (Ops.size() == 2)
            return OuterMul;

      for (unsigned OtherMulIdx = Idx + 1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
                  Mul->operands().take_front(MulOp));
                  OtherMul->operands().take_front(OMulOp));
            const SCEV *InnerMulSum =
            if (Ops.size() == 2)
              return OuterMul;
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)

    if (!LIOps.empty()) {
      auto *DefI = getDefiningScopeBound(LIOps);
      if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))

      if (Ops.size() == 1)
        return NewRec;

      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {

    for (unsigned OtherIdx = Idx + 1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
                  cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
              if (i >= AddRecOps.size()) {
                append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
                  AddRecOps[i], OtherAddRec->getOperand(i)};
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;

  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
  for (const SCEV *Op : Ops)
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);

  for (const SCEV *Op : Ops)
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
    UniqueSCEVs.InsertNode(S, IP);
    LoopUsers[L].push_back(S);

  for (const SCEV *Op : Ops)
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
    UniqueSCEVs.InsertNode(S, IP);
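
// umul_ov and Choose compute products and binomial coefficients with overflow
// detection; getMulExpr uses them below when multiplying two add recurrences,
// whose result coefficients follow the convolution formula for products of
// polynomials in the iteration count.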
  if (j > 1 && k / j != i) Overflow = true;

  if (n == 0 || n == k)
    return 1;
  if (k > n)
    return 0;

  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n - (i - 1), Overflow);

  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);

    bool isDone() const {
      return FoundConstant;

  FindConstantInAddMulChain F;
  ST.visitAll(StartExpr);
  return F.FoundConstant;
3103 "only nuw or nsw allowed");
3105 if (Ops.
size() == 1)
return Ops[0];
3107 Type *ETy = Ops[0]->getType();
3109 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3111 "SCEVMulExpr operand types don't match!");
3119 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3124 Ops[0] =
getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3125 if (Ops.
size() == 2)
return Ops[0];
3127 LHSC = cast<SCEVConstant>(Ops[0]);
3131 if (LHSC->getValue()->isZero())
3135 if (LHSC->getValue()->isOne()) {
3140 if (Ops.
size() == 1)
3151 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3156 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3157 Mul->setNoWrapFlags(ComputeFlags(Ops));
3161 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3162 if (Ops.
size() == 2) {
3179 if (Ops[0]->isAllOnesValue()) {
3184 bool AnyFolded =
false;
3185 for (
const SCEV *AddOp :
Add->operands()) {
3188 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3193 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3213 bool DeletedMul =
false;
3238 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
3244 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3252 if (!LIOps.
empty()) {
3271 if (Ops.
size() == 1)
return NewRec;
3274 for (
unsigned i = 0;; ++i)
3275 if (Ops[i] == AddRec) {
3296 bool OpsModified =
false;
3297 for (
unsigned OtherIdx =
Idx+1;
3298 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3301 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3302 if (!OtherAddRec || OtherAddRec->
getLoop() != AddRecLoop)
3311 bool Overflow =
false;
3317 SmallVector <const SCEV *, 7> SumOps;
3318 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3322 z < ze && !Overflow; ++z) {
3325 if (LargerThan64Bits)
3326 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3328 Coeff = Coeff1*Coeff2;
3343 if (Ops.
size() == 2)
return NewAddRec;
3344 Ops[
Idx] = NewAddRec;
3345 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3347 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3361 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3369 "SCEVURemExpr operand types don't match!");
3374 if (RHSC->getValue()->isOne())
3378 if (RHSC->getAPInt().isPowerOf2()) {
3397 "SCEVUDivExpr operand can't be pointer!");
3399 "SCEVUDivExpr operand types don't match!");
3406 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3411 if (LHSC->getValue()->isZero())
3415 if (RHSC->getValue()->isOne())
3420 if (!RHSC->getValue()->isZero()) {
3425 unsigned LZ = RHSC->getAPInt().countl_zero();
3429 if (!RHSC->getAPInt().isPowerOf2())
3435 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3437 const APInt &StepInt = Step->getAPInt();
3438 const APInt &DivInt = RHSC->getAPInt();
3439 if (!StepInt.
urem(DivInt) &&
3445 for (
const SCEV *Op : AR->operands())
3452 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3453 if (StartC && !DivInt.
urem(StepInt) &&
3459 const APInt &StartRem = StartInt.
urem(StepInt);
3460 if (StartRem != 0) {
3461 const SCEV *NewLHS =
3464 if (
LHS != NewLHS) {
3474 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3483 for (
const SCEV *Op : M->operands())
3487 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3488 const SCEV *Op = M->getOperand(i);
3490 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) == Op) {
3500 if (
auto *DivisorConstant =
3501 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3502 bool Overflow =
false;
3504 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3515 for (
const SCEV *Op :
A->operands())
3519 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3521 if (isa<SCEVUDivExpr>(Op) ||
3526 if (
Operands.size() ==
A->getNumOperands())
3533 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3540 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3543 UniqueSCEVs.InsertNode(S, IP);
  if (!Mul || !Mul->hasNoUnsignedWrap())

  if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
    if (LHSCst == RHSCst) {

      APInt Factor = gcd(LHSCst, RHSCst);
          cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
          cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        Mul = dyn_cast<SCEVMulExpr>(LHS);

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
           "SCEVAddRecExpr operand is not loop-invariant!");

    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
      Operands[0] = NestedAR->getStart();

      bool AllInvariant = all_of(

        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {

          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
  const bool AssumeInBoundsFlags = [&]() {
    if (!GEP->isInBounds())

    auto *GEPI = dyn_cast<Instruction>(GEP);
    return GEPI && isSCEVExprNeverPoison(GEPI);

  bool FirstIter = true;
  for (const SCEV *IndexExpr : IndexExprs) {
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      Offsets.push_back(FieldOffset);
      CurTy = STy->getTypeAtIndex(Index);

        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();

      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
      Offsets.push_back(LocalOffset);

  if (Offsets.empty())

         "GEP should not change type mid-flight.");
SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
  ID.AddInteger(SCEVType);
  for (const SCEV *Op : Ops)
  return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);

  assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)
    return Ops[0];

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
           "Operand types don't match!");
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");

  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      if (Ops.size() == 1)
        return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
    } else if (IsMax ? IsMaxV : IsMinV) {

    if (Ops.size() == 1)
      return Ops[0];

  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)

    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {

  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],

  if (Ops.size() == 1)
    return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  ID.AddInteger(Kind);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
    return ExistingSCEV;

  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
  UniqueSCEVs.InsertNode(S, IP);
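
// Because a sequential umin (umin_seq) short-circuits left to right, repeated
// subexpressions after their first occurrence cannot change the result; the
// visitor below drops such duplicates before the expression is built.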
class SCEVSequentialMinMaxDeduplicatingVisitor final
    : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
                         std::optional<const SCEV *>> {
  using RetVal = std::optional<const SCEV *>;

  bool canRecurseInto(SCEVTypes Kind) const {
    return RootKind == Kind || NonSequentialRootKind == Kind;

  RetVal visitAnyMinMaxExpr(const SCEV *S) {
    assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
           "Only for min/max expressions.");

    if (!canRecurseInto(Kind))

    auto *NAry = cast<SCEVNAryExpr>(S);
    bool Changed = visit(Kind, NAry->operands(), NewOps);

      return std::nullopt;

    return isa<SCEVSequentialMinMaxExpr>(S)

  RetVal visit(const SCEV *S) {
    if (!SeenOps.insert(S).second)
      return std::nullopt;
    return Base::visit(S);

      : SE(SE), RootKind(RootKind),
        NonSequentialRootKind(

    bool Changed = false;
    for (const SCEV *Op : OrigOps) {
      RetVal NewOp = visit(Op);

    NewOps = std::move(Ops);

  RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }

  RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }

  RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }

  RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }

  RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);
    return visitAnyMinMaxExpr(Expr);

  RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }
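
// SCEVPoisonCollector records which SCEVUnknowns may introduce poison; the
// poison-implication query below succeeds only if every potential poison
// source collected for the first expression (PC1) also appears among the
// potential poison sources of the second (PC2).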
  struct SCEVPoisonCollector {
    bool LookThroughMaybePoisonBlocking;

    SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
        : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}

    bool follow(const SCEV *S) {
      if (!LookThroughMaybePoisonBlocking &&

      if (auto *SU = dyn_cast<SCEVUnknown>(S)) {

    bool isDone() const { return false; }

  SCEVPoisonCollector PC1(true);
  if (PC1.MaybePoison.empty())

  SCEVPoisonCollector PC2(false);

  return all_of(PC1.MaybePoison,
                [&](const SCEV *S) { return PC2.MaybePoison.contains(S); });
  assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
         "Not a SCEVSequentialMinMaxExpr!");
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1)

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
           "Operand types don't match!");
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");

  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops))

  SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind);
  bool Changed = Deduplicator.visit(Kind, Ops, Ops);

    bool DeletedAny = false;
      if (Ops[Idx]->getSCEVType() != Kind) {
      const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
                 SMME->operands().end());

  const SCEV *SaturationPoint;

  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {

  ID.AddInteger(Kind);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
    return ExistingSCEV;

  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
  UniqueSCEVs.InsertNode(S, IP);
  if (Size.isScalable())

      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));

  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");

  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);

  bool PreciseA, PreciseB;
  auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
  auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
  if (!PreciseA || !PreciseB)
  return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||

  return CouldNotCompute.get();
bool ScalarEvolution::checkValidity(const SCEV *S) const {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  return !ContainsNulls;

  if (I != HasRecMap.end())
  HasRecMap.insert({S, FoundAddRec});

  if (SI == ExprValueMap.end())
    return std::nullopt;
  return SI->second.getArrayRef();
void ScalarEvolution::eraseValueFromMap(Value *V) {
  if (I != ValueExprMap.end()) {
    auto EVIt = ExprValueMap.find(I->second);
    bool Removed = EVIt->second.remove(V);
    assert(Removed && "Value not in ExprValueMap?");

void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
  auto It = ValueExprMap.find_as(V);
  if (It == ValueExprMap.end()) {
    ExprValueMap[S].insert(V);

  if (const SCEV *S = getExistingSCEV(V))
  return createSCEVIter(V);
const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    assert(checkValidity(S) &&
           "existing SCEV has not been properly invalidated");

  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))

  Type *Ty = V->getType();

  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));

  assert(!V->getType()->isPointerTy() && "Can't negate pointer");

  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))

    return (const SCEV *)nullptr;

  if (const SCEV *Replaced = MatchMinMaxNegation(MME))

  Type *Ty = V->getType();