85 #include "llvm/Config/llvm-config.h"
133 using namespace llvm;
134 using namespace PatternMatch;
136 #define DEBUG_TYPE "scalar-evolution"
139 "Number of loops with predictable loop counts");
141 "Number of loops without predictable loop counts");
142 STATISTIC(NumBruteForceTripCountsComputed,
143 "Number of loops with trip counts computed by force");
145 #ifdef EXPENSIVE_CHECKS
146 bool llvm::VerifySCEV =
true;
148 bool llvm::VerifySCEV =
false;
153 cl::desc(
"Maximum number of iterations SCEV will "
154 "symbolically execute a constant "
160 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
163 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
166 cl::desc(
"Verify no dangling value in ScalarEvolution's "
167 "ExprValueMap (slow)"));
171 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
176 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
181 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
185 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
186 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
190 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
191 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
195 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
196 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
201 cl::desc(
"Maximum depth of recursive arithmetics"),
205 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
210 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
215 cl::desc(
"Max coefficients in AddRec during evolving"),
220 cl::desc(
"Size of the expression which is considered huge"),
226 cl::desc(
"When printing analysis, include information on every instruction"));
229 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
231 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
232 "be costly in terms of compile time"));
235 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
236 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
237 "Phi strongly connected components"),
242 cl::desc(
"Handle <= and >= in finite loops"),
253 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
261 switch (getSCEVType()) {
263 cast<SCEVConstant>(
this)->getValue()->printAsOperand(OS,
false);
268 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
269 << *PtrToInt->
getType() <<
")";
275 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
282 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
289 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
318 const char *OpStr =
nullptr;
331 OpStr =
" umin_seq ";
337 ListSeparator
LS(OpStr);
357 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
364 OS <<
"sizeof(" << *AllocTy <<
")";
368 OS <<
"alignof(" << *AllocTy <<
")";
375 OS <<
"offsetof(" << *CTy <<
", ";
386 OS <<
"***COULDNOTCOMPUTE***";
393 switch (getSCEVType()) {
395 return cast<SCEVConstant>(
this)->getType();
400 return cast<SCEVCastExpr>(
this)->getType();
402 return cast<SCEVAddRecExpr>(
this)->getType();
404 return cast<SCEVMulExpr>(
this)->getType();
409 return cast<SCEVMinMaxExpr>(
this)->getType();
411 return cast<SCEVSequentialMinMaxExpr>(
this)->getType();
413 return cast<SCEVAddExpr>(
this)->getType();
415 return cast<SCEVUDivExpr>(
this)->getType();
417 return cast<SCEVUnknown>(
this)->getType();
426 return SC->getValue()->isZero();
432 return SC->getValue()->isOne();
438 return SC->getValue()->isMinusOne();
444 if (!
Mul)
return false;
448 if (!
SC)
return false;
451 return SC->getAPInt().isNegative();
466 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
468 UniqueSCEVs.InsertNode(
S,
IP);
492 "Must be a non-bit-width-changing pointer-to-integer cast!");
504 "Cannot truncate non-integer value!");
511 "Cannot zero extend non-integer value!");
518 "Cannot sign extend non-integer value!");
521 void SCEVUnknown::deleted() {
523 SE->forgetMemoizedResults(
this);
526 SE->UniqueSCEVs.RemoveNode(
this);
532 void SCEVUnknown::allUsesReplacedWith(
Value *New) {
534 SE->forgetMemoizedResults(
this);
537 SE->UniqueSCEVs.RemoveNode(
this);
545 if (VCE->getOpcode() == Instruction::PtrToInt)
546 if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
547 if (CE->getOpcode() == Instruction::GetElementPtr &&
548 CE->getOperand(0)->isNullValue() &&
549 CE->getNumOperands() == 2)
550 if (
ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
552 AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
561 if (VCE->getOpcode() == Instruction::PtrToInt)
562 if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
563 if (CE->getOpcode() == Instruction::GetElementPtr &&
564 CE->getOperand(0)->isNullValue()) {
565 Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
566 if (
StructType *STy = dyn_cast<StructType>(Ty))
567 if (!STy->isPacked() &&
568 CE->getNumOperands() == 3 &&
569 CE->getOperand(1)->isNullValue()) {
570 if (
ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
572 STy->getNumElements() == 2 &&
573 STy->getElementType(0)->isIntegerTy(1)) {
574 AllocTy = STy->getElementType(1);
585 if (VCE->getOpcode() == Instruction::PtrToInt)
586 if (
ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
587 if (CE->getOpcode() == Instruction::GetElementPtr &&
588 CE->getNumOperands() == 3 &&
589 CE->getOperand(0)->isNullValue() &&
590 CE->getOperand(1)->isNullValue()) {
591 Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
596 FieldNo = CE->getOperand(2);
637 if (LIsPointer != RIsPointer)
638 return (
int)LIsPointer - (
int)RIsPointer;
643 return (
int)LID - (
int)RID;
646 if (
const auto *
LA = dyn_cast<Argument>(LV)) {
647 const auto *
RA = cast<Argument>(RV);
648 unsigned LArgNo =
LA->getArgNo(), RArgNo =
RA->getArgNo();
649 return (
int)LArgNo - (
int)RArgNo;
652 if (
const auto *LGV = dyn_cast<GlobalValue>(LV)) {
653 const auto *RGV = cast<GlobalValue>(RV);
655 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
656 auto LT = GV->getLinkage();
663 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
664 return LGV->getName().compare(RGV->getName());
669 if (
const auto *LInst = dyn_cast<Instruction>(LV)) {
670 const auto *RInst = cast<Instruction>(RV);
675 if (LParent != RParent) {
678 if (LDepth != RDepth)
679 return (
int)LDepth - (
int)RDepth;
683 unsigned LNumOps = LInst->getNumOperands(),
684 RNumOps = RInst->getNumOperands();
685 if (LNumOps != RNumOps)
686 return (
int)LNumOps - (
int)RNumOps;
688 for (
unsigned Idx :
seq(0u, LNumOps)) {
691 RInst->getOperand(Idx),
Depth + 1);
718 return (
int)LType - (
int)RType;
748 unsigned LBitWidth =
LA.getBitWidth(), RBitWidth =
RA.getBitWidth();
749 if (LBitWidth != RBitWidth)
750 return (
int)LBitWidth - (
int)RBitWidth;
751 return LA.ult(
RA) ? -1 : 1;
761 const Loop *LLoop =
LA->getLoop(), *RLoop =
RA->getLoop();
762 if (LLoop != RLoop) {
764 assert(LHead != RHead &&
"Two loops share the same header?");
769 "No dominance between recurrences used by one SCEV?");
774 unsigned LNumOps =
LA->getNumOperands(), RNumOps =
RA->getNumOperands();
775 if (LNumOps != RNumOps)
776 return (
int)LNumOps - (
int)RNumOps;
779 for (
unsigned i = 0;
i != LNumOps; ++
i) {
781 LA->getOperand(
i),
RA->getOperand(
i), DT,
802 if (LNumOps != RNumOps)
803 return (
int)LNumOps - (
int)RNumOps;
805 for (
unsigned i = 0;
i != LNumOps; ++
i) {
865 if (Ops.size() < 2)
return;
874 return Complexity && *Complexity < 0;
876 if (Ops.size() == 2) {
880 if (IsLessComplex(
RHS,
LHS))
887 return IsLessComplex(
LHS,
RHS);
894 for (
unsigned i = 0,
e = Ops.size();
i !=
e-2; ++
i) {
896 unsigned Complexity =
S->getSCEVType();
900 for (
unsigned j =
i+1;
j !=
e && Ops[
j]->getSCEVType() == Complexity; ++
j) {
905 if (
i ==
e-2)
return;
993 for (
unsigned i = 3;
i <= K; ++
i) {
995 unsigned TwoFactors =
Mult.countTrailingZeros();
997 Mult.lshrInPlace(TwoFactors);
998 OddFactorial *=
Mult;
1002 unsigned CalculationBits =
W +
T;
1011 APInt MultiplyFactor = OddFactorial.
zext(
W+1);
1013 MultiplyFactor = MultiplyFactor.
trunc(
W);
1019 for (
unsigned i = 1;
i != K; ++
i) {
1057 if (isa<SCEVCouldNotCompute>(Coeff))
1072 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1076 if (!
Op->getType()->isPointerTy())
1087 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
1106 if (
auto *U = dyn_cast<SCEVUnknown>(
Op)) {
1111 if (isa<ConstantPointerNull>(U->getValue()))
1117 SCEV *
S =
new (SCEVAllocator)
1119 UniqueSCEVs.InsertNode(
S,
IP);
1124 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1125 "non-SCEVUnknown's.");
1137 class SCEVPtrToIntSinkingRewriter
1145 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1150 Type *STy =
S->getType();
1155 return Base::visit(
S);
1160 bool Changed =
false;
1170 bool Changed =
false;
1180 "Should only reach pointer-typed SCEVUnknown's.");
1188 "We must have succeeded in sinking the cast, "
1189 "and ending up with an integer-typed expression!");
1197 if (isa<SCEVCouldNotCompute>(IntOp))
1206 "This is not a truncating conversion!");
1208 "This is not a conversion to a SCEVable type!");
1209 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1217 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
1239 UniqueSCEVs.InsertNode(
S,
IP);
1248 if (isa<SCEVAddExpr>(
Op) || isa<SCEVMulExpr>(
Op)) {
1249 auto *CommOp = cast<SCEVCommutativeExpr>(
Op);
1251 unsigned numTruncs = 0;
1252 for (
unsigned i = 0,
e = CommOp->getNumOperands();
i !=
e && numTruncs < 2;
1255 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(
i)) &&
1256 isa<SCEVTruncateExpr>(
S))
1260 if (numTruncs < 2) {
1261 if (isa<SCEVAddExpr>(
Op))
1263 else if (isa<SCEVMulExpr>(
Op))
1271 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
1278 for (
const SCEV *
Op : AddRec->operands())
1293 UniqueSCEVs.InsertNode(
S,
IP);
1333 struct ExtendOpTraitsBase {
1339 template <
typename ExtendOp>
struct ExtendOpTraits {
1355 static const GetExtendExprTy GetExtendExpr;
1357 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1364 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1371 static const GetExtendExprTy GetExtendExpr;
1373 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1380 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1392 template <
typename ExtendOpTy>
1395 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1396 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1403 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1413 DiffOps.push_back(
Op);
1422 auto PreStartFlags =
1440 const SCEV *OperandExtendedStart =
1442 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1443 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1455 const SCEV *OverflowLimit =
1456 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1458 if (OverflowLimit &&
1466 template <
typename ExtendOpTy>
1470 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1472 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE,
Depth);
1478 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1513 template <
typename ExtendOpTy>
1514 bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1517 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1523 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1529 for (
unsigned Delta : {-2, -1, 1, 2}) {
1534 ID.AddPointer(PreStart);
1535 ID.AddPointer(Step);
1543 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1546 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1547 DeltaS, &Pred,
this);
1565 const unsigned BitWidth =
C.getBitWidth();
1583 const APInt &ConstantStart,
1596 "This is not an extending conversion!");
1598 "This is not a conversion to a SCEVable type!");
1599 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1618 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
1622 UniqueSCEVs.InsertNode(
S,
IP);
1631 const SCEV *
X =
ST->getOperand();
1645 if (AR->isAffine()) {
1646 const SCEV *Start = AR->getStart();
1647 const SCEV *Step = AR->getStepRecurrence(*
this);
1649 const Loop *L = AR->getLoop();
1651 if (!AR->hasNoUnsignedWrap()) {
1652 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1658 if (AR->hasNoUnsignedWrap()) {
1660 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1674 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1679 const SCEV *CastedMaxBECount =
1683 if (MaxBECount == RecastedMaxBECount) {
1693 const SCEV *WideMaxBECount =
1695 const SCEV *OperandExtendedAdd =
1701 if (ZAdd == OperandExtendedAdd) {
1705 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1712 OperandExtendedAdd =
1718 if (ZAdd == OperandExtendedAdd) {
1723 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1738 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1741 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1743 if (AR->hasNoUnsignedWrap()) {
1749 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1766 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1777 if (
const auto *
SC = dyn_cast<SCEVConstant>(Start)) {
1782 const SCEV *SResidual =
1791 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1794 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1810 if (
auto *Div = dyn_cast<SCEVUDivExpr>(
Op))
1814 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1816 if (SA->hasNoUnsignedWrap()) {
1820 for (
const auto *
Op : SA->operands())
1833 if (
const auto *
SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1837 const SCEV *SResidual =
1847 if (
auto *SM = dyn_cast<SCEVMulExpr>(
Op)) {
1849 if (SM->hasNoUnsignedWrap()) {
1853 for (
const auto *
Op : SM->operands())
1870 if (SM->getNumOperands() == 2)
1871 if (
auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1872 if (MulLHS->getAPInt().isPowerOf2())
1873 if (
auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1875 MulLHS->getAPInt().logBase2();
1887 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
1890 UniqueSCEVs.InsertNode(
S,
IP);
1898 "This is not an extending conversion!");
1900 "This is not a conversion to a SCEVable type!");
1901 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1924 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
1929 UniqueSCEVs.InsertNode(
S,
IP);
1938 const SCEV *
X =
ST->getOperand();
1947 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1949 if (SA->hasNoSignedWrap()) {
1953 for (
const auto *
Op : SA->operands())
1967 if (
const auto *
SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1971 const SCEV *SResidual =
1985 if (AR->isAffine()) {
1986 const SCEV *Start = AR->getStart();
1987 const SCEV *Step = AR->getStepRecurrence(*
this);
1989 const Loop *L = AR->getLoop();
1991 if (!AR->hasNoSignedWrap()) {
1992 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1998 if (AR->hasNoSignedWrap()) {
2000 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2014 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2020 const SCEV *CastedMaxBECount =
2024 if (MaxBECount == RecastedMaxBECount) {
2034 const SCEV *WideMaxBECount =
2036 const SCEV *OperandExtendedAdd =
2042 if (SAdd == OperandExtendedAdd) {
2046 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2053 OperandExtendedAdd =
2059 if (SAdd == OperandExtendedAdd) {
2071 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2079 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2081 if (AR->hasNoSignedWrap()) {
2087 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2095 if (
const auto *
SC = dyn_cast<SCEVConstant>(Start)) {
2100 const SCEV *SResidual =
2109 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2112 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2125 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
2128 UniqueSCEVs.InsertNode(
S,
IP);
2154 "This is not an extending conversion!");
2156 "This is not a conversion to a SCEVable type!");
2161 if (
SC->getAPInt().isNegative())
2166 const SCEV *NewOp =
T->getOperand();
2174 if (!isa<SCEVZeroExtendExpr>(ZExt))
2179 if (!isa<SCEVSignExtendExpr>(SExt))
2185 for (
const SCEV *
Op : AR->operands())
2191 if (isa<SCEVSMaxExpr>(
Op))
2224 APInt &AccumulatedConstant,
2225 const SCEV *
const *Ops,
size_t NumOperands,
2228 bool Interesting =
false;
2235 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->
isZero())
2237 AccumulatedConstant += Scale *
C->getAPInt();
2242 for (;
i != NumOperands; ++
i) {
2252 Add->op_begin(), Add->getNumOperands(),
2259 auto Pair =
M.insert({
Key, NewScale});
2261 NewOps.push_back(Pair.first->first);
2263 Pair.first->second += NewScale;
2271 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2272 M.insert({Ops[
i], Scale});
2274 NewOps.push_back(Pair.first->first);
2276 Pair.first->second += Scale;
2297 case Instruction::Sub:
2310 auto *NarrowTy = cast<IntegerType>(
LHS->
getType());
2332 bool Deduced =
false;
2335 return {Flags, Deduced};
2340 return {Flags, Deduced};
2359 return {Flags, Deduced};
2369 using namespace std::placeholders;
2376 assert(CanAnalyze &&
"don't call from other places!");
2383 auto IsKnownNonNegative = [&](
const SCEV *
S) {
2393 if (SignOrUnsignWrap != SignOrUnsignMask &&
2395 isa<SCEVConstant>(Ops[0])) {
2408 const APInt &
C = cast<SCEVConstant>(Ops[0])->getAPInt();
2413 Opcode,
C, OBO::NoSignedWrap);
2421 Opcode,
C, OBO::NoUnsignedWrap);
2431 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2437 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2438 if (UDiv->getOperand(1) == Ops[1])
2440 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2441 if (UDiv->getOperand(1) == Ops[0])
2457 "only nuw or nsw allowed");
2458 assert(!Ops.empty() &&
"Cannot get empty add!");
2459 if (Ops.size() == 1)
return Ops[0];
2462 for (
unsigned i = 1,
e = Ops.size();
i !=
e; ++
i)
2464 "SCEVAddExpr operand types don't match!");
2466 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2467 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2475 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2477 assert(Idx < Ops.size());
2478 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2480 Ops[0] =
getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2481 if (Ops.size() == 2)
return Ops[0];
2482 Ops.
erase(Ops.begin()+1);
2483 LHSC = cast<SCEVConstant>(Ops[0]);
2487 if (LHSC->getValue()->isZero()) {
2488 Ops.
erase(Ops.begin());
2492 if (Ops.size() == 1)
return Ops[0];
2502 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2507 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2508 Add->setNoWrapFlags(ComputeFlags(Ops));
2515 Type *Ty = Ops[0]->getType();
2516 bool FoundMatch =
false;
2517 for (
unsigned i = 0,
e = Ops.size();
i !=
e-1; ++
i)
2518 if (Ops[
i] == Ops[
i+1]) {
2521 while (
i+Count !=
e && Ops[
i+Count] == Ops[
i])
2526 if (Ops.size() == Count)
2529 Ops.
erase(Ops.begin()+
i+1, Ops.begin()+
i+Count);
2530 --
i;
e -= Count - 1;
2540 auto FindTruncSrcType = [&]() ->
Type * {
2545 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2546 return T->getOperand()->getType();
2547 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2549 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2550 return T->getOperand()->getType();
2554 if (
auto *SrcType = FindTruncSrcType()) {
2559 for (
unsigned i = 0,
e = Ops.size();
i !=
e; ++
i) {
2561 if (
T->getOperand()->getType() != SrcType) {
2565 LargeOps.push_back(
T->getOperand());
2566 }
else if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[
i])) {
2568 }
else if (
const SCEVMulExpr *
M = dyn_cast<SCEVMulExpr>(Ops[
i])) {
2570 for (
unsigned j = 0,
f =
M->getNumOperands();
j !=
f && Ok; ++
j) {
2572 dyn_cast<SCEVTruncateExpr>(
M->getOperand(
j))) {
2573 if (
T->getOperand()->getType() != SrcType) {
2577 LargeMulOps.push_back(
T->getOperand());
2578 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(
M->getOperand(
j))) {
2596 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2601 if (Ops.size() == 2) {
2605 const SCEV *A = Ops[0];
2606 const SCEV *
B = Ops[1];
2607 auto *AddExpr = dyn_cast<SCEVAddExpr>(
B);
2608 auto *
C = dyn_cast<SCEVConstant>(A);
2609 if (AddExpr &&
C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2610 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2611 auto C2 =
C->getAPInt();
2615 auto AddFlags = AddExpr->getNoWrapFlags();
2641 if (Ops.size() == 2) {
2654 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() <
scAddExpr)
2658 if (Idx < Ops.size()) {
2659 bool DeletedAdd =
false;
2664 while (
const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2670 Ops.
erase(Ops.begin()+Idx);
2671 Ops.
append(Add->op_begin(), Add->op_end());
2673 CommonFlags =
maskFlags(CommonFlags, Add->getNoWrapFlags());
2684 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() <
scMulExpr)
2689 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2695 Ops.data(), Ops.size(),
2697 struct APIntCompare {
2706 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2707 for (
const SCEV *NewOp : NewOps)
2708 MulOpLists[
M.find(NewOp)->second].push_back(NewOp);
2711 if (AccumulatedConstant != 0)
2713 for (
auto &MulOp : MulOpLists) {
2714 if (MulOp.first == 1) {
2716 }
else if (MulOp.first != 0) {
2725 if (Ops.size() == 1)
2734 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2738 if (isa<SCEVConstant>(MulOpSCEV))
2740 for (
unsigned AddOp = 0,
e = Ops.size(); AddOp !=
e; ++AddOp)
2741 if (MulOpSCEV == Ops[AddOp]) {
2756 if (Ops.size() == 2)
return OuterMul;
2758 Ops.
erase(Ops.begin()+AddOp);
2759 Ops.
erase(Ops.begin()+Idx-1);
2761 Ops.
erase(Ops.begin()+Idx);
2762 Ops.
erase(Ops.begin()+AddOp-1);
2764 Ops.push_back(OuterMul);
2769 for (
unsigned OtherMulIdx = Idx+1;
2770 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2772 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2776 OMulOp !=
e; ++OMulOp)
2777 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2794 const SCEV *InnerMulSum =
2798 if (Ops.size() == 2)
return OuterMul;
2799 Ops.
erase(Ops.begin()+Idx);
2800 Ops.
erase(Ops.begin()+OtherMulIdx-1);
2801 Ops.push_back(OuterMul);
2811 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() <
scAddRecExpr)
2815 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2821 for (
unsigned i = 0,
e = Ops.size();
i !=
e; ++
i)
2823 LIOps.push_back(Ops[
i]);
2824 Ops.
erase(Ops.begin()+
i);
2829 if (!LIOps.empty()) {
2833 LIOps.push_back(AddRec);
2838 LIOps.push_back(AddRec->
getStart());
2854 auto *DefI = getDefiningScopeBound(LIOps);
2856 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2868 if (Ops.size() == 1)
return NewRec;
2871 for (
unsigned i = 0;; ++
i)
2872 if (Ops[
i] == AddRec) {
2882 for (
unsigned OtherIdx = Idx+1;
2883 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2888 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2890 "AddRecExprs are not sorted in reverse dominance order?");
2891 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2894 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2896 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2897 if (OtherAddRec->getLoop() == AddRecLoop) {
2898 for (
unsigned i = 0,
e = OtherAddRec->getNumOperands();
2900 if (
i >= AddRecOps.size()) {
2901 AddRecOps.
append(OtherAddRec->op_begin()+
i,
2902 OtherAddRec->op_end());
2906 AddRecOps[
i], OtherAddRec->getOperand(
i)};
2909 Ops.
erase(Ops.begin() + OtherIdx); --OtherIdx;
2924 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2932 for (
const SCEV *
Op : Ops)
2939 std::uninitialized_copy(Ops.begin(), Ops.end(),
O);
2940 S =
new (SCEVAllocator)
2942 UniqueSCEVs.InsertNode(
S,
IP);
2945 S->setNoWrapFlags(Flags);
2954 for (
const SCEV *
Op : Ops)
2962 std::uninitialized_copy(Ops.begin(), Ops.end(),
O);
2963 S =
new (SCEVAllocator)
2965 UniqueSCEVs.InsertNode(
S,
IP);
2966 LoopUsers[L].push_back(
S);
2978 for (
const SCEV *
Op : Ops)
2985 std::uninitialized_copy(Ops.begin(), Ops.end(),
O);
2988 UniqueSCEVs.InsertNode(
S,
IP);
2991 S->setNoWrapFlags(Flags);
2997 if (
j > 1 && k /
j !=
i) Overflow =
true;
3013 if (
n == 0 ||
n == k)
return 1;
3014 if (k >
n)
return 0;
3030 struct FindConstantInAddMulChain {
3031 bool FoundConstant =
false;
3033 bool follow(
const SCEV *
S) {
3034 FoundConstant |= isa<SCEVConstant>(
S);
3035 return isa<SCEVAddExpr>(
S) || isa<SCEVMulExpr>(
S);
3038 bool isDone()
const {
3039 return FoundConstant;
3043 FindConstantInAddMulChain
F;
3045 ST.visitAll(StartExpr);
3046 return F.FoundConstant;
3054 "only nuw or nsw allowed");
3055 assert(!Ops.empty() &&
"Cannot get empty mul!");
3056 if (Ops.size() == 1)
return Ops[0];
3058 Type *ETy = Ops[0]->getType();
3060 for (
unsigned i = 1,
e = Ops.size();
i !=
e; ++
i)
3062 "SCEVMulExpr operand types don't match!");
3070 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3072 assert(Idx < Ops.size());
3073 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3075 Ops[0] =
getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3076 if (Ops.size() == 2)
return Ops[0];
3077 Ops.
erase(Ops.begin()+1);
3078 LHSC = cast<SCEVConstant>(Ops[0]);
3082 if (LHSC->getValue()->isZero())
3086 if (LHSC->getValue()->isOne()) {
3087 Ops.
erase(Ops.begin());
3091 if (Ops.size() == 1)
3102 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3107 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3108 Mul->setNoWrapFlags(ComputeFlags(Ops));
3112 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3113 if (Ops.size() == 2) {
3115 if (
const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3130 if (Ops[0]->isAllOnesValue()) {
3133 if (
const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3135 bool AnyFolded =
false;
3136 for (
const SCEV *AddOp : Add->operands()) {
3139 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3140 NewOps.push_back(
Mul);
3144 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3147 for (
const SCEV *AddRecOp : AddRec->operands())
3159 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() <
scMulExpr)
3163 if (Idx < Ops.size()) {
3164 bool DeletedMul =
false;
3165 while (
const SCEVMulExpr *
Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3170 Ops.
erase(Ops.begin()+Idx);
3185 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() <
scAddRecExpr)
3189 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3195 for (
unsigned i = 0,
e = Ops.size();
i !=
e; ++
i)
3197 LIOps.push_back(Ops[
i]);
3198 Ops.
erase(Ops.begin()+
i);
3203 if (!LIOps.empty()) {
3222 if (Ops.size() == 1)
return NewRec;
3225 for (
unsigned i = 0;; ++
i)
3226 if (Ops[
i] == AddRec) {
3247 bool OpsModified =
false;
3248 for (
unsigned OtherIdx = Idx+1;
3249 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3252 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3253 if (!OtherAddRec || OtherAddRec->
getLoop() != AddRecLoop)
3262 bool Overflow =
false;
3269 for (
int y =
x, ye = 2*
x+1;
y != ye && !Overflow; ++
y) {
3273 z < ze && !Overflow; ++
z) {
3276 if (LargerThan64Bits)
3277 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3279 Coeff = Coeff1*Coeff2;
3283 SumOps.push_back(
getMulExpr(CoeffTerm, Term1, Term2,
3288 SumOps.push_back(
getZero(Ty));
3294 if (Ops.size() == 2)
return NewAddRec;
3295 Ops[Idx] = NewAddRec;
3296 Ops.
erase(Ops.begin() + OtherIdx); --OtherIdx;
3298 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3312 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3320 "SCEVURemExpr operand types don't match!");
3325 if (RHSC->getValue()->isOne())
3329 if (RHSC->getAPInt().isPowerOf2()) {
3348 "SCEVUDivExpr operand can't be pointer!");
3350 "SCEVUDivExpr operand types don't match!");
3357 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
3362 if (LHSC->getValue()->isZero())
3366 if (RHSC->getValue()->isOne())
3371 if (!RHSC->getValue()->isZero()) {
3376 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3380 if (!RHSC->getAPInt().isPowerOf2())
3386 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3388 const APInt &StepInt = Step->getAPInt();
3389 const APInt &DivInt = RHSC->getAPInt();
3390 if (!StepInt.
urem(DivInt) &&
3396 for (
const SCEV *
Op : AR->operands())
3403 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3404 if (StartC && !DivInt.
urem(StepInt) &&
3410 const APInt &StartRem = StartInt.
urem(StepInt);
3411 if (StartRem != 0) {
3412 const SCEV *NewLHS =
3415 if (
LHS != NewLHS) {
3425 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
3434 for (
const SCEV *
Op :
M->operands())
3438 for (
unsigned i = 0,
e =
M->getNumOperands();
i !=
e; ++
i) {
3441 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) ==
Op) {
3451 if (
auto *DivisorConstant =
3452 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3453 bool Overflow =
false;
3455 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3466 for (
const SCEV *
Op : A->operands())
3470 for (
unsigned i = 0,
e = A->getNumOperands();
i !=
e; ++
i) {
3472 if (isa<SCEVUDivExpr>(
Op) ||
3477 if (
Operands.size() == A->getNumOperands())
3484 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3491 if (
const SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP))
return S;
3494 UniqueSCEVs.InsertNode(
S,
IP);
3500 APInt A =
C1->getAPInt().abs();
3530 if (
const auto *LHSCst = dyn_cast<SCEVConstant>(
Mul->
getOperand(0))) {
3531 if (LHSCst == RHSCst) {
3539 APInt Factor =
gcd(LHSCst, RHSCst);
3542 cast<SCEVConstant>(
getConstant(LHSCst->getAPInt().udiv(Factor)));
3544 cast<SCEVConstant>(
getConstant(RHSCst->getAPInt().udiv(Factor)));
3550 Mul = dyn_cast<SCEVMulExpr>(
LHS);
3576 if (
const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3577 if (StepChrec->getLoop() == L) {
3578 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3596 "SCEVAddRecExpr operand types don't match!");
3601 "SCEVAddRecExpr operand is not loop-invariant!");
3619 const Loop *NestedLoop = NestedAR->getLoop();
3625 Operands[0] = NestedAR->getStart();
3629 bool AllInvariant =
all_of(
3641 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3652 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3662 return getOrCreateAddRecExpr(
Operands, L, Flags);
3672 const bool AssumeInBoundsFlags = [&]() {
3673 if (!
GEP->isInBounds())
3679 auto *GEPI = dyn_cast<Instruction>(
GEP);
3682 return GEPI && isSCEVExprNeverPoison(GEPI);
3689 bool FirstIter =
true;
3691 for (
const SCEV *IndexExpr : IndexExprs) {
3693 if (
StructType *STy = dyn_cast<StructType>(CurTy)) {
3698 Offsets.push_back(FieldOffset);
3701 CurTy = STy->getTypeAtIndex(
Index);
3705 assert(isa<PointerType>(CurTy) &&
3706 "The first index of a GEP indexes a pointer");
3707 CurTy =
GEP->getSourceElementType();
3718 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3719 Offsets.push_back(LocalOffset);
3734 auto *GEPExpr =
getAddExpr(BaseExpr, Offset, BaseWrap);
3736 "GEP should not change type mid-flight.");
3740 SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3743 ID.AddInteger(SCEVType);
3744 for (
const SCEV *
Op : Ops)
3747 return UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP);
// getMinMaxExpr fragment (interior lines missing): canonicalizes and uniques
// a smax/umax/smin/umin expression over Ops.
3757 assert(SCEVMinMaxExpr::isMinMaxType(
Kind) &&
"Not a SCEVMinMaxExpr!");
3758 assert(!Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
// Trivial case: min/max of one operand is that operand.
3759 if (Ops.size() == 1)
return Ops[0];
// All operands must share a type; pointer-ness must be consistent.
3762 for (
unsigned i = 1,
e = Ops.size();
i !=
e; ++
i) {
3764 "Operand types don't match!");
3766 Ops[
i]->
getType()->isPointerTy() &&
3767 "min/max should be consistently pointerish");
// Fast path: return a previously uniqued identical expression.
3778 if (
const SCEV *
S = findExistingSCEVInCache(
Kind, Ops)) {
// Constant folding: operands are sorted, so constants lead; fold pairs of
// leading constants with FoldOp until at most one remains.
3784 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3786 assert(Idx < Ops.size());
3799 while (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3802 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3804 Ops.
erase(Ops.begin()+1);
3805 if (Ops.size() == 1)
return Ops[0];
3806 LHSC = cast<SCEVConstant>(Ops[0]);
// Identity/annihilator constants: min-value is the identity of max (drop
// it) and its annihilator for min, and vice versa.
3809 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3810 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3812 if (IsMax ? IsMinV : IsMaxV) {
3814 Ops.
erase(Ops.begin());
3816 }
else if (IsMax ? IsMaxV : IsMinV) {
3822 if (Ops.size() == 1)
return Ops[0];
// Find the first operand of the same kind (operands are kept sorted by
// SCEV type) so nested same-kind expressions can be flattened.
3826 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() <
Kind)
3831 if (Idx < Ops.size()) {
3832 bool DeletedAny =
false;
3833 while (Ops[Idx]->getSCEVType() ==
Kind) {
3835 Ops.
erase(Ops.begin()+Idx);
// Deduplicate adjacent operands: drop one of a pair when they are equal or
// one is provably redundant under the min/max ordering predicate.
3853 for (
unsigned i = 0,
e = Ops.size() - 1;
i !=
e; ++
i) {
3854 if (Ops[
i] == Ops[
i + 1] ||
3855 isKnownViaNonRecursiveReasoning(FirstPred, Ops[
i], Ops[
i + 1])) {
3858 Ops.
erase(Ops.begin() +
i + 1, Ops.begin() +
i + 2);
3861 }
else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[
i],
3864 Ops.
erase(Ops.begin() +
i, Ops.begin() +
i + 1);
3870 if (Ops.size() == 1)
return Ops[0];
3872 assert(!Ops.empty() &&
"Reduced smax down to nothing!");
// Unique the simplified expression: re-profile the (possibly shrunk)
// operand list and either return the existing node or allocate a new one.
3878 for (
unsigned i = 0,
e = Ops.size();
i !=
e; ++
i)
3879 ID.AddPointer(Ops[
i]);
3881 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP);
3883 return ExistingSCEV;
// Copy operands into allocator-owned storage and insert the new node.
3885 std::uninitialized_copy(Ops.begin(), Ops.end(),
O);
3886 SCEV *
S =
new (SCEVAllocator)
3889 UniqueSCEVs.InsertNode(
S,
IP);
// Fragment of a SCEV visitor that deduplicates operands of a sequential
// min/max expression: it walks the expression tree, remembering every
// operand seen, and drops repeats (safe because in a *sequential* min/max
// a later duplicate cannot change the result).
3896 class SCEVSequentialMinMaxDeduplicatingVisitor final
3897 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3898 Optional<const SCEV *>> {
// Recursion is only allowed into expressions of the root's kind (or its
// non-sequential counterpart).
3910 return RootKind ==
Kind || NonSequentialRootKind ==
Kind;
3913 RetVal visitAnyMinMaxExpr(
const SCEV *
S) {
3914 assert((isa<SCEVMinMaxExpr>(
S) || isa<SCEVSequentialMinMaxExpr>(
S)) &&
3915 "Only for min/max expressions.");
3918 if (!canRecurseInto(
Kind))
3921 auto *NAry = cast<SCEVNAryExpr>(
S);
3931 return isa<SCEVSequentialMinMaxExpr>(
S)
// visit(): skip (deduplicate) any operand already seen; otherwise dispatch
// to the base visitor.
3936 RetVal visit(
const SCEV *
S) {
3938 if (!SeenOps.
insert(
S).second)
3940 return Base::visit(
S);
3946 : SE(SE), RootKind(RootKind),
3947 NonSequentialRootKind(
// Visit each original operand, collecting rewritten ones and tracking
// whether anything changed.
3953 bool Changed =
false;
3957 for (
const SCEV *
Op : OrigOps) {
3958 RetVal NewOp = visit(
Op);
// Non-min/max expression kinds are opaque to deduplication: return as-is.
3980 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
3982 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
3984 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
3986 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
// All min/max kinds funnel through the shared handler above.
3989 return visitAnyMinMaxExpr(Expr);
3993 return visitAnyMinMaxExpr(Expr);
3997 return visitAnyMinMaxExpr(Expr);
4001 return visitAnyMinMaxExpr(Expr);
4005 return visitAnyMinMaxExpr(Expr);
4008 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
// Fragment: SCEVPoisonCollector gathers the SCEVUnknowns in an expression
// that may be poison; used below to decide whether poison in one expression
// implies poison in another.
4025 struct SCEVPoisonCollector {
// Whether to descend into sequential min/max (which stop poison
// propagation after the first operand).
4026 bool LookThroughSeq;
4028 SCEVPoisonCollector(
bool LookThroughSeq) : LookThroughSeq(LookThroughSeq) {}
4030 bool follow(
const SCEV *
S) {
4033 if (!LookThroughSeq && isa<SCEVSequentialMinMaxExpr>(
S))
4036 if (
auto *SU = dyn_cast<SCEVUnknown>(
S)) {
4042 bool isDone()
const {
return false; }
// impliesPoison fragment: collect maybe-poison unknowns of the first
// expression looking through sequential min/max...
4048 SCEVPoisonCollector PC1(
true);
4053 if (PC1.MaybePoison.empty())
// ...then collect those of the second without looking through, and require
// every maybe-poison source of the first to appear in the second.
4059 SCEVPoisonCollector PC2(
false);
4064 return all_of(PC1.MaybePoison,
4065 [&](
const SCEV *
S) { return PC2.MaybePoison.contains(S); });
// getSequentialMinMaxExpr fragment (interior lines missing): canonicalizes
// and uniques a sequential (poison-blocking) umin-style expression.
4071 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(
Kind) &&
4072 "Not a SCEVSequentialMinMaxExpr!");
4073 assert(!Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4074 if (Ops.size() == 1)
// Operand sanity: consistent types and pointer-ness.
4078 for (
unsigned i = 1,
e = Ops.size();
i !=
e; ++
i) {
4080 "Operand types don't match!");
4082 Ops[
i]->
getType()->isPointerTy() &&
4083 "min/max should be consistently pointerish");
// Fast path: previously uniqued identical expression.
4091 if (
const SCEV *
S = findExistingSCEVInCache(
Kind, Ops))
// Deduplicate operands via the visitor above (only safe for sequential
// semantics, where a later duplicate cannot change the result).
4098 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this,
Kind);
4099 bool Changed = Deduplicator.visit(
Kind, Ops, Ops);
// Flatten nested same-kind sequential min/max by splicing their operands
// in place.
4108 bool DeletedAny =
false;
4109 while (Idx < Ops.size()) {
4110 if (Ops[Idx]->getSCEVType() !=
Kind) {
4114 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]);
4115 Ops.
erase(Ops.begin() + Idx);
4116 Ops.
insert(Ops.begin() + Idx, SMME->op_begin(), SMME->op_end());
// Saturation/redundancy pruning over adjacent operands. NOTE(review): the
// conditions guarding these erases are not visible in this fragment.
4124 const SCEV *SaturationPoint;
4135 for (
unsigned i = 1,
e = Ops.size();
i !=
e; ++
i) {
4146 Ops.
erase(Ops.begin() +
i);
4151 if (isKnownViaNonRecursiveReasoning(Pred, Ops[
i - 1], Ops[
i])) {
4152 Ops.
erase(Ops.begin() +
i);
// Unique the simplified expression and allocate a node if none exists.
4161 for (
unsigned i = 0,
e = Ops.size();
i !=
e; ++
i)
4162 ID.AddPointer(Ops[
i]);
4164 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP);
4166 return ExistingSCEV;
4169 std::uninitialized_copy(Ops.begin(), Ops.end(),
O);
4170 SCEV *
S =
new (SCEVAllocator)
4173 UniqueSCEVs.InsertNode(
S,
IP);
// Fragments of several small helpers (interior lines missing).
// Size-of helpers: scalable vector types need vscale-based sizes.
4231 if (
auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
4240 if (
auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
// Offset-of helper: struct field offset from the DataLayout's StructLayout.
4255 IntTy,
getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
// getUnknown fragment: unique a SCEVUnknown wrapping Value V, asserting any
// cached node still refers to the same (non-stale) value.
4268 if (
SCEV *
S = UniqueSCEVs.FindNodeOrInsertPos(
ID,
IP)) {
4269 assert(cast<SCEVUnknown>(
S)->getValue() == V &&
4270 "Stale SCEVUnknown in uniquing map!");
// Thread the new unknown onto the FirstUnknown list and unique it.
4275 FirstUnknown = cast<SCEVUnknown>(
S);
4276 UniqueSCEVs.InsertNode(
S,
IP);
// Defining-scope comparison fragment: answers only when both scope bounds
// are precise, using dominance between the two scopes.
4324 bool PreciseA, PreciseB;
4325 auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4326 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4327 if (!PreciseA || !PreciseB)
4330 return (ScopeA == ScopeB) || DT.
dominates(ScopeA, ScopeB) ||
// getCouldNotCompute: the shared sentinel SCEV.
4336 return CouldNotCompute.get();
// checkValidity fragment: a SCEV is valid iff it contains no SCEVUnknown
// whose underlying Value has been nulled out (i.e. deleted IR).
4339 bool ScalarEvolution::checkValidity(
const SCEV *
S)
const {
4341 auto *SU = dyn_cast<SCEVUnknown>(
S);
4342 return SU && SU->getValue() ==
nullptr;
4345 return !ContainsNulls;
// containsAddRecurrence fragment: memoized via HasRecMap.
4350 if (
I != HasRecMap.
end())
4355 HasRecMap.
insert({
S, FoundAddRec});
// getSCEVValues fragment: look up the set of IR Values mapping to S.
4363 if (
SI == ExprValueMap.
end())
4372 return SI->second.getArrayRef();
// eraseValueFromMap fragment: remove V from both directions of the
// Value<->SCEV mapping, asserting the reverse entry existed.
4378 void ScalarEvolution::eraseValueFromMap(
Value *V) {
4380 if (
I != ValueExprMap.
end()) {
4381 auto EVIt = ExprValueMap.
find(
I->second);
4382 bool Removed = EVIt->second.remove(V);
4384 assert(Removed &&
"Value not in ExprValueMap?");
// insertValueToMap fragment: only inserts when V has no existing entry.
4389 void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *
S) {
4393 auto It = ValueExprMap.
find_as(V);
4394 if (It == ValueExprMap.
end()) {
// getSCEV fragment: return the cached SCEV or build one iteratively.
4405 if (
const SCEV *
S = getExistingSCEV(V))
4407 return createSCEVIter(V);
// getExistingSCEV fragment: cached lookup with a validity assertion on hit.
4410 const SCEV *ScalarEvolution::getExistingSCEV(
Value *V) {
4414 if (
I != ValueExprMap.
end()) {
4415 const SCEV *
S =
I->second;
4417 "existing SCEV has not been properly invalidated");
// Fragments of negation/subtraction/extension helpers (interior lines
// missing).
// Matches the pattern (-1 + (-1 * X)), i.e. "not X" expressed in SCEV form:
// a two-operand add whose first operand is all-ones.
4437 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
4438 if (!Add || Add->getNumOperands() != 2 ||
4439 !Add->getOperand(0)->isAllOnesValue())
4442 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
// Min/max negation: ~min(a, b) == max(~a, ~b); match each operand,
// bailing out (nullptr) if any fails to match.
4462 for (
const SCEV *Operand : MME->operands()) {
4465 return (
const SCEV *)
nullptr;
4466 MatchedOperands.push_back(Matched);
4471 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
// removePointerBase fragment: strip the pointer base out of a
// pointer-typed SCEV, recursing into addrecs and add expressions. An add
// may contain at most one pointer operand.
4481 assert(
P->getType()->isPointerTy());
4483 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(
P)) {
4491 if (
auto *Add = dyn_cast<SCEVAddExpr>(
P)) {
4494 const SCEV **PtrOp =
nullptr;
4495 for (
const SCEV *&AddOp : Ops) {
4496 if (AddOp->getType()->isPointerTy()) {
4497 assert(!PtrOp &&
"Cannot have multiple pointer ops");
// getMinusSCEV fragment: signed-min care when negating RHS.
4531 const bool RHSIsNotMinSigned =
// Assertion messages from the truncate-or-extend helper family
// (getTruncateOrZeroExtend / getNoopOr{Zero,Sign,Any}Extend); the
// surrounding asserts are not visible in this fragment.
4564 "Cannot truncate or zero extend with non-integer arguments!");
4576 "Cannot truncate or zero extend with non-integer arguments!");
4588 "Cannot noop or zero extend with non-integer arguments!");
4590 "getNoopOrZeroExtend cannot truncate!");
4600 "Cannot noop or sign extend with non-integer arguments!");
4602 "getNoopOrSignExtend cannot truncate!");
4612 "Cannot noop or any extend with non-integer arguments!");
4614 "getNoopOrAnyExtend cannot truncate!");