LLVM 23.0.0git
ScalarEvolutionExpander.cpp
Go to the documentation of this file.
1//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the scalar evolution expander,
10// which is used to generate the code corresponding to a given scalar evolution
11// expression.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/ScopeExit.h"
23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/Dominators.h"
31
32#if LLVM_ENABLE_ABI_BREAKING_CHECKS
33#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
34#else
35#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
36#endif
37
38using namespace llvm;
39
41 "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
42 cl::desc("When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
44
45using namespace PatternMatch;
46using namespace SCEVPatternMatch;
47
49 NUW = false;
50 NSW = false;
51 Exact = false;
52 Disjoint = false;
53 NNeg = false;
54 SameSign = false;
56 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I)) {
57 NUW = OBO->hasNoUnsignedWrap();
58 NSW = OBO->hasNoSignedWrap();
59 }
60 if (auto *PEO = dyn_cast<PossiblyExactOperator>(I))
61 Exact = PEO->isExact();
62 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
63 Disjoint = PDI->isDisjoint();
64 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
65 NNeg = PNI->hasNonNeg();
66 if (auto *TI = dyn_cast<TruncInst>(I)) {
67 NUW = TI->hasNoUnsignedWrap();
68 NSW = TI->hasNoSignedWrap();
69 }
71 GEPNW = GEP->getNoWrapFlags();
72 if (auto *ICmp = dyn_cast<ICmpInst>(I))
73 SameSign = ICmp->hasSameSign();
74}
75
78 I->setHasNoUnsignedWrap(NUW);
79 I->setHasNoSignedWrap(NSW);
80 }
82 I->setIsExact(Exact);
83 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
84 PDI->setIsDisjoint(Disjoint);
85 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
86 PNI->setNonNeg(NNeg);
87 if (isa<TruncInst>(I)) {
88 I->setHasNoUnsignedWrap(NUW);
89 I->setHasNoSignedWrap(NSW);
90 }
92 GEP->setNoWrapFlags(GEPNW);
93 if (auto *ICmp = dyn_cast<ICmpInst>(I))
94 ICmp->setSameSign(SameSign);
95}
96
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       // NOTE(review): the remaining
                                       // parameters (presumably the cast
                                       // opcode `Op` and the insertion point
                                       // `IP` used below) were dropped by the
                                       // page extraction here; confirm against
                                       // the upstream source.
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Only non-constants are candidates for reuse via a scan of V's users;
  // constants fall through to the creation path below.
  if (!isa<Constant>(V)) {
    // Check to see if there is already a cast!
    for (User *U : V->users()) {
      if (U->getType() != Ty)
        continue;
      // NOTE(review): the line defining CI (presumably a dyn_cast of U to
      // CastInst) was dropped by the page extraction here.
      if (!CI || CI->getOpcode() != Op)
        continue;

      // Found a suitable cast that is at IP or comes before IP. Use it. Note
      // that the cast must also properly dominate the Builder's insertion
      // point.
      if (IP->getParent() == CI->getParent() && &*BIP != CI &&
          (&*IP == CI || CI->comesBefore(&*IP))) {
        Ret = CI;
        break;
      }
    }
  }

  // Create a new cast.
  if (!Ret) {
    // The guard restores the builder's previous insertion point on scope exit.
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}
151
154 Instruction *MustDominate) const {
155 BasicBlock::iterator IP = ++I->getIterator();
156 if (auto *II = dyn_cast<InvokeInst>(I))
157 IP = II->getNormalDest()->begin();
158
159 while (isa<PHINode>(IP))
160 ++IP;
161
163 ++IP;
164 } else if (isa<CatchSwitchInst>(IP)) {
165 IP = MustDominate->getParent()->getFirstInsertionPt();
166 } else {
167 assert(!IP->isEHPad() && "unexpected eh pad!");
168 }
169
170 // Adjust insert point to be after instructions inserted by the expander, so
171 // we can re-use already inserted instructions. Avoid skipping past the
172 // original \p MustDominate, in case it is an inserted instruction.
173 while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
174 ++IP;
175
176 return IP;
177}
178
180 SmallVector<Value *> WorkList;
181 SmallPtrSet<Value *, 8> DeletedValues;
183 while (!WorkList.empty()) {
184 Value *V = WorkList.pop_back_val();
185 if (DeletedValues.contains(V))
186 continue;
187 auto *I = dyn_cast<Instruction>(V);
188 if (!I || I == Root || !isInsertedInstruction(I) ||
190 continue;
191 append_range(WorkList, I->operands());
192 InsertedValues.erase(I);
193 InsertedPostIncValues.erase(I);
194 DeletedValues.insert(I);
195 I->eraseFromParent();
196 }
197}
198
// Pick the best point at which to materialize a cast of V: arguments are cast
// in the entry block, instructions right after themselves, and constants in
// the entry block.
// NOTE(review): the return type line of this signature (presumably
// BasicBlock::iterator) was dropped by the page extraction here.
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    // Skip past leading bitcasts of *other* arguments so that argument casts
    // stay grouped at the top of the entry block; stop at a bitcast of A
    // itself or at the first non-argument-bitcast instruction.
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A))
      ++IP;
    return IP;
  }

  // Cast the instruction immediately after the instruction.
  // NOTE(review): the line introducing I (presumably a dyn_cast of V to
  // Instruction guarding this return) was dropped by the page extraction here.
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of a constant,
  // so let's plop this cast into the function's entry block.
  // NOTE(review): the first line of this assert (presumably an
  // isa<Constant>(V) check) was dropped by the page extraction here.
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}
225
226/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
227/// which must be possible with a noop cast, doing what we can to share
228/// the casts.
229Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
230 Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
231 assert((Op == Instruction::BitCast ||
232 Op == Instruction::PtrToInt ||
233 Op == Instruction::IntToPtr) &&
234 "InsertNoopCastOfTo cannot perform non-noop casts!");
235 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
236 "InsertNoopCastOfTo cannot change sizes!");
237
238 // inttoptr only works for integral pointers. For non-integral pointers, we
239 // can create a GEP on null with the integral value as index. Note that
240 // it is safe to use GEP of null instead of inttoptr here, because only
241 // expressions already based on a GEP of null should be converted to pointers
242 // during expansion.
243 if (Op == Instruction::IntToPtr) {
244 auto *PtrTy = cast<PointerType>(Ty);
245 if (DL.isNonIntegralPointerType(PtrTy))
246 return Builder.CreatePtrAdd(Constant::getNullValue(PtrTy), V, "scevgep");
247 }
248 // Short-circuit unnecessary bitcasts.
249 if (Op == Instruction::BitCast) {
250 if (V->getType() == Ty)
251 return V;
252 if (CastInst *CI = dyn_cast<CastInst>(V)) {
253 if (CI->getOperand(0)->getType() == Ty)
254 return CI->getOperand(0);
255 }
256 }
257 // Short-circuit unnecessary inttoptr<->ptrtoint casts.
258 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
259 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
260 if (CastInst *CI = dyn_cast<CastInst>(V))
261 if ((CI->getOpcode() == Instruction::PtrToInt ||
262 CI->getOpcode() == Instruction::IntToPtr) &&
263 SE.getTypeSizeInBits(CI->getType()) ==
264 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
265 return CI->getOperand(0);
266 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
267 if ((CE->getOpcode() == Instruction::PtrToInt ||
268 CE->getOpcode() == Instruction::IntToPtr) &&
269 SE.getTypeSizeInBits(CE->getType()) ==
270 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
271 return CE->getOperand(0);
272 }
273
274 // Fold a cast of a constant.
275 if (Constant *C = dyn_cast<Constant>(V))
276 return ConstantExpr::getCast(Op, C, Ty);
277
278 // Try to reuse existing cast, or insert one.
279 return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
280}
281
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
///
/// \param Opcode        the binary opcode to emit.
/// \param LHS, RHS      already-expanded operand values.
/// \param Flags         SCEV no-wrap flags the result may carry.
/// \param IsSafeToHoist whether the op may be hoisted into loop preheaders.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, DL))
        return Res;

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Reject a candidate whose poison-generating flags disagree with the
      // requested Flags — reusing it could introduce poison that SCEV has
      // proven absent (or lose flags the caller is entitled to).
      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        // NOTE(review): a line (presumably an OverflowingBinaryOperator guard
        // opening this brace pair) was dropped by the page extraction here.
        if (I->hasNoSignedWrap() != any(Flags & SCEV::FlagNSW))
          return true;
        if (I->hasNoUnsignedWrap() != any(Flags & SCEV::FlagNUW))
          return true;
        }
        // Conservatively, do not use any instruction which has any of exact
        // flags installed.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  Builder.SetCurrentDebugLocation(Loc);
  bool IsNUW = any(Flags & SCEV::FlagNUW);
  bool IsNSW = any(Flags & SCEV::FlagNSW);
  // Don't use folder when expanding post-inc rewrites in LSRMode to preserve
  // the rewrites.
  if (LSRMode && !PostIncLoops.empty() &&
      all_of(PostIncLoops, [&](const Loop *L) {
        return !L->contains(Builder.GetInsertBlock());
      })) {
    auto *BO = BinaryOperator::Create(Opcode, LHS, RHS);
    if (IsNUW)
      BO->setHasNoUnsignedWrap();
    if (IsNSW)
      BO->setHasNoSignedWrap();
    return Builder.Insert(BO);
  }
  return Builder.CreateNoWrapBinOp(Opcode, LHS, RHS, IsNUW, IsNSW);
}
358
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
/// \param Offset SCEV of the (byte) offset to add.
/// \param V      the pointer base value.
/// \param Flags  no-wrap flags of the addition; FlagNUW carries over to the
///               emitted GEP.
Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Value *V,
                                    SCEV::NoWrapFlags Flags) {
  // NOTE(review): the first line of this assert (presumably checking that V
  // is not an Instruction, or dominates the insertion point) was dropped by
  // the page extraction here.
         SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

  Value *Idx = expand(Offset);
  // Translate the SCEV NUW flag onto the GEP no-wrap flags.
  GEPNoWrapFlags NW = any(Flags & SCEV::FlagNUW)
                          // NOTE(review): the '?' branch of this ternary
                          // (presumably GEPNoWrapFlags::noUnsignedWrap()) was
                          // dropped by the page extraction here.
                          : GEPNoWrapFlags::none();

  // Fold a GEP with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(V))
    if (Constant *CRHS = dyn_cast<Constant>(Idx))
      return Builder.CreatePtrAdd(CLHS, CRHS, "", NW);

  // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(IP)) {
        if (GEP->getPointerOperand() == V &&
            GEP->getSourceElementType() == Builder.getInt8Ty() &&
            GEP->getOperand(1) == Idx) {
          // Intersect flags with the reused GEP: keep only flags valid for
          // both the existing and the requested use.
          rememberFlags(GEP);
          GEP->setNoWrapFlags(GEP->getNoWrapFlags() & NW);
          return &*IP;
        }
      }
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // Emit a GEP.
  return Builder.CreatePtrAdd(V, Idx, "scevgep", NW);
}
438
439/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
440/// SCEV expansion. If they are nested, this is the most nested. If they are
441/// neighboring, pick the later.
442static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
443 DominatorTree &DT) {
444 if (!A) return B;
445 if (!B) return A;
446 if (A->contains(B)) return B;
447 if (B->contains(A)) return A;
448 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
449 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
450 return A; // Arbitrarily break the tie.
451}
452
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop. Results are memoized in
/// RelevantLoops.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  // try_emplace returns {iterator, inserted}; on a cache hit, return the
  // memoized loop immediately.
  auto Pair = RelevantLoops.try_emplace(S);
  if (!Pair.second)
    return Pair.first->second;

  switch (S->getSCEVType()) {
  case scConstant:
  case scVScale:
    return nullptr; // A constant has no relevant loops.
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
  case scPtrToAddr:
  case scPtrToInt:
  case scAddExpr:
  case scMulExpr:
  case scUDivExpr:
  case scAddRecExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  // NOTE(review): a line (presumably the scSequentialUMinExpr case label and
  // the opening brace of this block) was dropped by the page extraction here.
    const Loop *L = nullptr;
    // An addrec's own loop participates along with its operands' loops.
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : S->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    // Re-index the map rather than reusing Pair.first — presumably because
    // the recursive calls above may have invalidated the iterator; confirm.
    return RelevantLoops[S] = L;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(S);
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  // NOTE(review): a line (presumably the scCouldNotCompute case label) was
  // dropped by the page extraction here.
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unexpected SCEV type!");
}
498
499namespace {
500
501/// LoopCompare - Compare loops by PickMostRelevantLoop.
502class LoopCompare {
503 DominatorTree &DT;
504public:
505 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
506
507 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
508 std::pair<const Loop *, const SCEV *> RHS) const {
509 // Keep pointer operands sorted at the end.
510 if (LHS.second->getType()->isPointerTy() !=
511 RHS.second->getType()->isPointerTy())
512 return LHS.second->getType()->isPointerTy();
513
514 // Compare loops with PickMostRelevantLoop.
515 if (LHS.first != RHS.first)
516 return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
517
518 // If one operand is a non-constant negative and the other is not,
519 // put the non-constant negative on the right so that a sub can
520 // be used instead of a negate and add.
521 if (LHS.second->isNonConstantNegative()) {
522 if (!RHS.second->isNonConstantNegative())
523 return false;
524 } else if (RHS.second->isNonConstantNegative())
525 return true;
526
527 // Otherwise they are equivalent according to this comparison.
528 return false;
529 }
530};
531
532}
533
// Expand a SCEV add expression into IR: recognizes canonical urem patterns,
// forms GEPs when the running sum is a pointer, and prefers sub over
// negate+add for negative operands.
Value *SCEVExpander::visitAddExpr(SCEVUseT<const SCEVAddExpr *> S) {
  // Recognize the canonical representation of an unsimplifed urem.
  const SCEV *URemLHS = nullptr;
  const SCEV *URemRHS = nullptr;
  if (match(S, m_scev_URem(m_SCEV(URemLHS), m_SCEV(URemRHS), SE))) {
    Value *LHS = expand(URemLHS);
    Value *RHS = expand(URemRHS);
    // Not hoistable: a division-like op is only emitted at this position.
    return InsertBinop(Instruction::URem, LHS, RHS, SCEV::FlagAnyWrap,
                       /*IsSafeToHoist*/ false);
  }

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  // NOTE(review): the declaration of OpsAndLoops (presumably a SmallVector of
  // {const Loop *, const SCEV *} pairs) was dropped by the page extraction.
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
      continue;
    }

    assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
    if (isa<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      // NOTE(review): the declaration of NewOps (presumably a SmallVector of
      // const SCEV *) was dropped by the page extraction here.
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum, S.getNoWrapFlags());
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expand(SE.getNegativeSCEV(Op));
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expand(Op);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum))
        std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S.getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}
605
// Expand a SCEV mul expression into IR, using binary powering for repeated
// factors, negate for *-1, and shifts for power-of-two constant factors.
Value *SCEVExpander::visitMulExpr(SCEVUseT<const SCEVMulExpr *> S) {
  Type *Ty = S->getType();

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  // NOTE(review): the declaration of OpsAndLoops (presumably a SmallVector of
  // {const Loop *, const SCEV *} pairs) was dropped by the page extraction.
  for (const SCEV *Op : reverse(S->operands()))
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include those of them
    // that are needed into the result.
    Value *P = expand(I->second);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      // P holds X^BinExp; multiply it into Result when that bit of the
      // exponent is set.
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      // NOTE(review): a line (presumably the
                                      // no-wrap-flags argument) was dropped by
                                      // the page extraction here.
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S.getNoWrapFlags();
        // clear nsw flag if shl will produce poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S.getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}
697
698Value *SCEVExpander::visitUDivExpr(SCEVUseT<const SCEVUDivExpr *> S) {
699 Value *LHS = expand(S->getLHS());
700 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
701 const APInt &RHS = SC->getAPInt();
702 if (RHS.isPowerOf2())
703 return InsertBinop(Instruction::LShr, LHS,
704 ConstantInt::get(SC->getType(), RHS.logBase2()),
705 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
706 }
707
708 const SCEV *RHSExpr = S->getRHS();
709 Value *RHS = expand(RHSExpr);
710 if (SafeUDivMode) {
711 bool GuaranteedNotPoison =
712 ScalarEvolution::isGuaranteedNotToBePoison(RHSExpr);
713 if (!GuaranteedNotPoison)
714 RHS = Builder.CreateFreeze(RHS);
715
716 // We need an umax if either RHSExpr is not known to be zero, or if it is
717 // not guaranteed to be non-poison. In the later case, the frozen poison may
718 // be 0.
719 if (!SE.isKnownNonZero(RHSExpr) || !GuaranteedNotPoison)
720 RHS = Builder.CreateIntrinsic(RHS->getType(), Intrinsic::umax,
721 {RHS, ConstantInt::get(RHS->getType(), 1)});
722 }
723 return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
724 /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
725}
726
727/// Determine if this is a well-behaved chain of instructions leading back to
728/// the PHI. If so, it may be reused by expanded expressions.
729bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
730 const Loop *L) {
731 if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
732 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
733 return false;
734 // If any of the operands don't dominate the insert position, bail.
735 // Addrec operands are always loop-invariant, so this can only happen
736 // if there are instructions which haven't been hoisted.
737 if (L == IVIncInsertLoop) {
738 for (Use &Op : llvm::drop_begin(IncV->operands()))
739 if (Instruction *OInst = dyn_cast<Instruction>(Op))
740 if (!SE.DT.dominates(OInst, IVIncInsertPos))
741 return false;
742 }
743 // Advance to the next instruction.
744 IncV = dyn_cast<Instruction>(IncV->getOperand(0));
745 if (!IncV)
746 return false;
747
748 if (IncV->mayHaveSideEffects())
749 return false;
750
751 if (IncV == PN)
752 return true;
753
754 return isNormalAddRecExprPHI(PN, IncV, L);
755}
756
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
// NOTE(review): the first signature line (return type, qualified name, and
// the IncV parameter) was dropped by the page extraction here.
                                           Instruction *InsertPos,
                                           bool allowScale) {
  // The increment cannot be its own operand source.
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    // NOTE(review): the line defining OInst (presumably a dyn_cast of
    // IncV->getOperand(1) to Instruction) was dropped by the page extraction.
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    // Validate every index operand: constants are always acceptable;
    // instructions must dominate InsertPos.
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // GEPs produced by SCEVExpander use i8 element type.
      if (!cast<GEPOperator>(IncV)->getSourceElementType()->isIntegerTy(8))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}
805
/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  // NOTE(review): the line defining It (presumably I->getIterator()) was
  // dropped by the page extraction here.
  BasicBlock::iterator NewInsertPt = std::next(It);
  // Fix the live builder first, then every saved builder on the guard stack.
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}
822
/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
// NOTE(review): the first signature line (return type, qualified name, and
// the IncV/InsertPos parameters) was dropped by the page extraction here.
                                  bool RecomputePoisonFlags) {
  // Poison-generating flags proved at the old position may not hold at the
  // new one: drop them, then re-derive what SCEV can still prove.
  auto FixupPoisonFlags = [this](Instruction *I) {
    // Drop flags that are potentially inferred from old context and infer flags
    // in new context.
    rememberFlags(I);
    I->dropPoisonGeneratingFlags();
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
      if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
        auto *BO = cast<BinaryOperator>(I);
        BO->setHasNoUnsignedWrap(
        // NOTE(review): the argument line of this call was dropped by the
        // page extraction here.
        BO->setHasNoSignedWrap(
        // NOTE(review): the argument line of this call was dropped by the
        // page extraction here.
      }
  };

  // Already dominating: nothing to move; optionally refresh flags.
  if (SE.DT.dominates(IncV, InsertPos)) {
    if (RecomputePoisonFlags)
      FixupPoisonFlags(IncV);
    return true;
  }

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  // NOTE(review): the declaration of IVIncs (presumably a SmallVector of
  // Instruction *) was dropped by the page extraction here.
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  // Move in dependency order: the deepest operand (collected last) first.
  for (Instruction *I : llvm::reverse(IVIncs)) {
    fixupInsertPoints(I);
    I->moveBefore(InsertPos->getIterator());
    if (RecomputePoisonFlags)
      FixupPoisonFlags(I);
  }
  return true;
}
878
880 PHINode *WidePhi,
881 Instruction *OrigInc,
882 Instruction *WideInc) {
883 return match(OrigInc, m_c_BinOp(m_Specific(OrigPhi), m_Value())) &&
884 match(WideInc, m_c_BinOp(m_Specific(WidePhi), m_Value())) &&
885 OrigInc->getOpcode() == WideInc->getOpcode();
886}
887
888/// Determine if this cyclic phi is in a form that would have been generated by
889/// LSR. We don't care if the phi was actually expanded in this pass, as long
890/// as it is in a low-cost form, for example, no implied multiplication. This
891/// should match any patterns generated by getAddRecExprPHILiterally and
892/// expandAddtoGEP.
893bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
894 const Loop *L) {
895 for(Instruction *IVOper = IncV;
896 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
897 /*allowScale=*/false));) {
898 if (IVOper == PN)
899 return true;
900 }
901 return false;
902}
903
904/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
905/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
906/// need to materialize IV increments elsewhere to handle difficult situations.
907Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
908 bool useSubtract) {
909 Value *IncV;
910 // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
911 if (PN->getType()->isPointerTy()) {
912 // TODO: Change name to IVName.iv.next.
913 IncV = Builder.CreatePtrAdd(PN, StepV, "scevgep");
914 } else {
915 IncV = useSubtract ?
916 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
917 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
918 }
919 return IncV;
920}
921
922/// Check whether we can cheaply express the requested SCEV in terms of
923/// the available PHI SCEV by truncation and/or inversion of the step.
925 const SCEVAddRecExpr *Phi,
926 const SCEVAddRecExpr *Requested,
927 bool &InvertStep) {
928 // We can't transform to match a pointer PHI.
929 Type *PhiTy = Phi->getType();
930 Type *RequestedTy = Requested->getType();
931 if (PhiTy->isPointerTy() || RequestedTy->isPointerTy())
932 return false;
933
934 if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
935 return false;
936
937 // Try truncate it if necessary.
938 Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
939 if (!Phi)
940 return false;
941
942 // Check whether truncation will help.
943 if (Phi == Requested) {
944 InvertStep = false;
945 return true;
946 }
947
948 // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
949 if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
950 InvertStep = true;
951 return true;
952 }
953
954 return false;
955}
956
957static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
958 if (!isa<IntegerType>(AR->getType()))
959 return false;
960
961 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
962 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
963 const SCEV *Step = AR->getStepRecurrence(SE);
964 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
965 SE.getSignExtendExpr(AR, WideTy));
966 const SCEV *ExtendAfterOp =
967 SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
968 return ExtendAfterOp == OpAfterExtend;
969}
970
971static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
972 if (!isa<IntegerType>(AR->getType()))
973 return false;
974
975 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
976 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
977 const SCEV *Step = AR->getStepRecurrence(SE);
978 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
979 SE.getZeroExtendExpr(AR, WideTy));
980 const SCEV *ExtendAfterOp =
981 SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
982 return ExtendAfterOp == OpAfterExtend;
983}
984
985/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
986/// the base addrec, which is the addrec without any non-loop-dominating
987/// values, and return the PHI.
988PHINode *
989SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
990 const Loop *L, Type *&TruncTy,
991 bool &InvertStep) {
992 assert((!IVIncInsertLoop || IVIncInsertPos) &&
993 "Uninitialized insert position");
994
995 // Reuse a previously-inserted PHI, if present.
996 BasicBlock *LatchBlock = L->getLoopLatch();
997 if (LatchBlock) {
998 PHINode *AddRecPhiMatch = nullptr;
999 Instruction *IncV = nullptr;
1000 TruncTy = nullptr;
1001 InvertStep = false;
1002
1003 // Only try partially matching scevs that need truncation and/or
1004 // step-inversion if we know this loop is outside the current loop.
1005 bool TryNonMatchingSCEV =
1006 IVIncInsertLoop &&
1007 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1008
1009 for (PHINode &PN : L->getHeader()->phis()) {
1010 if (!SE.isSCEVable(PN.getType()))
1011 continue;
1012
1013 // We should not look for a incomplete PHI. Getting SCEV for a incomplete
1014 // PHI has no meaning at all.
1015 if (!PN.isComplete()) {
1017 DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1018 continue;
1019 }
1020
1021 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
1022 if (!PhiSCEV)
1023 continue;
1024
1025 bool IsMatchingSCEV = PhiSCEV == Normalized;
1026 // We only handle truncation and inversion of phi recurrences for the
1027 // expanded expression if the expanded expression's loop dominates the
1028 // loop we insert to. Check now, so we can bail out early.
1029 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1030 continue;
1031
1032 // TODO: this possibly can be reworked to avoid this cast at all.
1033 Instruction *TempIncV =
1035 if (!TempIncV)
1036 continue;
1037
1038 // Check whether we can reuse this PHI node.
1039 if (LSRMode) {
1040 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1041 continue;
1042 } else {
1043 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1044 continue;
1045 }
1046
1047 // Stop if we have found an exact match SCEV.
1048 if (IsMatchingSCEV) {
1049 IncV = TempIncV;
1050 TruncTy = nullptr;
1051 InvertStep = false;
1052 AddRecPhiMatch = &PN;
1053 break;
1054 }
1055
1056 // Try whether the phi can be translated into the requested form
1057 // (truncated and/or offset by a constant).
1058 if ((!TruncTy || InvertStep) &&
1059 canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1060 // Record the phi node. But don't stop we might find an exact match
1061 // later.
1062 AddRecPhiMatch = &PN;
1063 IncV = TempIncV;
1064 TruncTy = Normalized->getType();
1065 }
1066 }
1067
1068 if (AddRecPhiMatch) {
1069 // Ok, the add recurrence looks usable.
1070 // Remember this PHI, even in post-inc mode.
1071 InsertedValues.insert(AddRecPhiMatch);
1072 // Remember the increment.
1073 rememberInstruction(IncV);
1074 // Those values were not actually inserted but re-used.
1075 ReusedValues.insert(AddRecPhiMatch);
1076 ReusedValues.insert(IncV);
1077 return AddRecPhiMatch;
1078 }
1079 }
1080
1081 // Save the original insertion point so we can restore it when we're done.
1082 SCEVInsertPointGuard Guard(Builder, this);
1083
1084 // Another AddRec may need to be recursively expanded below. For example, if
1085 // this AddRec is quadratic, the StepV may itself be an AddRec in this
1086 // loop. Remove this loop from the PostIncLoops set before expanding such
1087 // AddRecs. Otherwise, we cannot find a valid position for the step
1088 // (i.e. StepV can never dominate its loop header). Ideally, we could do
1089 // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1090 // so it's not worth implementing SmallPtrSet::swap.
1091 PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1092 PostIncLoops.clear();
1093
1094 // Expand code for the start value into the loop preheader.
1095 assert(L->getLoopPreheader() &&
1096 "Can't expand add recurrences without a loop preheader!");
1097 Value *StartV =
1098 expand(Normalized->getStart(), L->getLoopPreheader()->getTerminator());
1099
1100 // StartV must have been be inserted into L's preheader to dominate the new
1101 // phi.
1102 assert(!isa<Instruction>(StartV) ||
1103 SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1104 L->getHeader()));
1105
1106 // Expand code for the step value. Do this before creating the PHI so that PHI
1107 // reuse code doesn't see an incomplete PHI.
1108 const SCEV *Step = Normalized->getStepRecurrence(SE);
1109 Type *ExpandTy = Normalized->getType();
1110 // If the stride is negative, insert a sub instead of an add for the increment
1111 // (unless it's a constant, because subtracts of constants are canonicalized
1112 // to adds).
1113 bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1114 if (useSubtract)
1115 Step = SE.getNegativeSCEV(Step);
1116 // Expand the step somewhere that dominates the loop header.
1117 Value *StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1118
1119 // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1120 // we actually do emit an addition. It does not apply if we emit a
1121 // subtraction.
1122 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1123 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1124
1125 // Create the PHI.
1126 BasicBlock *Header = L->getHeader();
1127 Builder.SetInsertPoint(Header, Header->begin());
1128 PHINode *PN =
1129 Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
1130
1131 // Create the step instructions and populate the PHI.
1132 for (BasicBlock *Pred : predecessors(Header)) {
1133 // Add a start value.
1134 if (!L->contains(Pred)) {
1135 PN->addIncoming(StartV, Pred);
1136 continue;
1137 }
1138
1139 // Create a step value and add it to the PHI.
1140 // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1141 // instructions at IVIncInsertPos.
1142 Instruction *InsertPos = L == IVIncInsertLoop ?
1143 IVIncInsertPos : Pred->getTerminator();
1144 Builder.SetInsertPoint(InsertPos);
1145 Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1146
1148 if (IncrementIsNUW)
1149 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1150 if (IncrementIsNSW)
1151 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1152 }
1153 PN->addIncoming(IncV, Pred);
1154 }
1155
1156 // After expanding subexpressions, restore the PostIncLoops set so the caller
1157 // can ensure that IVIncrement dominates the current uses.
1158 PostIncLoops = SavedPostIncLoops;
1159
1160 // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
1161 // effective when we are able to use an IV inserted here, so record it.
1162 InsertedValues.insert(PN);
1163 InsertedIVs.push_back(PN);
1164 return PN;
1165}
1166
1167Value *
1168SCEVExpander::expandAddRecExprLiterally(SCEVUseT<const SCEVAddRecExpr *> S) {
1169 const Loop *L = S->getLoop();
1170
1171 // Determine a normalized form of this expression, which is the expression
1172 // before any post-inc adjustment is made.
1173 const SCEVAddRecExpr *Normalized = S;
1174 if (PostIncLoops.count(L)) {
1176 Loops.insert(L);
1177 Normalized = cast<SCEVAddRecExpr>(
1178 normalizeForPostIncUse(S, Loops, SE, /*CheckInvertible=*/false));
1179 }
1180
1181 [[maybe_unused]] const SCEV *Start = Normalized->getStart();
1182 const SCEV *Step = Normalized->getStepRecurrence(SE);
1183 assert(SE.properlyDominates(Start, L->getHeader()) &&
1184 "Start does not properly dominate loop header");
1185 assert(SE.dominates(Step, L->getHeader()) && "Step not dominate loop header");
1186
1187 // In some cases, we decide to reuse an existing phi node but need to truncate
1188 // it and/or invert the step.
1189 Type *TruncTy = nullptr;
1190 bool InvertStep = false;
1191 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1192
1193 // Accommodate post-inc mode, if necessary.
1194 Value *Result;
1195 if (!PostIncLoops.count(L))
1196 Result = PN;
1197 else {
1198 // In PostInc mode, use the post-incremented value.
1199 BasicBlock *LatchBlock = L->getLoopLatch();
1200 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1201 Result = PN->getIncomingValueForBlock(LatchBlock);
1202
1203 // We might be introducing a new use of the post-inc IV that is not poison
1204 // safe, in which case we should drop poison generating flags. Only keep
1205 // those flags for which SCEV has proven that they always hold.
1206 if (isa<OverflowingBinaryOperator>(Result)) {
1207 auto *I = cast<Instruction>(Result);
1208 if (!S->hasNoUnsignedWrap())
1209 I->setHasNoUnsignedWrap(false);
1210 if (!S->hasNoSignedWrap())
1211 I->setHasNoSignedWrap(false);
1212 }
1213
1214 // For an expansion to use the postinc form, the client must call
1215 // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1216 // or dominated by IVIncInsertPos.
1217 if (isa<Instruction>(Result) &&
1218 !SE.DT.dominates(cast<Instruction>(Result),
1219 &*Builder.GetInsertPoint())) {
1220 // The induction variable's postinc expansion does not dominate this use.
1221 // IVUsers tries to prevent this case, so it is rare. However, it can
1222 // happen when an IVUser outside the loop is not dominated by the latch
1223 // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1224 // all cases. Consider a phi outside whose operand is replaced during
1225 // expansion with the value of the postinc user. Without fundamentally
1226 // changing the way postinc users are tracked, the only remedy is
1227 // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1228 // but hopefully expandCodeFor handles that.
1229 bool useSubtract =
1230 !S->getType()->isPointerTy() && Step->isNonConstantNegative();
1231 if (useSubtract)
1232 Step = SE.getNegativeSCEV(Step);
1233 Value *StepV;
1234 {
1235 // Expand the step somewhere that dominates the loop header.
1236 SCEVInsertPointGuard Guard(Builder, this);
1237 StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1238 }
1239 Result = expandIVInc(PN, StepV, L, useSubtract);
1240 }
1241 }
1242
1243 // We have decided to reuse an induction variable of a dominating loop. Apply
1244 // truncation and/or inversion of the step.
1245 if (TruncTy) {
1246 // Truncate the result.
1247 if (TruncTy != Result->getType())
1248 Result = Builder.CreateTrunc(Result, TruncTy);
1249
1250 // Invert the result.
1251 if (InvertStep)
1252 Result = Builder.CreateSub(expand(Normalized->getStart()), Result);
1253 }
1254
1255 return Result;
1256}
1257
1258Value *SCEVExpander::tryToReuseLCSSAPhi(SCEVUseT<const SCEVAddRecExpr *> S) {
1259 Type *STy = S->getType();
1260 const Loop *L = S->getLoop();
1261 BasicBlock *EB = L->getExitBlock();
1262 if (!EB || !EB->getSinglePredecessor() ||
1263 !SE.DT.dominates(EB, Builder.GetInsertBlock()))
1264 return nullptr;
1265
1266 // Helper to check if the diff between S and ExitSCEV is simple enough to
1267 // allow reusing the LCSSA phi.
1268 auto CanReuse = [&](const SCEV *ExitSCEV) -> const SCEV * {
1269 if (isa<SCEVCouldNotCompute>(ExitSCEV))
1270 return nullptr;
1271 const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
1272 const SCEV *Op = Diff;
1278 return nullptr;
1279 return Diff;
1280 };
1281
1282 for (auto &PN : EB->phis()) {
1283 if (!SE.isSCEVable(PN.getType()))
1284 continue;
1285 auto *ExitSCEV = SE.getSCEV(&PN);
1286 if (!isa<SCEVAddRecExpr>(ExitSCEV))
1287 continue;
1288 Type *PhiTy = PN.getType();
1289 const SCEV *Diff = nullptr;
1290 if (STy->isIntegerTy() && PhiTy->isPointerTy() &&
1291 DL.getAddressType(PhiTy) == STy) {
1292 // Prefer ptrtoaddr over ptrtoint.
1293 const SCEV *AddrSCEV = SE.getPtrToAddrExpr(ExitSCEV);
1294 Diff = CanReuse(AddrSCEV);
1295 if (!Diff) {
1296 const SCEV *IntSCEV = SE.getPtrToIntExpr(ExitSCEV, STy);
1297 Diff = CanReuse(IntSCEV);
1298 }
1299 } else if (STy == PhiTy) {
1300 Diff = CanReuse(ExitSCEV);
1301 }
1302 if (!Diff)
1303 continue;
1304
1305 assert(Diff->getType()->isIntegerTy() &&
1306 "difference must be of integer type");
1307 Value *DiffV = expand(Diff);
1308 Value *BaseV = fixupLCSSAFormFor(&PN);
1309 if (PhiTy->isPointerTy()) {
1310 if (STy->isPointerTy())
1311 return Builder.CreatePtrAdd(BaseV, DiffV);
1312 BaseV = Builder.CreatePtrToAddr(BaseV);
1313 }
1314 return Builder.CreateAdd(BaseV, DiffV);
1315 }
1316
1317 return nullptr;
1318}
1319
/// Expand an add recurrence. In canonical mode this is rewritten in terms of
/// a canonical {0,+,1} IV via evaluateAtIteration; otherwise it is expanded
/// literally as a phi/increment pair.
Value *SCEVExpander::visitAddRecExpr(SCEVUseT<const SCEVAddRecExpr *> S) {
  // In canonical mode we compute the addrec as an expression of a canonical IV
  // using evaluateAtIteration and expand the resulting SCEV expression. This
  // way we avoid introducing new IVs to carry on the computation of the addrec
  // throughout the loop.
  //
  // For nested addrecs evaluateAtIteration might need a canonical IV of a
  // type wider than the addrec itself. Emitting a canonical IV of the
  // proper type might produce non-legal types, for example expanding an i64
  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
  // back to non-canonical mode for nested addrecs.
  if (!CanonicalMode || (S->getNumOperands() > 2))
    return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
      !S->getType()->isPointerTy()) {
    // Any-extend all operands to the canonical IV's width, expand the wide
    // addrec, then truncate the result back down.
    SmallVector<SCEVUse, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
    Value *V = expand(
        SE.getAddRecExpr(NewOps, S->getLoop(), S.getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        // NOTE(review): a source line was lost here in extraction; upstream
        // reads `isa<Instruction>(V) ? findInsertPointAfter(cast<Instruction>(V),`
        // — confirm against the LLVM repository before relying on this code.
                             &*Builder.GetInsertPoint())
                             : Builder.GetInsertPoint();
    V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
    return V;
  }

  // If S is expanded outside the defining loop, check if there is a
  // matching LCSSA phi node for it.
  if (Value *V = tryToReuseLCSSAPhi(S))
    return V;

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    if (isa<PointerType>(S->getType())) {
      // Pointer addrecs: expand the base pointer and add the offset via GEP.
      Value *StartV = expand(SE.getPointerBase(S));
      return expandAddToGEP(SE.removePointerBase(S), StartV,
      // NOTE(review): the continuation line carrying expandAddToGEP's third
      // argument was lost in extraction — restore from upstream LLVM.
    }

    SmallVector<SCEVUse, 4> NewOps(S->operands());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest =
        SE.getAddRecExpr(NewOps, L, S.getNoWrapFlags(SCEV::FlagNW));

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar");
    CanonicalIV->insertBefore(Header->begin());
    rememberInstruction(CanonicalIV);

    SmallPtrSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator()->getIterator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine()) // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
1461
1462Value *SCEVExpander::visitPtrToAddrExpr(SCEVUseT<const SCEVPtrToAddrExpr *> S) {
1463 Value *V = expand(S->getOperand());
1464 Type *Ty = S->getType();
1465
1466 // ptrtoaddr and ptrtoint produce the same value, so try to reuse either.
1467 if (!isa<Constant>(V)) {
1468 BasicBlock::iterator BIP = Builder.GetInsertPoint();
1469 for (User *U : V->users()) {
1470 auto *CI = dyn_cast<CastInst>(U);
1471 if (CI && CI->getType() == Ty &&
1472 (CI->getOpcode() == CastInst::PtrToAddr ||
1473 CI->getOpcode() == CastInst::PtrToInt) &&
1474 &*BIP != CI && SE.DT.dominates(CI, &*BIP))
1475 return CI;
1476 }
1477 }
1478 return ReuseOrCreateCast(V, Ty, CastInst::PtrToAddr,
1479 GetOptimalInsertionPointForCastOf(V));
1480}
1481
1482Value *SCEVExpander::visitPtrToIntExpr(SCEVUseT<const SCEVPtrToIntExpr *> S) {
1483 Value *V = expand(S->getOperand());
1484 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1485 GetOptimalInsertionPointForCastOf(V));
1486}
1487
1488Value *SCEVExpander::visitTruncateExpr(SCEVUseT<const SCEVTruncateExpr *> S) {
1489 Value *V = expand(S->getOperand());
1490 return Builder.CreateTrunc(V, S->getType());
1491}
1492
1493Value *
1494SCEVExpander::visitZeroExtendExpr(SCEVUseT<const SCEVZeroExtendExpr *> S) {
1495 Value *V = expand(S->getOperand());
1496 return Builder.CreateZExt(V, S->getType(), "",
1497 SE.isKnownNonNegative(S->getOperand()));
1498}
1499
1500Value *
1501SCEVExpander::visitSignExtendExpr(SCEVUseT<const SCEVSignExtendExpr *> S) {
1502 Value *V = expand(S->getOperand());
1503 return Builder.CreateSExt(V, S->getType());
1504}
1505
1506Value *SCEVExpander::expandMinMaxExpr(SCEVUseT<const SCEVNAryExpr *> S,
1507 Intrinsic::ID IntrinID, Twine Name,
1508 bool IsSequential) {
1509 bool PrevSafeMode = SafeUDivMode;
1510 SafeUDivMode |= IsSequential;
1511 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1512 Type *Ty = LHS->getType();
1513 if (IsSequential)
1514 LHS = Builder.CreateFreeze(LHS);
1515 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1516 SafeUDivMode = (IsSequential && i != 0) || PrevSafeMode;
1517 Value *RHS = expand(S->getOperand(i));
1518 if (IsSequential && i != 0)
1519 RHS = Builder.CreateFreeze(RHS);
1520 Value *Sel;
1521 if (Ty->isIntegerTy())
1522 Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1523 /*FMFSource=*/nullptr, Name);
1524 else {
1525 Value *ICmp =
1526 Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
1527 Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1528 }
1529 LHS = Sel;
1530 }
1531 SafeUDivMode = PrevSafeMode;
1532 return LHS;
1533}
1534
1535Value *SCEVExpander::visitSMaxExpr(SCEVUseT<const SCEVSMaxExpr *> S) {
1536 return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1537}
1538
1539Value *SCEVExpander::visitUMaxExpr(SCEVUseT<const SCEVUMaxExpr *> S) {
1540 return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1541}
1542
1543Value *SCEVExpander::visitSMinExpr(SCEVUseT<const SCEVSMinExpr *> S) {
1544 return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1545}
1546
1547Value *SCEVExpander::visitUMinExpr(SCEVUseT<const SCEVUMinExpr *> S) {
1548 return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1549}
1550
1551Value *SCEVExpander::visitSequentialUMinExpr(
1553 return expandMinMaxExpr(S, Intrinsic::umin, "umin",
1554 /*IsSequential*/ true);
1555}
1556
/// Expand a vscale SCEV by emitting the llvm.vscale intrinsic at the current
/// insertion point.
Value *SCEVExpander::visitVScale(SCEVUseT<const SCEVVScale *> S) {
  return Builder.CreateVScale(S->getType());
}
1560
1563 setInsertPoint(IP);
1564 return expandCodeFor(SH, Ty);
1565}
1566
1568 // Expand the code for this SCEV.
1569 Value *V = expand(SH);
1570
1571 if (Ty && Ty != V->getType()) {
1572 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1573 "non-trivial casts should be done with the SCEVs directly!");
1574 V = InsertNoopCastOfTo(V, Ty);
1575 }
1576 return V;
1577}
1578
1579Value *SCEVExpander::FindValueInExprValueMap(
1580 SCEVUse S, const Instruction *InsertPt,
1581 SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
1582 // If the expansion is not in CanonicalMode, and the SCEV contains any
1583 // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
1584 if (!CanonicalMode && SE.containsAddRecurrence(S))
1585 return nullptr;
1586
1587 // If S is a constant or unknown, it may be worse to reuse an existing Value.
1589 return nullptr;
1590
1591 for (Value *V : SE.getSCEVValues(S)) {
1592 Instruction *EntInst = dyn_cast<Instruction>(V);
1593 if (!EntInst)
1594 continue;
1595
1596 // Choose a Value from the set which dominates the InsertPt.
1597 // InsertPt should be inside the Value's parent loop so as not to break
1598 // the LCSSA form.
1599 assert(EntInst->getFunction() == InsertPt->getFunction());
1600 if (S->getType() != V->getType() || !SE.DT.dominates(EntInst, InsertPt) ||
1601 !(SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1602 SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1603 continue;
1604
1605 // Make sure reusing the instruction is poison-safe.
1606 if (SE.canReuseInstruction(S, EntInst, DropPoisonGeneratingInsts))
1607 return V;
1608 DropPoisonGeneratingInsts.clear();
1609 }
1610 return nullptr;
1611}
1612
1613// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1614// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1615// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1616// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1617// the expansion will try to reuse Value from ExprValueMap, and only when it
1618// fails, expand the SCEV literally.
1619Value *SCEVExpander::expand(SCEVUse S) {
1620 // Compute an insertion point for this SCEV object. Hoist the instructions
1621 // as far out in the loop nest as possible.
1622 BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
1623
1624 // We can move insertion point only if there is no div or rem operations
1625 // otherwise we are risky to move it over the check for zero denominator.
1626 auto SafeToHoist = [](const SCEV *S) {
1627 return !SCEVExprContains(S, [](const SCEV *S) {
1628 if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1629 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1630 // Division by non-zero constants can be hoisted.
1631 return SC->getValue()->isZero();
1632 // All other divisions should not be moved as they may be
1633 // divisions by zero and should be kept within the
1634 // conditions of the surrounding loops that guard their
1635 // execution (see PR35406).
1636 return true;
1637 }
1638 return false;
1639 });
1640 };
1641 if (SafeToHoist(S)) {
1642 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1643 L = L->getParentLoop()) {
1644 if (SE.isLoopInvariant(S, L)) {
1645 if (!L) break;
1646 if (BasicBlock *Preheader = L->getLoopPreheader()) {
1647 InsertPt = Preheader->getTerminator()->getIterator();
1648 } else {
1649 // LSR sets the insertion point for AddRec start/step values to the
1650 // block start to simplify value reuse, even though it's an invalid
1651 // position. SCEVExpander must correct for this in all cases.
1652 InsertPt = L->getHeader()->getFirstInsertionPt();
1653 }
1654 } else {
1655 // If the SCEV is computable at this level, insert it into the header
1656 // after the PHIs (and after any other instructions that we've inserted
1657 // there) so that it is guaranteed to dominate any user inside the loop.
1658 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1659 InsertPt = L->getHeader()->getFirstInsertionPt();
1660
1661 while (InsertPt != Builder.GetInsertPoint() &&
1662 (isInsertedInstruction(&*InsertPt))) {
1663 InsertPt = std::next(InsertPt);
1664 }
1665 break;
1666 }
1667 }
1668 }
1669
1670 // Check to see if we already expanded this here.
1671 auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1672 if (I != InsertedExpressions.end())
1673 return I->second;
1674
1675 SCEVInsertPointGuard Guard(Builder, this);
1676 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1677
1678 // Expand the expression into instructions.
1679 SmallVector<Instruction *> DropPoisonGeneratingInsts;
1680 Value *V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1681 if (!V) {
1682 V = visit(S);
1683 V = fixupLCSSAFormFor(V);
1684 } else {
1685 for (Instruction *I : DropPoisonGeneratingInsts) {
1686 rememberFlags(I);
1687 I->dropPoisonGeneratingAnnotations();
1688 // See if we can re-infer from first principles any of the flags we just
1689 // dropped.
1690 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
1691 if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1692 auto *BO = cast<BinaryOperator>(I);
1693 BO->setHasNoUnsignedWrap(
1695 BO->setHasNoSignedWrap(
1697 }
1698 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(I)) {
1699 auto *Src = NNI->getOperand(0);
1701 Constant::getNullValue(Src->getType()), I,
1702 DL).value_or(false))
1703 NNI->setNonNeg(true);
1704 }
1705 }
1706 }
1707 // Remember the expanded value for this SCEV at this location.
1708 //
1709 // This is independent of PostIncLoops. The mapped value simply materializes
1710 // the expression at this insertion point. If the mapped value happened to be
1711 // a postinc expansion, it could be reused by a non-postinc user, but only if
1712 // its insertion point was already at the head of the loop.
1713 InsertedExpressions[std::make_pair(S, &*InsertPt)] = V;
1714 return V;
1715}
1716
1717void SCEVExpander::rememberInstruction(Value *I) {
1718 auto DoInsert = [this](Value *V) {
1719 if (!PostIncLoops.empty())
1720 InsertedPostIncValues.insert(V);
1721 else
1722 InsertedValues.insert(V);
1723 };
1724 DoInsert(I);
1725}
1726
1727void SCEVExpander::rememberFlags(Instruction *I) {
1728 // If we already have flags for the instruction, keep the existing ones.
1729 OrigFlags.try_emplace(I, PoisonFlags(I));
1730}
1731
1732void SCEVExpander::replaceCongruentIVInc(
1733 PHINode *&Phi, PHINode *&OrigPhi, Loop *L, const DominatorTree *DT,
1735 BasicBlock *LatchBlock = L->getLoopLatch();
1736 if (!LatchBlock)
1737 return;
1738
1739 Instruction *OrigInc =
1740 dyn_cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
1741 Instruction *IsomorphicInc =
1742 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1743 if (!OrigInc || !IsomorphicInc)
1744 return;
1745
1746 // If this phi has the same width but is more canonical, replace the
1747 // original with it. As part of the "more canonical" determination,
1748 // respect a prior decision to use an IV chain.
1749 if (OrigPhi->getType() == Phi->getType()) {
1750 bool Chained = ChainedPhis.contains(Phi);
1751 if (!(Chained || isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1752 (Chained || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1753 std::swap(OrigPhi, Phi);
1754 std::swap(OrigInc, IsomorphicInc);
1755 }
1756 }
1757
1758 // Replacing the congruent phi is sufficient because acyclic
1759 // redundancy elimination, CSE/GVN, should handle the
1760 // rest. However, once SCEV proves that a phi is congruent,
1761 // it's often the head of an IV user cycle that is isomorphic
1762 // with the original phi. It's worth eagerly cleaning up the
1763 // common case of a single IV increment so that DeleteDeadPHIs
1764 // can remove cycles that had postinc uses.
1765 // Because we may potentially introduce a new use of OrigIV that didn't
1766 // exist before at this point, its poison flags need readjustment.
1767 const SCEV *TruncExpr =
1768 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1769 if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1770 !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1771 return;
1772
1773 bool BothHaveNUW = false;
1774 bool BothHaveNSW = false;
1775 auto *OBOIncV = dyn_cast<OverflowingBinaryOperator>(OrigInc);
1776 auto *OBOIsomorphic = dyn_cast<OverflowingBinaryOperator>(IsomorphicInc);
1777 if (OBOIncV && OBOIsomorphic) {
1778 BothHaveNUW =
1779 OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1780 BothHaveNSW =
1781 OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1782 }
1783
1784 if (!hoistIVInc(OrigInc, IsomorphicInc,
1785 /*RecomputePoisonFlags*/ true))
1786 return;
1787
1788 // We are replacing with a wider increment. If both OrigInc and IsomorphicInc
1789 // are NUW/NSW, then we can preserve them on the wider increment; the narrower
1790 // IsomorphicInc would wrap before the wider OrigInc, so the replacement won't
1791 // make IsomorphicInc's uses more poisonous.
1792 assert(OrigInc->getType()->getScalarSizeInBits() >=
1793 IsomorphicInc->getType()->getScalarSizeInBits() &&
1794 "Should only replace an increment with a wider one.");
1795 if (BothHaveNUW || BothHaveNSW) {
1796 OrigInc->setHasNoUnsignedWrap(OBOIncV->hasNoUnsignedWrap() || BothHaveNUW);
1797 OrigInc->setHasNoSignedWrap(OBOIncV->hasNoSignedWrap() || BothHaveNSW);
1798 }
1799
1800 SCEV_DEBUG_WITH_TYPE(DebugType,
1801 dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1802 << *IsomorphicInc << '\n');
1803 Value *NewInc = OrigInc;
1804 if (OrigInc->getType() != IsomorphicInc->getType()) {
1806 if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1807 IP = PN->getParent()->getFirstInsertionPt();
1808 else
1809 IP = OrigInc->getNextNode()->getIterator();
1810
1811 IRBuilder<> Builder(IP->getParent(), IP);
1812 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1813 NewInc =
1814 Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
1815 }
1816 IsomorphicInc->replaceAllUsesWith(NewInc);
1817 DeadInsts.emplace_back(IsomorphicInc);
1818}
1819
1820/// replaceCongruentIVs - Check for congruent phis in this loop header and
1821/// replace them with their most canonical representative. Return the number of
1822/// phis eliminated.
1823///
1824/// This does not depend on any SCEVExpander state but should be used in
1825/// the same context that SCEVExpander is used.
1826unsigned
1829 const TargetTransformInfo *TTI) {
1830 // Find integer phis in order of increasing width.
1832 llvm::make_pointer_range(L->getHeader()->phis()));
1833
1834 if (TTI)
1835 // Use stable_sort to preserve order of equivalent PHIs, so the order
1836 // of the sorted Phis is the same from run to run on the same loop.
1837 llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
1838 // Put pointers at the back and make sure pointer < pointer = false.
1839 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1840 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1841 return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1842 LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1843 });
1844
1845 unsigned NumElim = 0;
1847 // Process phis from wide to narrow. Map wide phis to their truncation
1848 // so narrow phis can reuse them.
1849 for (PHINode *Phi : Phis) {
1850 auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1851 if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1852 return V;
1853 if (!SE.isSCEVable(PN->getType()))
1854 return nullptr;
1855 auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1856 if (!Const)
1857 return nullptr;
1858 return Const->getValue();
1859 };
1860
1861 // Fold constant phis. They may be congruent to other constant phis and
1862 // would confuse the logic below that expects proper IVs.
1863 if (Value *V = SimplifyPHINode(Phi)) {
1864 if (V->getType() != Phi->getType())
1865 continue;
1866 SE.forgetValue(Phi);
1867 Phi->replaceAllUsesWith(V);
1868 DeadInsts.emplace_back(Phi);
1869 ++NumElim;
1870 SCEV_DEBUG_WITH_TYPE(DebugType,
1871 dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1872 << '\n');
1873 continue;
1874 }
1875
1876 if (!SE.isSCEVable(Phi->getType()))
1877 continue;
1878
1879 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1880 if (!OrigPhiRef) {
1881 OrigPhiRef = Phi;
1882 if (Phi->getType()->isIntegerTy() && TTI &&
1883 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1884 // Make sure we only rewrite using simple induction variables;
1885 // otherwise, we can make the trip count of a loop unanalyzable
1886 // to SCEV.
1887 const SCEV *PhiExpr = SE.getSCEV(Phi);
1888 if (isa<SCEVAddRecExpr>(PhiExpr)) {
1889 // This phi can be freely truncated to the narrowest phi type. Map the
1890 // truncated expression to it so it will be reused for narrow types.
1891 const SCEV *TruncExpr =
1892 SE.getTruncateExpr(PhiExpr, Phis.back()->getType());
1893 ExprToIVMap[TruncExpr] = Phi;
1894 }
1895 }
1896 continue;
1897 }
1898
1899 // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1900 // sense.
1901 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1902 continue;
1903
1904 replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1905 SCEV_DEBUG_WITH_TYPE(DebugType,
1906 dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
1907 << '\n');
1909 DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
1910 ++NumElim;
1911 Value *NewIV = OrigPhiRef;
1912 if (OrigPhiRef->getType() != Phi->getType()) {
1913 IRBuilder<> Builder(L->getHeader(),
1914 L->getHeader()->getFirstInsertionPt());
1915 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1916 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1917 }
1918 Phi->replaceAllUsesWith(NewIV);
1919 DeadInsts.emplace_back(Phi);
1920 }
1921 return NumElim;
1922}
1923
1925 const Instruction *At,
1926 Loop *L) {
1927 using namespace llvm::PatternMatch;
1928
1929 SmallVector<BasicBlock *, 4> ExitingBlocks;
1930 L->getExitingBlocks(ExitingBlocks);
1931
1932 // Look for suitable value in simple conditions at the loop exits.
1933 for (BasicBlock *BB : ExitingBlocks) {
1934 CmpPredicate Pred;
1935 Instruction *LHS, *RHS;
1936
1937 if (!match(BB->getTerminator(),
1938 m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
1940 continue;
1941
1942 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1943 return true;
1944
1945 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1946 return true;
1947 }
1948
1949 // Use expand's logic which is used for reusing a previous Value in
1950 // ExprValueMap. Note that we don't currently model the cost of
1951 // needing to drop poison generating flags on the instruction if we
1952 // want to reuse it. We effectively assume that has zero cost.
1953 SmallVector<Instruction *> DropPoisonGeneratingInsts;
1954 return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) != nullptr;
1955}
1956
1957template<typename T> static InstructionCost costAndCollectOperands(
1960 SmallVectorImpl<SCEVOperand> &Worklist) {
1961
1962 const T *S = cast<T>(WorkItem.S);
1963 InstructionCost Cost = 0;
1964 // Object to help map SCEV operands to expanded IR instructions.
1965 struct OperationIndices {
1966 OperationIndices(unsigned Opc, size_t min, size_t max) :
1967 Opcode(Opc), MinIdx(min), MaxIdx(max) { }
1968 unsigned Opcode;
1969 size_t MinIdx;
1970 size_t MaxIdx;
1971 };
1972
1973 // Collect the operations of all the instructions that will be needed to
1974 // expand the SCEVExpr. This is so that when we come to cost the operands,
1975 // we know what the generated user(s) will be.
1977
1978 auto CastCost = [&](unsigned Opcode) -> InstructionCost {
1979 Operations.emplace_back(Opcode, 0, 0);
1980 return TTI.getCastInstrCost(Opcode, S->getType(),
1981 S->getOperand(0)->getType(),
1983 };
1984
1985 auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
1986 unsigned MinIdx = 0,
1987 unsigned MaxIdx = 1) -> InstructionCost {
1988 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1989 return NumRequired *
1990 TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
1991 };
1992
1993 auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
1994 unsigned MaxIdx) -> InstructionCost {
1995 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1996 Type *OpType = S->getType();
1997 return NumRequired * TTI.getCmpSelInstrCost(
1998 Opcode, OpType, CmpInst::makeCmpResultType(OpType),
2000 };
2001
2002 switch (S->getSCEVType()) {
2003 case scCouldNotCompute:
2004 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2005 case scUnknown:
2006 case scConstant:
2007 case scVScale:
2008 return 0;
2009 case scPtrToAddr:
2010 Cost = CastCost(Instruction::PtrToAddr);
2011 break;
2012 case scPtrToInt:
2013 Cost = CastCost(Instruction::PtrToInt);
2014 break;
2015 case scTruncate:
2016 Cost = CastCost(Instruction::Trunc);
2017 break;
2018 case scZeroExtend:
2019 Cost = CastCost(Instruction::ZExt);
2020 break;
2021 case scSignExtend:
2022 Cost = CastCost(Instruction::SExt);
2023 break;
2024 case scUDivExpr: {
2025 unsigned Opcode = Instruction::UDiv;
2026 if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2027 if (SC->getAPInt().isPowerOf2())
2028 Opcode = Instruction::LShr;
2029 Cost = ArithCost(Opcode, 1);
2030 break;
2031 }
2032 case scAddExpr:
2033 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2034 break;
2035 case scMulExpr: {
2036 // Match the actual expansion in visitMulExpr: multiply by -1 is
2037 // expanded as a negate (sub 0, x), and multiply by a power of 2 is
2038 // expanded as a shift. Only handle the common two-operand case with a
2039 // constant LHS; for everything else fall back to the pessimistic
2040 // all-multiplies estimate.
2041 // TODO: this is still pessimistic for the general case because of the
2042 // Bin Pow algorithm actually used by the expander, see
2043 // SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
2044 unsigned OpCode = Instruction::Mul;
2045 if (S->getNumOperands() == 2)
2046 if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(0))) {
2047 if (SC->getAPInt().isAllOnes()) // -1
2048 OpCode = Instruction::Sub;
2049 else if (SC->getAPInt().isPowerOf2())
2050 OpCode = Instruction::Shl;
2051 }
2052 Cost = ArithCost(OpCode, S->getNumOperands() - 1);
2053 break;
2054 }
2055 case scSMaxExpr:
2056 case scUMaxExpr:
2057 case scSMinExpr:
2058 case scUMinExpr:
2059 case scSequentialUMinExpr: {
2060 // FIXME: should this ask the cost for Intrinsic's?
2061 // The reduction tree.
2062 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2063 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2064 switch (S->getSCEVType()) {
2065 case scSequentialUMinExpr: {
2066 // The safety net against poison.
2067 // FIXME: this is broken.
2068 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2069 Cost += ArithCost(Instruction::Or,
2070 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2071 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2072 break;
2073 }
2074 default:
2076 "Unhandled SCEV expression type?");
2077 break;
2078 }
2079 break;
2080 }
2081 case scAddRecExpr: {
2082 // Addrec expands to a phi and add per recurrence.
2083 unsigned NumRecurrences = S->getNumOperands() - 1;
2084 Cost += TTI.getCFInstrCost(Instruction::PHI, CostKind) * NumRecurrences;
2085 Cost +=
2086 TTI.getArithmeticInstrCost(Instruction::Add, S->getType(), CostKind) *
2087 NumRecurrences;
2088 // AR start is used in phi.
2089 Worklist.emplace_back(Instruction::PHI, 0, S->getOperand(0));
2090 // Other operands are used in add.
2091 for (const SCEV *Op : S->operands().drop_front())
2092 Worklist.emplace_back(Instruction::Add, 1, Op);
2093 break;
2094 }
2095 }
2096
2097 for (auto &CostOp : Operations) {
2098 for (auto SCEVOp : enumerate(S->operands())) {
2099 // Clamp the index to account for multiple IR operations being chained.
2100 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2101 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2102 Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2103 }
2104 }
2105 return Cost;
2106}
2107
2108bool SCEVExpander::isHighCostExpansionHelper(
2109 const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2110 InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
2112 SmallVectorImpl<SCEVOperand> &Worklist) {
2113 if (Cost > Budget)
2114 return true; // Already run out of budget, give up.
2115
2116 const SCEV *S = WorkItem.S;
2117 // Was the cost of expansion of this expression already accounted for?
2118 if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2119 return false; // We have already accounted for this expression.
2120
2121 // If we can find an existing value for this scev available at the point "At"
2122 // then consider the expression cheap.
2123 if (hasRelatedExistingExpansion(S, &At, L))
2124 return false; // Consider the expression to be free.
2125
2127 L->getHeader()->getParent()->hasMinSize()
2130
2131 switch (S->getSCEVType()) {
2132 case scCouldNotCompute:
2133 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2134 case scUnknown:
2135 case scVScale:
2136 // Assume to be zero-cost.
2137 return false;
2138 case scConstant: {
2139 // Only evalulate the costs of constants when optimizing for size.
2141 return false;
2142 const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2143 Type *Ty = S->getType();
2145 WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2146 return Cost > Budget;
2147 }
2148 case scTruncate:
2149 case scPtrToAddr:
2150 case scPtrToInt:
2151 case scZeroExtend:
2152 case scSignExtend: {
2153 Cost +=
2155 return false; // Will answer upon next entry into this function.
2156 }
2157 case scUDivExpr: {
2158 // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2159 // HowManyLessThans produced to compute a precise expression, rather than a
2160 // UDiv from the user's code. If we can't find a UDiv in the code with some
2161 // simple searching, we need to account for it's cost.
2162
2163 // At the beginning of this function we already tried to find existing
2164 // value for plain 'S'. Now try to lookup 'S + 1' since it is common
2165 // pattern involving division. This is just a simple search heuristic.
2167 SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2168 return false; // Consider it to be free.
2169
2170 Cost +=
2172 return false; // Will answer upon next entry into this function.
2173 }
2174 case scAddExpr:
2175 case scMulExpr:
2176 case scUMaxExpr:
2177 case scSMaxExpr:
2178 case scUMinExpr:
2179 case scSMinExpr:
2180 case scSequentialUMinExpr: {
2181 assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2182 "Nary expr should have more than 1 operand.");
2183 // The simple nary expr will require one less op (or pair of ops)
2184 // than the number of it's terms.
2185 Cost +=
2187 return Cost > Budget;
2188 }
2189 case scAddRecExpr: {
2190 assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2191 "Polynomial should be at least linear");
2193 WorkItem, TTI, CostKind, Worklist);
2194 return Cost > Budget;
2195 }
2196 }
2197 llvm_unreachable("Unknown SCEV kind!");
2198}
2199
2201 Instruction *IP) {
2202 assert(IP);
2203 switch (Pred->getKind()) {
2208 case SCEVPredicate::P_Wrap: {
2209 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2210 return expandWrapPredicate(AddRecPred, IP);
2211 }
2212 }
2213 llvm_unreachable("Unknown SCEV predicate type");
2214}
2215
2217 Instruction *IP) {
2218 Value *Expr0 = expand(Pred->getLHS(), IP);
2219 Value *Expr1 = expand(Pred->getRHS(), IP);
2220
2221 Builder.SetInsertPoint(IP);
2222 auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
2223 auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2224 return I;
2225}
2226
2228 Instruction *Loc, bool Signed) {
2229 assert(AR->isAffine() && "Cannot generate RT check for "
2230 "non-affine expression");
2231
2232 // FIXME: It is highly suspicious that we're ignoring the predicates here.
2234 const SCEV *ExitCount =
2235 SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->getLoop(), Pred);
2236
2237 assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2238
2239 const SCEV *Step = AR->getStepRecurrence(SE);
2240 const SCEV *Start = AR->getStart();
2241
2242 Type *ARTy = AR->getType();
2243 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2244 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2245
2246 // The expression {Start,+,Step} has nusw/nssw if
2247 // Step < 0, Start - |Step| * Backedge <= Start
2248 // Step >= 0, Start + |Step| * Backedge > Start
2249 // and |Step| * Backedge doesn't unsigned overflow.
2250
2251 Builder.SetInsertPoint(Loc);
2252 Value *TripCountVal = expand(ExitCount, Loc);
2253
2254 IntegerType *Ty =
2255 IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2256
2257 Value *StepValue = expand(Step, Loc);
2258 Value *NegStepValue = expand(SE.getNegativeSCEV(Step), Loc);
2259 Value *StartValue = expand(Start, Loc);
2260
2261 ConstantInt *Zero =
2262 ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2263
2264 Builder.SetInsertPoint(Loc);
2265 // Compute |Step|
2266 Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2267 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2268
2269 // Compute |Step| * Backedge
2270 // Compute:
2271 // 1. Start + |Step| * Backedge < Start
2272 // 2. Start - |Step| * Backedge > Start
2273 //
2274 // And select either 1. or 2. depending on whether step is positive or
2275 // negative. If Step is known to be positive or negative, only create
2276 // either 1. or 2.
2277 auto ComputeEndCheck = [&]() -> Value * {
2278 // Get the backedge taken count and truncate or extended to the AR type.
2279 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2280
2281 CallInst *Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
2282 {AbsStep, TruncTripCount},
2283 /*FMFSource=*/nullptr, "mul");
2284 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2285 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2286
2287 Value *Add = nullptr, *Sub = nullptr;
2288 bool NeedPosCheck = !SE.isKnownNegative(Step);
2289 bool NeedNegCheck = !SE.isKnownPositive(Step);
2290
2291 if (isa<PointerType>(ARTy)) {
2292 Value *NegMulV = Builder.CreateNeg(MulV);
2293 if (NeedPosCheck)
2294 Add = Builder.CreatePtrAdd(StartValue, MulV);
2295 if (NeedNegCheck)
2296 Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2297 } else {
2298 if (NeedPosCheck)
2299 Add = Builder.CreateAdd(StartValue, MulV);
2300 if (NeedNegCheck)
2301 Sub = Builder.CreateSub(StartValue, MulV);
2302 }
2303
2304 Value *EndCompareLT = nullptr;
2305 Value *EndCompareGT = nullptr;
2306 Value *EndCheck = nullptr;
2307 if (NeedPosCheck)
2308 EndCheck = EndCompareLT = Builder.CreateICmp(
2310 if (NeedNegCheck)
2311 EndCheck = EndCompareGT = Builder.CreateICmp(
2313 if (NeedPosCheck && NeedNegCheck) {
2314 // Select the answer based on the sign of Step.
2315 EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2316 }
2317 return Builder.CreateOr(EndCheck, OfMul);
2318 };
2319 Value *EndCheck = ComputeEndCheck();
2320
2321 // If the backedge taken count type is larger than the AR type,
2322 // check that we don't drop any bits by truncating it. If we are
2323 // dropping bits, then we have overflow (unless the step is zero).
2324 if (SrcBits > DstBits) {
2325 auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2326 auto *BackedgeCheck =
2327 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2328 ConstantInt::get(Loc->getContext(), MaxVal));
2329 BackedgeCheck = Builder.CreateAnd(
2330 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2331
2332 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2333 }
2334
2335 return EndCheck;
2336}
2337
2339 Instruction *IP) {
2340 const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2341 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2342
2343 // Add a check for NUSW
2344 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2345 NUSWCheck = generateOverflowCheck(A, IP, false);
2346
2347 // Add a check for NSSW
2348 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2349 NSSWCheck = generateOverflowCheck(A, IP, true);
2350
2351 if (NUSWCheck && NSSWCheck)
2352 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2353
2354 if (NUSWCheck)
2355 return NUSWCheck;
2356
2357 if (NSSWCheck)
2358 return NSSWCheck;
2359
2360 return ConstantInt::getFalse(IP->getContext());
2361}
2362
2364 Instruction *IP) {
2365 // Loop over all checks in this set.
2366 SmallVector<Value *> Checks;
2367 for (const auto *Pred : Union->getPredicates()) {
2368 Checks.push_back(expandCodeForPredicate(Pred, IP));
2369 Builder.SetInsertPoint(IP);
2370 }
2371
2372 if (Checks.empty())
2373 return ConstantInt::getFalse(IP->getContext());
2374 return Builder.CreateOr(Checks);
2375}
2376
2377Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2378 auto *DefI = dyn_cast<Instruction>(V);
2379 if (!PreserveLCSSA || !DefI)
2380 return V;
2381
2382 BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
2383 Loop *DefLoop = SE.LI.getLoopFor(DefI->getParent());
2384 Loop *UseLoop = SE.LI.getLoopFor(InsertPt->getParent());
2385 if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2386 return V;
2387
2388 // Create a temporary instruction to at the current insertion point, so we
2389 // can hand it off to the helper to create LCSSA PHIs if required for the
2390 // new use.
2391 // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
2392 // would accept a insertion point and return an LCSSA phi for that
2393 // insertion point, so there is no need to insert & remove the temporary
2394 // instruction.
2395 Type *ToTy;
2396 if (DefI->getType()->isIntegerTy())
2397 ToTy = PointerType::get(DefI->getContext(), 0);
2398 else
2399 ToTy = Type::getInt32Ty(DefI->getContext());
2400 Instruction *User =
2401 CastInst::CreateBitOrPointerCast(DefI, ToTy, "tmp.lcssa.user", InsertPt);
2402 llvm::scope_exit RemoveUserOnExit([User]() { User->eraseFromParent(); });
2403
2405 ToUpdate.push_back(DefI);
2406 SmallVector<PHINode *, 16> PHIsToRemove;
2407 SmallVector<PHINode *, 16> InsertedPHIs;
2408 formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, &PHIsToRemove,
2409 &InsertedPHIs);
2410 for (PHINode *PN : InsertedPHIs)
2411 rememberInstruction(PN);
2412 for (PHINode *PN : PHIsToRemove) {
2413 if (!PN->use_empty())
2414 continue;
2415 InsertedValues.erase(PN);
2416 InsertedPostIncValues.erase(PN);
2417 PN->eraseFromParent();
2418 }
2419
2420 return User->getOperand(0);
2421}
2422
2423namespace {
2424// Search for a SCEV subexpression that is not safe to expand. Any expression
2425// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2426// UDiv expressions. We don't know if the UDiv is derived from an IR divide
2427// instruction, but the important thing is that we prove the denominator is
2428// nonzero before expansion.
2429//
2430// IVUsers already checks that IV-derived expressions are safe. So this check is
2431// only needed when the expression includes some subexpression that is not IV
2432// derived.
2433//
2434// Currently, we only allow division by a value provably non-zero here.
2435//
2436// We cannot generally expand recurrences unless the step dominates the loop
2437// header. The expander handles the special case of affine recurrences by
2438// scaling the recurrence outside the loop, but this technique isn't generally
2439// applicable. Expanding a nested recurrence outside a loop requires computing
2440// binomial coefficients. This could be done, but the recurrence has to be in a
2441// perfectly reduced form, which can't be guaranteed.
2442struct SCEVFindUnsafe {
2443 ScalarEvolution &SE;
2444 bool CanonicalMode;
2445 bool IsUnsafe = false;
2446
2447 SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2448 : SE(SE), CanonicalMode(CanonicalMode) {}
2449
2450 bool follow(const SCEV *S) {
2451 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2452 if (!SE.isKnownNonZero(D->getRHS())) {
2453 IsUnsafe = true;
2454 return false;
2455 }
2456 }
2457 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2458 // For non-affine addrecs or in non-canonical mode we need a preheader
2459 // to insert into.
2460 if (!AR->getLoop()->getLoopPreheader() &&
2461 (!CanonicalMode || !AR->isAffine())) {
2462 IsUnsafe = true;
2463 return false;
2464 }
2465 }
2466 return true;
2467 }
2468 bool isDone() const { return IsUnsafe; }
2469};
2470} // namespace
2471
2473 SCEVFindUnsafe Search(SE, CanonicalMode);
2474 visitAll(S, Search);
2475 return !Search.IsUnsafe;
2476}
2477
2479 const Instruction *InsertionPoint) const {
2480 if (!isSafeToExpand(S))
2481 return false;
2482 // We have to prove that the expanded site of S dominates InsertionPoint.
2483 // This is easy when not in the same block, but hard when S is an instruction
2484 // to be expanded somewhere inside the same block as our insertion point.
2485 // What we really need here is something analogous to an OrderedBasicBlock,
2486 // but for the moment, we paper over the problem by handling two common and
2487 // cheap to check cases.
2488 if (SE.properlyDominates(S, InsertionPoint->getParent()))
2489 return true;
2490 if (SE.dominates(S, InsertionPoint->getParent())) {
2491 if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2492 return true;
2493 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2494 if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2495 return true;
2496 }
2497 return false;
2498}
2499
2501 // Result is used, nothing to remove.
2502 if (ResultUsed)
2503 return;
2504
2505 // Restore original poison flags.
2506 for (auto [I, Flags] : Expander.OrigFlags)
2507 Flags.apply(I);
2508
2509 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2510#ifndef NDEBUG
2512 InsertedInstructions);
2513 (void)InsertedSet;
2514#endif
2515 // Remove sets with value handles.
2516 Expander.clear();
2517
2518 // Remove all inserted instructions.
2519 for (Instruction *I : reverse(InsertedInstructions)) {
2520#ifndef NDEBUG
2521 assert(all_of(I->users(),
2522 [&InsertedSet](Value *U) {
2523 return InsertedSet.contains(cast<Instruction>(U));
2524 }) &&
2525 "removed instruction should only be used by instructions inserted "
2526 "during expansion");
2527#endif
2528 assert(!I->getType()->isVoidTy() &&
2529 "inserted instruction should have non-void types");
2530 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
2531 I->eraseFromParent();
2532 }
2533}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
Hexagon Common GEP
Hexagon Hardware Loops
#define I(x, y, z)
Definition MD5.cpp:57
#define T
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define P(N)
if(PassOpts->AAPipeline)
This file contains some templates that are useful if you are working with the STL at all.
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This pass exposes codegen information to IR-level passes.
Value * RHS
Value * LHS
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
Definition APInt.cpp:1054
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:207
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:530
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:986
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static GEPNoWrapFlags noUnsignedWrap()
static GEPNoWrapFlags none()
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2847
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition LoopInfo.h:40
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This node represents a polynomial recurrence on the trip count of the specified loop.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
LLVM_ABI Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
LLVM_ABI bool hasRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Determine whether there is an existing expansion of S that can be reused.
SmallVector< Instruction *, 32 > getAllInsertedInstructions() const
Return a vector containing all instructions inserted during expansion.
LLVM_ABI bool isSafeToExpand(const SCEV *S) const
Return true if the given expression is safe to expand in the sense that all materialized values are s...
LLVM_ABI bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const
Return true if the given expression is safe to expand in the sense that all materialized values are d...
LLVM_ABI unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
LLVM_ABI Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI bool hoistIVInc(Instruction *IncV, Instruction *InsertPos, bool RecomputePoisonFlags=false)
Utility for hoisting IncV (with all subexpressions requried for its computation) before InsertPos.
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
static LLVM_ABI bool canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi, PHINode *WidePhi, Instruction *OrigInc, Instruction *WideInc)
Return true if both increments directly increment the corresponding IV PHI nodes and have the same op...
LLVM_ABI Value * expandCodeFor(SCEVUse SH, Type *Ty, BasicBlock::iterator I)
Insert code to directly compute the specified SCEV expression into the program.
LLVM_ABI Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
LLVM_ABI BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
void setInsertPoint(Instruction *IP)
Set the current insertion point.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an assumption made on an AddRec expression.
This class represents an analyzed expression in the program.
SCEVNoWrapFlags NoWrapFlags
static constexpr auto FlagNUW
static constexpr auto FlagAnyWrap
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
static constexpr auto FlagNSW
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
SCEVTypes getSCEVType() const
static constexpr auto FlagNW
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags Mask)
Convenient NoWrapFlags manipulation.
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
@ None
The insert/extract is not used with a load/store.
LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
TargetCostKind
The kind of cost model.
@ TCK_RecipThroughput
Reciprocal throughput.
@ TCK_CodeSize
Instruction code size.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:290
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:130
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:267
Value * getOperand(unsigned i) const
Definition User.h:207
unsigned getNumOperands() const
Definition User.h:229
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
bool use_empty() const
Definition Value.h:346
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
#define UINT64_MAX
Definition DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr bool any(E Val)
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
bool match(Val *V, const Pattern &P)
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
auto m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVPtrToAddrExpr, Op0_t > m_scev_PtrToAddr(const Op0_t &Op0)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVPtrToIntExpr, Op0_t > m_scev_PtrToInt(const Op0_t &Op0)
match_bind< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
@ CE
Windows NT (Windows on ARM)
Definition MCAsmInfo.h:48
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:316
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2116
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
InstructionCost Cost
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2554
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
constexpr from_range_t from_range
constexpr NextUseDistance min(NextUseDistance A, NextUseDistance B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE, bool CheckInvertible=true)
Normalize S to be post-increment for all loops present in Loops.
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
constexpr NextUseDistance max(NextUseDistance A, NextUseDistance B)
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
PredIterator< BasicBlock, Value::user_iterator > pred_iterator
Definition CFG.h:105
constexpr unsigned BitWidth
LLVM_ABI bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
Definition LCSSA.cpp:308
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
SmallPtrSet< const Loop *, 2 > PostIncLoopSet
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
LLVM_ABI void apply(Instruction *I)
LLVM_ABI PoisonFlags(const Instruction *I)
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
const SCEV * S
The SCEV operand to be costed.
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
int OperandIdx
The use index of an expanded instruction.
SCEVNoWrapFlags getNoWrapFlags(SCEVNoWrapFlags Mask=SCEVNoWrapFlags::NoWrapMask) const
Return the no-wrap flags for this SCEVUse, which is the union of the use-specific flags and the under...