ScalarEvolutionExpander.cpp (LLVM 15.0.0git)
1 //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the implementation of the scalar evolution expander,
10 // which is used to generate the code corresponding to a given scalar evolution
11 // expression.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/LoopInfo.h"
20 #include "llvm/Analysis/TargetTransformInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Dominators.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/Transforms/Utils/LoopUtils.h"
29 
30 #ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
31 #define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
32 #else
33 #define SCEV_DEBUG_WITH_TYPE(TYPE, X)
34 #endif
35 
36 using namespace llvm;
37 
38 cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
39  "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
40  cl::desc("When performing SCEV expansion only if it is cheap to do, this "
41  "controls the budget that is considered cheap (default = 4)"));
42 
43 using namespace PatternMatch;
44 
45 /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
46 /// reusing an existing cast if a suitable one (= dominating IP) exists, or
47 /// creating a new one.
48 Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
49  Instruction::CastOps Op,
50  BasicBlock::iterator IP) {
51  // This function must be called with the builder having a valid insertion
52  // point. It doesn't need to be the actual IP where the uses of the returned
53  // cast will be added, but it must dominate such IP.
54  // We use this precondition to produce a cast that will dominate all its
55  // uses. In particular, this is crucial for the case where the builder's
56  // insertion point *is* the point where we were asked to put the cast.
57  // Since we don't know the builder's insertion point is actually
58  // where the uses will be added (only that it dominates it), we are
59  // not allowed to move it.
60  BasicBlock::iterator BIP = Builder.GetInsertPoint();
61 
62  Value *Ret = nullptr;
63 
64  // Check to see if there is already a cast!
65  for (User *U : V->users()) {
66  if (U->getType() != Ty)
67  continue;
68  CastInst *CI = dyn_cast<CastInst>(U);
69  if (!CI || CI->getOpcode() != Op)
70  continue;
71 
72  // Found a suitable cast that is at IP or comes before IP. Use it. Note that
73  // the cast must also properly dominate the Builder's insertion point.
74  if (IP->getParent() == CI->getParent() && &*BIP != CI &&
75  (&*IP == CI || CI->comesBefore(&*IP))) {
76  Ret = CI;
77  break;
78  }
79  }
80 
81  // Create a new cast.
82  if (!Ret) {
83  SCEVInsertPointGuard Guard(Builder, this);
84  Builder.SetInsertPoint(&*IP);
85  Ret = Builder.CreateCast(Op, V, Ty, V->getName());
86  }
87 
88  // We assert at the end of the function since IP might point to an
89  // instruction with different dominance properties than a cast
90  // (an invoke for example) and not dominate BIP (but the cast does).
91  assert(!isa<Instruction>(Ret) ||
92  SE.DT.dominates(cast<Instruction>(Ret), &*BIP));
93 
94  return Ret;
95 }
96 
97 BasicBlock::iterator
98 SCEVExpander::findInsertPointAfter(Instruction *I,
99  Instruction *MustDominate) const {
100  BasicBlock::iterator IP = ++I->getIterator();
101  if (auto *II = dyn_cast<InvokeInst>(I))
102  IP = II->getNormalDest()->begin();
103 
104  while (isa<PHINode>(IP))
105  ++IP;
106 
107  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
108  ++IP;
109  } else if (isa<CatchSwitchInst>(IP)) {
110  IP = MustDominate->getParent()->getFirstInsertionPt();
111  } else {
112  assert(!IP->isEHPad() && "unexpected eh pad!");
113  }
114 
115  // Adjust insert point to be after instructions inserted by the expander, so
116  // we can re-use already inserted instructions. Avoid skipping past the
117  // original \p MustDominate, in case it is an inserted instruction.
118  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
119  ++IP;
120 
121  return IP;
122 }
123 
124 BasicBlock::iterator
125 SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
126  // Cast the argument at the beginning of the entry block, after
127  // any bitcasts of other arguments.
128  if (Argument *A = dyn_cast<Argument>(V)) {
129  BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
130  while ((isa<BitCastInst>(IP) &&
131  isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
132  cast<BitCastInst>(IP)->getOperand(0) != A) ||
133  isa<DbgInfoIntrinsic>(IP))
134  ++IP;
135  return IP;
136  }
137 
138  // Cast the instruction immediately after the instruction.
139  if (Instruction *I = dyn_cast<Instruction>(V))
140  return findInsertPointAfter(I, &*Builder.GetInsertPoint());
141 
142  // Otherwise, this must be some kind of a constant,
143  // so let's plop this cast into the function's entry block.
144  assert(isa<Constant>(V) &&
145  "Expected the cast argument to be a global/constant");
146  return Builder.GetInsertBlock()
147  ->getParent()
148  ->getEntryBlock()
149  .getFirstInsertionPt();
150 }
151 
152 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
153 /// which must be possible with a noop cast, doing what we can to share
154 /// the casts.
155 Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
156  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
157  assert((Op == Instruction::BitCast ||
158  Op == Instruction::PtrToInt ||
159  Op == Instruction::IntToPtr) &&
160  "InsertNoopCastOfTo cannot perform non-noop casts!");
161  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
162  "InsertNoopCastOfTo cannot change sizes!");
163 
164  // inttoptr only works for integral pointers. For non-integral pointers, we
165  // can create a GEP on i8* null with the integral value as index. Note that
166  // it is safe to use GEP of null instead of inttoptr here, because only
167  // expressions already based on a GEP of null should be converted to pointers
168  // during expansion.
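 // Illustrative sketch (editorial annotation, not part of the original
 // source): for an i64 value %v and a non-integral pointer type in address
 // space 1, the expansion below produces roughly
 //   %uglygep = getelementptr i8, i8 addrspace(1)* null, i64 %v
 //   %p = bitcast i8 addrspace(1)* %uglygep to <Ty>
 // instead of an inttoptr instruction.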
169  if (Op == Instruction::IntToPtr) {
170  auto *PtrTy = cast<PointerType>(Ty);
171  if (DL.isNonIntegralPointerType(PtrTy)) {
172  auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
173  assert(DL.getTypeAllocSize(Builder.getInt8Ty()) == 1 &&
174  "alloc size of i8 must be 1 byte for the GEP to be correct");
175  auto *GEP = Builder.CreateGEP(
176  Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
177  return Builder.CreateBitCast(GEP, Ty);
178  }
179  }
180  // Short-circuit unnecessary bitcasts.
181  if (Op == Instruction::BitCast) {
182  if (V->getType() == Ty)
183  return V;
184  if (CastInst *CI = dyn_cast<CastInst>(V)) {
185  if (CI->getOperand(0)->getType() == Ty)
186  return CI->getOperand(0);
187  }
188  }
189  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
190  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
191  SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
192  if (CastInst *CI = dyn_cast<CastInst>(V))
193  if ((CI->getOpcode() == Instruction::PtrToInt ||
194  CI->getOpcode() == Instruction::IntToPtr) &&
195  SE.getTypeSizeInBits(CI->getType()) ==
196  SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
197  return CI->getOperand(0);
198  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
199  if ((CE->getOpcode() == Instruction::PtrToInt ||
200  CE->getOpcode() == Instruction::IntToPtr) &&
201  SE.getTypeSizeInBits(CE->getType()) ==
202  SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
203  return CE->getOperand(0);
204  }
205 
206  // Fold a cast of a constant.
207  if (Constant *C = dyn_cast<Constant>(V))
208  return ConstantExpr::getCast(Op, C, Ty);
209 
210  // Try to reuse existing cast, or insert one.
211  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
212 }
213 
214 /// InsertBinop - Insert the specified binary operator, doing a small amount
215 /// of work to avoid inserting an obviously redundant operation, and hoisting
216 /// to an outer loop when the opportunity is there and it is safe.
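/// For example (editorial note): if both operands are invariant in the loop
/// containing the current insertion point and that loop has a preheader, the
/// binop is emitted in the preheader; and if an identical binop with
/// compatible wrap flags appears within a few instructions before the
/// insertion point, it is reused rather than duplicated.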
217 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
218  Value *LHS, Value *RHS,
219  SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
220  // Fold a binop with constant operands.
221  if (Constant *CLHS = dyn_cast<Constant>(LHS))
222  if (Constant *CRHS = dyn_cast<Constant>(RHS))
223  return ConstantExpr::get(Opcode, CLHS, CRHS);
224 
225  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
226  unsigned ScanLimit = 6;
227  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
228  // Scanning starts from the last instruction before the insertion point.
229  BasicBlock::iterator IP = Builder.GetInsertPoint();
230  if (IP != BlockBegin) {
231  --IP;
232  for (; ScanLimit; --IP, --ScanLimit) {
233  // Don't count dbg.value against the ScanLimit, to avoid perturbing the
234  // generated code.
235  if (isa<DbgInfoIntrinsic>(IP))
236  ScanLimit++;
237 
238  auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
239  // Ensure that no-wrap flags match.
240  if (isa<OverflowingBinaryOperator>(I)) {
241  if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
242  return true;
243  if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
244  return true;
245  }
246  // Conservatively, do not reuse any instruction which has the exact
247  // flag set.
248  if (isa<PossiblyExactOperator>(I) && I->isExact())
249  return true;
250  return false;
251  };
252  if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
253  IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
254  return &*IP;
255  if (IP == BlockBegin) break;
256  }
257  }
258 
259  // Save the original insertion point so we can restore it when we're done.
260  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
261  SCEVInsertPointGuard Guard(Builder, this);
262 
263  if (IsSafeToHoist) {
264  // Move the insertion point out of as many loops as we can.
265  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
266  if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
267  BasicBlock *Preheader = L->getLoopPreheader();
268  if (!Preheader) break;
269 
270  // Ok, move up a level.
271  Builder.SetInsertPoint(Preheader->getTerminator());
272  }
273  }
274 
275  // If we haven't found this binop, insert it.
276  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
277  BO->setDebugLoc(Loc);
278  if (Flags & SCEV::FlagNUW)
279  BO->setHasNoUnsignedWrap();
280  if (Flags & SCEV::FlagNSW)
281  BO->setHasNoSignedWrap();
282 
283  return BO;
284 }
285 
286 /// FactorOutConstant - Test if S is divisible by Factor, using signed
287 /// division. If so, update S with Factor divided out and return true.
288 /// S need not be evenly divisible if a reasonable remainder can be
289 /// computed.
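/// For example (editorial note): with Factor = 4, the constant 10 becomes
/// S = 2 with remainder 2, and the addrec {8,+,12}<L> becomes {2,+,3}<L>
/// with no remainder.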
290 static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
291  const SCEV *Factor, ScalarEvolution &SE,
292  const DataLayout &DL) {
293  // Everything is divisible by one.
294  if (Factor->isOne())
295  return true;
296 
297  // x/x == 1.
298  if (S == Factor) {
299  S = SE.getConstant(S->getType(), 1);
300  return true;
301  }
302 
303  // For a Constant, check for a multiple of the given factor.
304  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
305  // 0/x == 0.
306  if (C->isZero())
307  return true;
308  // Check for divisibility.
309  if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
310  ConstantInt *CI =
311  ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
312  // If the quotient is zero and the remainder is non-zero, reject
313  // the value at this scale. It will be considered for subsequent
314  // smaller scales.
315  if (!CI->isZero()) {
316  const SCEV *Div = SE.getConstant(CI);
317  S = Div;
318  Remainder = SE.getAddExpr(
319  Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
320  return true;
321  }
322  }
323  }
324 
325  // In a Mul, check if there is a constant operand which is a multiple
326  // of the given factor.
327  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
328  // Size is known, check if there is a constant operand which is a multiple
329  // of the given factor. If so, we can factor it.
330  if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
331  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
332  if (!C->getAPInt().srem(FC->getAPInt())) {
333  SmallVector<const SCEV *, 4> NewMulOps(M->operands());
334  NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
335  S = SE.getMulExpr(NewMulOps);
336  return true;
337  }
338  }
339 
340  // In an AddRec, check if both start and step are divisible.
341  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
342  const SCEV *Step = A->getStepRecurrence(SE);
343  const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
344  if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
345  return false;
346  if (!StepRem->isZero())
347  return false;
348  const SCEV *Start = A->getStart();
349  if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
350  return false;
351  S = SE.getAddRecExpr(Start, Step, A->getLoop(),
352  A->getNoWrapFlags(SCEV::FlagNW));
353  return true;
354  }
355 
356  return false;
357 }
358 
359 /// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
360 /// is the number of SCEVAddRecExprs present, which are kept at the end of
361 /// the list.
362 ///
363 static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
364  Type *Ty,
365  ScalarEvolution &SE) {
366  unsigned NumAddRecs = 0;
367  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
368  ++NumAddRecs;
369  // Group Ops into non-addrecs and addrecs.
370  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
371  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
372  // Let ScalarEvolution sort and simplify the non-addrecs list.
373  const SCEV *Sum = NoAddRecs.empty() ?
374  SE.getConstant(Ty, 0) :
375  SE.getAddExpr(NoAddRecs);
376  // If it returned an add, use the operands. Otherwise it simplified
377  // the sum into a single value, so just use that.
378  Ops.clear();
379  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
380  Ops.append(Add->op_begin(), Add->op_end());
381  else if (!Sum->isZero())
382  Ops.push_back(Sum);
383  // Then append the addrecs.
384  Ops.append(AddRecs.begin(), AddRecs.end());
385 }
386 
387 /// SplitAddRecs - Flatten a list of add operands, moving addrec start values
388 /// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
389 /// This helps expose more opportunities for folding parts of the expressions
390 /// into GEP indices.
391 ///
392 static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
393  Type *Ty,
394  ScalarEvolution &SE) {
395  // Find the addrecs.
396  SmallVector<const SCEV *, 8> AddRecs;
397  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
398  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
399  const SCEV *Start = A->getStart();
400  if (Start->isZero()) break;
401  const SCEV *Zero = SE.getConstant(Ty, 0);
402  AddRecs.push_back(SE.getAddRecExpr(Zero,
403  A->getStepRecurrence(SE),
404  A->getLoop(),
405  A->getNoWrapFlags(SCEV::FlagNW)));
406  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
407  Ops[i] = Zero;
408  Ops.append(Add->op_begin(), Add->op_end());
409  e += Add->getNumOperands();
410  } else {
411  Ops[i] = Start;
412  }
413  }
414  if (!AddRecs.empty()) {
415  // Add the addrecs onto the end of the list.
416  Ops.append(AddRecs.begin(), AddRecs.end());
417  // Resort the operand list, moving any constants to the front.
418  SimplifyAddOperands(Ops, Ty, SE);
419  }
420 }
421 
422 /// expandAddToGEP - Expand an addition expression with a pointer type into
423 /// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
424 /// BasicAliasAnalysis and other passes analyze the result. See the rules
425 /// for getelementptr vs. inttoptr in
426 /// http://llvm.org/docs/LangRef.html#pointeraliasing
427 /// for details.
428 ///
429 /// Design note: The correctness of using getelementptr here depends on
430 /// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
431 /// they may introduce pointer arithmetic which may not be safely converted
432 /// into getelementptr.
433 ///
434 /// Design note: It might seem desirable for this function to be more
435 /// loop-aware. If some of the indices are loop-invariant while others
436 /// aren't, it might seem desirable to emit multiple GEPs, keeping the
437 /// loop-invariant portions of the overall computation outside the loop.
438 /// However, there are a few reasons this is not done here. Hoisting simple
439 /// arithmetic is a low-level optimization that often isn't very
440 /// important until late in the optimization process. In fact, passes
441 /// like InstructionCombining will combine GEPs, even if it means
442 /// pushing loop-invariant computation down into loops, so even if the
443 /// GEPs were split here, the work would quickly be undone. The
444 /// LoopStrengthReduction pass, which is usually run quite late (and
445 /// after the last InstructionCombining pass), takes care of hoisting
446 /// loop-invariant portions of expressions, after considering what
447 /// can be folded using target addressing modes.
448 ///
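/// Illustrative example (editorial note): expanding the sum (%p + 4 * %i)
/// over a base of type i32* emits
///   %scevgep = getelementptr i32, i32* %p, i64 %i
/// because the element size (4 bytes) can be factored out of the offset,
/// rather than a ptrtoint/add/inttoptr sequence.
///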
449 Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
450  const SCEV *const *op_end,
451  PointerType *PTy,
452  Type *Ty,
453  Value *V) {
454  SmallVector<Value *, 4> GepIndices;
455  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
456  bool AnyNonZeroIndices = false;
457 
458  // Split AddRecs up into parts as either of the parts may be usable
459  // without the other.
460  SplitAddRecs(Ops, Ty, SE);
461 
462  Type *IntIdxTy = DL.getIndexType(PTy);
463 
464  // For opaque pointers, always generate i8 GEP.
465  if (!PTy->isOpaque()) {
466  // Descend down the pointer's type and attempt to convert the other
467  // operands into GEP indices, at each level. The first index in a GEP
468  // indexes into the array implied by the pointer operand; the rest of
469  // the indices index into the element or field type selected by the
470  // preceding index.
471  Type *ElTy = PTy->getNonOpaquePointerElementType();
472  for (;;) {
473  // If the scale size is not 0, attempt to factor out a scale for
474  // array indexing.
475  SmallVector<const SCEV *, 8> ScaledOps;
476  if (ElTy->isSized()) {
477  const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
478  if (!ElSize->isZero()) {
479  SmallVector<const SCEV *, 8> NewOps;
480  for (const SCEV *Op : Ops) {
481  const SCEV *Remainder = SE.getConstant(Ty, 0);
482  if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
483  // Op now has ElSize factored out.
484  ScaledOps.push_back(Op);
485  if (!Remainder->isZero())
486  NewOps.push_back(Remainder);
487  AnyNonZeroIndices = true;
488  } else {
489  // The operand was not divisible, so add it to the list of
490  // operands we'll scan next iteration.
491  NewOps.push_back(Op);
492  }
493  }
494  // If we made any changes, update Ops.
495  if (!ScaledOps.empty()) {
496  Ops = NewOps;
497  SimplifyAddOperands(Ops, Ty, SE);
498  }
499  }
500  }
501 
502  // Record the scaled array index for this level of the type. If
503  // we didn't find any operands that could be factored, tentatively
504  // assume that element zero was selected (since the zero offset
505  // would obviously be folded away).
506  Value *Scaled =
507  ScaledOps.empty()
508  ? Constant::getNullValue(Ty)
509  : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
510  GepIndices.push_back(Scaled);
511 
512  // Collect struct field index operands.
513  while (StructType *STy = dyn_cast<StructType>(ElTy)) {
514  bool FoundFieldNo = false;
515  // An empty struct has no fields.
516  if (STy->getNumElements() == 0) break;
517  // Field offsets are known. See if a constant offset falls within any of
518  // the struct fields.
519  if (Ops.empty())
520  break;
521  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
522  if (SE.getTypeSizeInBits(C->getType()) <= 64) {
523  const StructLayout &SL = *DL.getStructLayout(STy);
524  uint64_t FullOffset = C->getValue()->getZExtValue();
525  if (FullOffset < SL.getSizeInBytes()) {
526  unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
527  GepIndices.push_back(
528  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
529  ElTy = STy->getTypeAtIndex(ElIdx);
530  Ops[0] =
531  SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
532  AnyNonZeroIndices = true;
533  FoundFieldNo = true;
534  }
535  }
536  // If no struct field offsets were found, tentatively assume that
537  // field zero was selected (since the zero offset would obviously
538  // be folded away).
539  if (!FoundFieldNo) {
540  ElTy = STy->getTypeAtIndex(0u);
541  GepIndices.push_back(
542  Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
543  }
544  }
545 
546  if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
547  ElTy = ATy->getElementType();
548  else
549  // FIXME: Handle VectorType.
550  // E.g., If ElTy is scalable vector, then ElSize is not a compile-time
551  // constant, therefore can not be factored out. The generated IR is less
552  // ideal with base 'V' cast to i8* and do ugly getelementptr over that.
553  break;
554  }
555  }
556 
557  // If none of the operands were convertible to proper GEP indices, cast
558  // the base to i8* and do an ugly getelementptr with that. It's still
559  // better than ptrtoint+arithmetic+inttoptr at least.
560  if (!AnyNonZeroIndices) {
561  // Cast the base to i8*.
562  if (!PTy->isOpaque())
563  V = InsertNoopCastOfTo(V,
564  Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
565 
566  assert(!isa<Instruction>(V) ||
567  SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
568 
569  // Expand the operands for a plain byte offset.
570  Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);
571 
572  // Fold a GEP with constant operands.
573  if (Constant *CLHS = dyn_cast<Constant>(V))
574  if (Constant *CRHS = dyn_cast<Constant>(Idx))
575  return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
576  CLHS, CRHS);
577 
578  // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
579  unsigned ScanLimit = 6;
580  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
581  // Scanning starts from the last instruction before the insertion point.
582  BasicBlock::iterator IP = Builder.GetInsertPoint();
583  if (IP != BlockBegin) {
584  --IP;
585  for (; ScanLimit; --IP, --ScanLimit) {
586  // Don't count dbg.value against the ScanLimit, to avoid perturbing the
587  // generated code.
588  if (isa<DbgInfoIntrinsic>(IP))
589  ScanLimit++;
590  if (IP->getOpcode() == Instruction::GetElementPtr &&
591  IP->getOperand(0) == V && IP->getOperand(1) == Idx &&
592  cast<GEPOperator>(&*IP)->getSourceElementType() ==
593  Builder.getInt8Ty())
594  return &*IP;
595  if (IP == BlockBegin) break;
596  }
597  }
598 
599  // Save the original insertion point so we can restore it when we're done.
600  SCEVInsertPointGuard Guard(Builder, this);
601 
602  // Move the insertion point out of as many loops as we can.
603  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
604  if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
605  BasicBlock *Preheader = L->getLoopPreheader();
606  if (!Preheader) break;
607 
608  // Ok, move up a level.
609  Builder.SetInsertPoint(Preheader->getTerminator());
610  }
611 
612  // Emit a GEP.
613  return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
614  }
615 
616  {
617  SCEVInsertPointGuard Guard(Builder, this);
618 
619  // Move the insertion point out of as many loops as we can.
620  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
621  if (!L->isLoopInvariant(V)) break;
622 
623  bool AnyIndexNotLoopInvariant = any_of(
624  GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });
625 
626  if (AnyIndexNotLoopInvariant)
627  break;
628 
629  BasicBlock *Preheader = L->getLoopPreheader();
630  if (!Preheader) break;
631 
632  // Ok, move up a level.
633  Builder.SetInsertPoint(Preheader->getTerminator());
634  }
635 
636  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
637  // because ScalarEvolution may have changed the address arithmetic to
638  // compute a value which is beyond the end of the allocated object.
639  Value *Casted = V;
640  if (V->getType() != PTy)
641  Casted = InsertNoopCastOfTo(Casted, PTy);
642  Value *GEP = Builder.CreateGEP(PTy->getNonOpaquePointerElementType(),
643  Casted, GepIndices, "scevgep");
644  Ops.push_back(SE.getUnknown(GEP));
645  }
646 
647  return expand(SE.getAddExpr(Ops));
648 }
649 
650 Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
651  Value *V) {
652  const SCEV *const Ops[1] = {Op};
653  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
654 }
655 
656 /// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
657 /// SCEV expansion. If they are nested, this is the most nested. If they are
658 /// neighboring, pick the later.
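/// For example (editorial note): if A and B are siblings and A's header
/// dominates B's header, B (the later loop) is returned; if B is nested
/// inside A, B (the deeper loop) is returned.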
659 static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
660  DominatorTree &DT) {
661  if (!A) return B;
662  if (!B) return A;
663  if (A->contains(B)) return B;
664  if (B->contains(A)) return A;
665  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
666  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
667  return A; // Arbitrarily break the tie.
668 }
669 
670 /// getRelevantLoop - Get the most relevant loop associated with the given
671 /// expression, according to PickMostRelevantLoop.
672 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
673  // Test whether we've already computed the most relevant loop for this SCEV.
674  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
675  if (!Pair.second)
676  return Pair.first->second;
677 
678  if (isa<SCEVConstant>(S))
679  // A constant has no relevant loops.
680  return nullptr;
681  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
682  if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
683  return Pair.first->second = SE.LI.getLoopFor(I->getParent());
684  // A non-instruction has no relevant loops.
685  return nullptr;
686  }
687  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
688  const Loop *L = nullptr;
689  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
690  L = AR->getLoop();
691  for (const SCEV *Op : N->operands())
692  L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
693  return RelevantLoops[N] = L;
694  }
695  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
696  const Loop *Result = getRelevantLoop(C->getOperand());
697  return RelevantLoops[C] = Result;
698  }
699  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
700  const Loop *Result = PickMostRelevantLoop(
701  getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
702  return RelevantLoops[D] = Result;
703  }
704  llvm_unreachable("Unexpected SCEV type!");
705 }
706 
707 namespace {
708 
709 /// LoopCompare - Compare loops by PickMostRelevantLoop.
710 class LoopCompare {
711  DominatorTree &DT;
712 public:
713  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
714 
715  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
716  std::pair<const Loop *, const SCEV *> RHS) const {
717  // Keep pointer operands sorted at the end.
718  if (LHS.second->getType()->isPointerTy() !=
719  RHS.second->getType()->isPointerTy())
720  return LHS.second->getType()->isPointerTy();
721 
722  // Compare loops with PickMostRelevantLoop.
723  if (LHS.first != RHS.first)
724  return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
725 
726  // If one operand is a non-constant negative and the other is not,
727  // put the non-constant negative on the right so that a sub can
728  // be used instead of a negate and add.
729  if (LHS.second->isNonConstantNegative()) {
730  if (!RHS.second->isNonConstantNegative())
731  return false;
732  } else if (RHS.second->isNonConstantNegative())
733  return true;
734 
735  // Otherwise they are equivalent according to this comparison.
736  return false;
737  }
738 };
739 
740 }
741 
742 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
743  Type *Ty = SE.getEffectiveSCEVType(S->getType());
744 
745  // Collect all the add operands in a loop, along with their associated loops.
746  // Iterate in reverse so that constants are emitted last, all else equal, and
747  // so that pointer operands are inserted first, which the code below relies on
748  // to form more involved GEPs.
749  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
750  for (const SCEV *Op : reverse(S->operands()))
751  OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
752 
753  // Sort by loop. Use a stable sort so that constants follow non-constants and
754  // pointer operands precede non-pointer operands.
755  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
756 
757  // Emit instructions to add all the operands. Hoist as much as possible
758  // out of loops, and form meaningful getelementptrs where possible.
759  Value *Sum = nullptr;
760  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
761  const Loop *CurLoop = I->first;
762  const SCEV *Op = I->second;
763  if (!Sum) {
764  // This is the first operand. Just expand it.
765  Sum = expand(Op);
766  ++I;
767  continue;
768  }
769 
770  assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
771  if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
772  // The running sum expression is a pointer. Try to form a getelementptr
773  // at this level with that as the base.
774  SmallVector<const SCEV *, 4> NewOps;
775  for (; I != E && I->first == CurLoop; ++I) {
776  // If the operand is a SCEVUnknown and not an instruction, peek through
777  // it, to enable more of it to be folded into the GEP.
778  const SCEV *X = I->second;
779  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
780  if (!isa<Instruction>(U->getValue()))
781  X = SE.getSCEV(U->getValue());
782  NewOps.push_back(X);
783  }
784  Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
785  } else if (Op->isNonConstantNegative()) {
786  // Instead of doing a negate and add, just do a subtract.
787  Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
788  Sum = InsertNoopCastOfTo(Sum, Ty);
789  Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
790  /*IsSafeToHoist*/ true);
791  ++I;
792  } else {
793  // A simple add.
794  Value *W = expandCodeForImpl(Op, Ty, false);
795  Sum = InsertNoopCastOfTo(Sum, Ty);
796  // Canonicalize a constant to the RHS.
797  if (isa<Constant>(Sum)) std::swap(Sum, W);
798  Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
799  /*IsSafeToHoist*/ true);
800  ++I;
801  }
802  }
803 
804  return Sum;
805 }
806 
807 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
808  Type *Ty = SE.getEffectiveSCEVType(S->getType());
809 
810  // Collect all the mul operands in a loop, along with their associated loops.
811  // Iterate in reverse so that constants are emitted last, all else equal.
812  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
813  for (const SCEV *Op : reverse(S->operands()))
814  OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
815 
816  // Sort by loop. Use a stable sort so that constants follow non-constants.
817  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
818 
819  // Emit instructions to mul all the operands. Hoist as much as possible
820  // out of loops.
821  Value *Prod = nullptr;
822  auto I = OpsAndLoops.begin();
823 
824  // Expand the calculation of X pow N in the following manner:
825  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
826  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
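  // Illustrative example (editorial note): for N = 13 = 8 + 4 + 1, the code
  // below emits (X pow 8) * (X pow 4) * X, computing each power of two by
  // repeated squaring of X.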
827  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
828  auto E = I;
829  // Calculate how many times the same operand from the same loop is included
830  // in this power.
831  uint64_t Exponent = 0;
832  const uint64_t MaxExponent = UINT64_MAX >> 1;
833  // No one sane will ever try to calculate such huge exponents, but if we
834  // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
835  // below when the power of 2 exceeds our Exponent, and we want it to be
836  // 1u << 31 at most to not deal with unsigned overflow.
837  while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
838  ++Exponent;
839  ++E;
840  }
841  assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
842 
843  // Calculate powers with exponents 1, 2, 4, 8, etc., and include those that
844  // are needed in the result.
845  Value *P = expandCodeForImpl(I->second, Ty, false);
846  Value *Result = nullptr;
847  if (Exponent & 1)
848  Result = P;
849  for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
850  P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
851  /*IsSafeToHoist*/ true);
852  if (Exponent & BinExp)
853  Result = Result ? InsertBinop(Instruction::Mul, Result, P,
854  SCEV::FlagAnyWrap,
855  /*IsSafeToHoist*/ true)
856  : P;
857  }
858 
859  I = E;
860  assert(Result && "Nothing was expanded?");
861  return Result;
862  };
863 
864  while (I != OpsAndLoops.end()) {
865  if (!Prod) {
866  // This is the first operand. Just expand it.
867  Prod = ExpandOpBinPowN();
868  } else if (I->second->isAllOnesValue()) {
869  // Instead of doing a multiply by negative one, just do a negate.
870  Prod = InsertNoopCastOfTo(Prod, Ty);
871  Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
872  SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
873  ++I;
874  } else {
875  // A simple mul.
876  Value *W = ExpandOpBinPowN();
877  Prod = InsertNoopCastOfTo(Prod, Ty);
878  // Canonicalize a constant to the RHS.
879  if (isa<Constant>(Prod)) std::swap(Prod, W);
880  const APInt *RHS;
881  if (match(W, m_Power2(RHS))) {
882  // Canonicalize Prod*(1<<C) to Prod<<C.
883  assert(!Ty->isVectorTy() && "vector types are not SCEVable");
884  auto NWFlags = S->getNoWrapFlags();
885  // Clear the nsw flag if the shl will produce a poison value.
886  if (RHS->logBase2() == RHS->getBitWidth() - 1)
887  NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
888  Prod = InsertBinop(Instruction::Shl, Prod,
889  ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
890  /*IsSafeToHoist*/ true);
891  } else {
892  Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
893  /*IsSafeToHoist*/ true);
894  }
895  }
896  }
897 
898  return Prod;
899 }
900 
901 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
902  Type *Ty = SE.getEffectiveSCEVType(S->getType());
903 
904  Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
905  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
906  const APInt &RHS = SC->getAPInt();
907  if (RHS.isPowerOf2())
908  return InsertBinop(Instruction::LShr, LHS,
909  ConstantInt::get(Ty, RHS.logBase2()),
910  SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
911  }
912 
913  Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
914  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
915  /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
916 }
917 
918 /// Determine if this is a well-behaved chain of instructions leading back to
919 /// the PHI. If so, it may be reused by expanded expressions.
920 bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
921  const Loop *L) {
922  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
923  (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
924  return false;
925  // If any of the operands don't dominate the insert position, bail.
926  // Addrec operands are always loop-invariant, so this can only happen
927  // if there are instructions which haven't been hoisted.
928  if (L == IVIncInsertLoop) {
929  for (Use &Op : llvm::drop_begin(IncV->operands()))
930  if (Instruction *OInst = dyn_cast<Instruction>(Op))
931  if (!SE.DT.dominates(OInst, IVIncInsertPos))
932  return false;
933  }
934  // Advance to the next instruction.
935  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
936  if (!IncV)
937  return false;
938 
939  if (IncV->mayHaveSideEffects())
940  return false;
941 
942  if (IncV == PN)
943  return true;
944 
945  return isNormalAddRecExprPHI(PN, IncV, L);
946 }
947 
948 /// getIVIncOperand returns an induction variable increment's induction
949 /// variable operand.
950 ///
951 /// If allowScale is set, any type of GEP is allowed as long as the nonIV
952 /// operands dominate InsertPos.
953 ///
954 /// If allowScale is not set, ensure that a GEP increment conforms to one of the
955 /// simple patterns generated by getAddRecExprPHILiterally and
956 /// expandAddToGEP. If the pattern isn't recognized, return NULL.
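///
/// For example (editorial note): for "%iv.next = add %iv, %step", this
/// returns the instruction defining %iv when %step dominates InsertPos (or
/// %step is not an instruction), and null otherwise.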
957 Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
958  Instruction *InsertPos,
959  bool allowScale) {
960  if (IncV == InsertPos)
961  return nullptr;
962 
963  switch (IncV->getOpcode()) {
964  default:
965  return nullptr;
966  // Check for a simple Add/Sub or GEP of a loop invariant step.
967  case Instruction::Add:
968  case Instruction::Sub: {
969  Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
970  if (!OInst || SE.DT.dominates(OInst, InsertPos))
971  return dyn_cast<Instruction>(IncV->getOperand(0));
972  return nullptr;
973  }
974  case Instruction::BitCast:
975  return dyn_cast<Instruction>(IncV->getOperand(0));
976  case Instruction::GetElementPtr:
977  for (Use &U : llvm::drop_begin(IncV->operands())) {
978  if (isa<Constant>(U))
979  continue;
980  if (Instruction *OInst = dyn_cast<Instruction>(U)) {
981  if (!SE.DT.dominates(OInst, InsertPos))
982  return nullptr;
983  }
984  if (allowScale) {
985  // allow any kind of GEP as long as it can be hoisted.
986  continue;
987  }
988  // This must be a pointer addition of constants (pretty), which is already
989  // handled, or some number of address-size elements (ugly). Ugly geps
990  // have 2 operands. i1* is used by the expander to represent an
991  // address-size element.
992  if (IncV->getNumOperands() != 2)
993  return nullptr;
994  unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
995  if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
996  && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
997  return nullptr;
998  break;
999  }
1000  return dyn_cast<Instruction>(IncV->getOperand(0));
1001  }
1002 }
1003 
1004 /// If the insert point of the current builder or any of the builders on the
1005 /// stack of saved builders has 'I' as its insert point, update it to point to
1006 /// the instruction after 'I'. This is intended to be used when the instruction
1007 /// 'I' is being moved. If this fixup is not done and 'I' is moved to a
1008 /// different block, the inconsistent insert point (with a mismatched
1009 /// Instruction and Block) can lead to an instruction being inserted in a block
1010 /// other than its parent.
1011 void SCEVExpander::fixupInsertPoints(Instruction *I) {
1012  BasicBlock::iterator It(*I);
1013  BasicBlock::iterator NewInsertPt = std::next(It);
1014  if (Builder.GetInsertPoint() == It)
1015  Builder.SetInsertPoint(&*NewInsertPt);
1016  for (auto *InsertPtGuard : InsertPointGuards)
1017  if (InsertPtGuard->GetInsertPoint() == It)
1018  InsertPtGuard->SetInsertPoint(NewInsertPt);
1019 }
1020 
1021 /// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
1022 /// it available to other uses in this loop. Recursively hoist any operands,
1023 /// until we reach a value that dominates InsertPos.
1024 bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
1025  if (SE.DT.dominates(IncV, InsertPos))
1026  return true;
1027 
1028  // InsertPos must itself dominate IncV so that IncV's new position satisfies
1029  // its existing users.
1030  if (isa<PHINode>(InsertPos) ||
1031  !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
1032  return false;
1033 
1034  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
1035  return false;
1036 
1037  // Check that the chain of IV operands leading back to Phi can be hoisted.
1038  SmallVector<Instruction *, 4> IVIncs;
1039  for(;;) {
1040  Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
1041  if (!Oper)
1042  return false;
1043  // IncV is safe to hoist.
1044  IVIncs.push_back(IncV);
1045  IncV = Oper;
1046  if (SE.DT.dominates(IncV, InsertPos))
1047  break;
1048  }
1049  for (Instruction *I : llvm::reverse(IVIncs)) {
1050  fixupInsertPoints(I);
1051  I->moveBefore(InsertPos);
1052  }
1053  return true;
1054 }
1055 
1056 /// Determine if this cyclic phi is in a form that would have been generated by
1057 /// LSR. We don't care if the phi was actually expanded in this pass, as long
1058 /// as it is in a low-cost form, for example, no implied multiplication. This
1059 /// should match any patterns generated by getAddRecExprPHILiterally and
1060 /// expandAddToGEP.
1061 bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
1062  const Loop *L) {
1063  for(Instruction *IVOper = IncV;
1064  (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
1065  /*allowScale=*/false));) {
1066  if (IVOper == PN)
1067  return true;
1068  }
1069  return false;
1070 }
1071 
1072 /// expandIVInc - Expand an IV increment at Builder's current InsertPos.
1073 /// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
1074 /// need to materialize IV increments elsewhere to handle difficult situations.
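/// For a pointer-typed PHI the step is emitted as a GEP; for an integer PHI
/// it emits "%<IVName>.iv.next = add %iv, %step", or a sub when useSubtract
/// is set (callers set it for non-constant negative steps).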
1075 Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
1076  Type *ExpandTy, Type *IntTy,
1077  bool useSubtract) {
1078  Value *IncV;
1079  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
1080  if (ExpandTy->isPointerTy()) {
1081  PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
1082  // If the step isn't constant, don't use an implicitly scaled GEP, because
1083  // that would require a multiply inside the loop.
1084  if (!isa<ConstantInt>(StepV))
1085  GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
1086  GEPPtrTy->getAddressSpace());
1087  IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
1088  if (IncV->getType() != PN->getType())
1089  IncV = Builder.CreateBitCast(IncV, PN->getType());
1090  } else {
1091  IncV = useSubtract ?
1092  Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
1093  Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
1094  }
1095  return IncV;
1096 }
1097 
1098 /// Check whether we can cheaply express the requested SCEV in terms of
1099 /// the available PHI SCEV by truncation and/or inversion of the step.
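/// For example (editorial note): a requested i32 {0,+,1} can be reused from
/// an available i64 {0,+,1} phi by truncation, and a requested {R,+,-1} can
/// be rewritten as R - {0,+,1} when such a phi is available (step inversion).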
1100 static bool canBeCheaplyTransformed(ScalarEvolution &SE,
1101  const SCEVAddRecExpr *Phi,
1102  const SCEVAddRecExpr *Requested,
1103  bool &InvertStep) {
1104  // We can't transform to match a pointer PHI.
1105  if (Phi->getType()->isPointerTy())
1106  return false;
1107 
1108  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
1109  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
1110 
1111  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
1112  return false;
1113 
1114  // Try truncate it if necessary.
1115  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
1116  if (!Phi)
1117  return false;
1118 
1119  // Check whether truncation will help.
1120  if (Phi == Requested) {
1121  InvertStep = false;
1122  return true;
1123  }
1124 
1125  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
1126  if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
1127  InvertStep = true;
1128  return true;
1129  }
1130 
1131  return false;
1132 }
1133 
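// Editorial note: the two helpers below prove that the increment AR + Step
// cannot wrap by re-evaluating it in a type twice as wide. If extending the
// operands first and extending the result of the addition afterwards yield
// the same wide SCEV, the narrow increment is NSW (sign extension) or NUW
// (zero extension).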
1134 static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1135  if (!isa<IntegerType>(AR->getType()))
1136  return false;
1137 
1138  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1139  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1140  const SCEV *Step = AR->getStepRecurrence(SE);
1141  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
1142  SE.getSignExtendExpr(AR, WideTy));
1143  const SCEV *ExtendAfterOp =
1144  SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1145  return ExtendAfterOp == OpAfterExtend;
1146 }
1147 
1148 static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1149  if (!isa<IntegerType>(AR->getType()))
1150  return false;
1151 
1152  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1153  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1154  const SCEV *Step = AR->getStepRecurrence(SE);
1155  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
1156  SE.getZeroExtendExpr(AR, WideTy));
1157  const SCEV *ExtendAfterOp =
1158  SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1159  return ExtendAfterOp == OpAfterExtend;
1160 }
1161 
1162 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
1163 /// the base addrec, which is the addrec without any non-loop-dominating
1164 /// values, and return the PHI.
1165 PHINode *
1166 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1167  const Loop *L,
1168  Type *ExpandTy,
1169  Type *IntTy,
1170  Type *&TruncTy,
1171  bool &InvertStep) {
1172  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
1173 
1174  // Reuse a previously-inserted PHI, if present.
1175  BasicBlock *LatchBlock = L->getLoopLatch();
1176  if (LatchBlock) {
1177  PHINode *AddRecPhiMatch = nullptr;
1178  Instruction *IncV = nullptr;
1179  TruncTy = nullptr;
1180  InvertStep = false;
1181 
1182  // Only try partially matching scevs that need truncation and/or
1183  // step-inversion if we know this loop is outside the current loop.
1184  bool TryNonMatchingSCEV =
1185  IVIncInsertLoop &&
1186  SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1187 
1188  for (PHINode &PN : L->getHeader()->phis()) {
1189  if (!SE.isSCEVable(PN.getType()))
1190  continue;
1191 
1192  // We should not look for an incomplete PHI. Getting SCEV for an incomplete
1193  // PHI has no meaning at all.
1194  if (!PN.isComplete()) {
1195  SCEV_DEBUG_WITH_TYPE(
1196  DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1197  continue;
1198  }
1199 
1200  const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
1201  if (!PhiSCEV)
1202  continue;
1203 
1204  bool IsMatchingSCEV = PhiSCEV == Normalized;
1205  // We only handle truncation and inversion of phi recurrences for the
1206  // expanded expression if the expanded expression's loop dominates the
1207  // loop we insert to. Check now, so we can bail out early.
1208  if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1209  continue;
1210 
1211  // TODO: this possibly can be reworked to avoid this cast at all.
1212  Instruction *TempIncV =
1213  dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
1214  if (!TempIncV)
1215  continue;
1216 
1217  // Check whether we can reuse this PHI node.
1218  if (LSRMode) {
1219  if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1220  continue;
1221  } else {
1222  if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1223  continue;
1224  }
1225 
1226  // Stop if we have found an exact match SCEV.
1227  if (IsMatchingSCEV) {
1228  IncV = TempIncV;
1229  TruncTy = nullptr;
1230  InvertStep = false;
1231  AddRecPhiMatch = &PN;
1232  break;
1233  }
1234 
1235  // Try whether the phi can be translated into the requested form
1236  // (truncated and/or offset by a constant).
1237  if ((!TruncTy || InvertStep) &&
1238  canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1239  // Record the phi node. But don't stop; we might find an exact match
1240  // later.
1241  AddRecPhiMatch = &PN;
1242  IncV = TempIncV;
1243  TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
1244  }
1245  }
1246 
1247  if (AddRecPhiMatch) {
1248  // Ok, the add recurrence looks usable.
1249  // Remember this PHI, even in post-inc mode.
1250  InsertedValues.insert(AddRecPhiMatch);
1251  // Remember the increment.
1252  rememberInstruction(IncV);
1253  // Those values were not actually inserted but re-used.
1254  ReusedValues.insert(AddRecPhiMatch);
1255  ReusedValues.insert(IncV);
1256  return AddRecPhiMatch;
1257  }
1258  }
1259 
1260  // Save the original insertion point so we can restore it when we're done.
1261  SCEVInsertPointGuard Guard(Builder, this);
1262 
1263  // Another AddRec may need to be recursively expanded below. For example, if
1264  // this AddRec is quadratic, the StepV may itself be an AddRec in this
1265  // loop. Remove this loop from the PostIncLoops set before expanding such
1266  // AddRecs. Otherwise, we cannot find a valid position for the step
1267  // (i.e. StepV can never dominate its loop header). Ideally, we could do
1268  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1269  // so it's not worth implementing SmallPtrSet::swap.
1270  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1271  PostIncLoops.clear();
1272 
1273  // Expand code for the start value into the loop preheader.
1274  assert(L->getLoopPreheader() &&
1275  "Can't expand add recurrences without a loop preheader!");
1276  Value *StartV =
1277  expandCodeForImpl(Normalized->getStart(), ExpandTy,
1278  L->getLoopPreheader()->getTerminator(), false);
1279 
1280  // StartV must have been inserted into L's preheader to dominate the new
1281  // phi.
1282  assert(!isa<Instruction>(StartV) ||
1283  SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1284  L->getHeader()));
1285 
1286  // Expand code for the step value. Do this before creating the PHI so that PHI
1287  // reuse code doesn't see an incomplete PHI.
1288  const SCEV *Step = Normalized->getStepRecurrence(SE);
1289  // If the stride is negative, insert a sub instead of an add for the increment
1290  // (unless it's a constant, because subtracts of constants are canonicalized
1291  // to adds).
1292  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1293  if (useSubtract)
1294  Step = SE.getNegativeSCEV(Step);
1295  // Expand the step somewhere that dominates the loop header.
1296  Value *StepV = expandCodeForImpl(
1297  Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1298 
1299  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1300  // we actually do emit an addition. It does not apply if we emit a
1301  // subtraction.
1302  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1303  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1304 
1305  // Create the PHI.
1306  BasicBlock *Header = L->getHeader();
1307  Builder.SetInsertPoint(Header, Header->begin());
1308  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1309  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1310  Twine(IVName) + ".iv");
1311 
1312  // Create the step instructions and populate the PHI.
1313  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1314  BasicBlock *Pred = *HPI;
1315 
1316  // Add a start value.
1317  if (!L->contains(Pred)) {
1318  PN->addIncoming(StartV, Pred);
1319  continue;
1320  }
1321 
1322  // Create a step value and add it to the PHI.
1323  // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1324  // instructions at IVIncInsertPos.
1325  Instruction *InsertPos = L == IVIncInsertLoop ?
1326  IVIncInsertPos : Pred->getTerminator();
1327  Builder.SetInsertPoint(InsertPos);
1328  Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1329 
1330  if (isa<OverflowingBinaryOperator>(IncV)) {
1331  if (IncrementIsNUW)
1332  cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1333  if (IncrementIsNSW)
1334  cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1335  }
1336  PN->addIncoming(IncV, Pred);
1337  }
1338 
1339  // After expanding subexpressions, restore the PostIncLoops set so the caller
1340  // can ensure that IVIncrement dominates the current uses.
1341  PostIncLoops = SavedPostIncLoops;
1342 
1343  // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
1344  // effective when we are able to use an IV inserted here, so record it.
1345  InsertedValues.insert(PN);
1346  InsertedIVs.push_back(PN);
1347  return PN;
1348 }
1349 
1350 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1351  Type *STy = S->getType();
1352  Type *IntTy = SE.getEffectiveSCEVType(STy);
1353  const Loop *L = S->getLoop();
1354 
1355  // Determine a normalized form of this expression, which is the expression
1356  // before any post-inc adjustment is made.
1357  const SCEVAddRecExpr *Normalized = S;
1358  if (PostIncLoops.count(L)) {
1359  PostIncLoopSet Loops;
1360  Loops.insert(L);
1361  Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
1362  }
1363 
1364  // Strip off any non-loop-dominating component from the addrec start.
1365  const SCEV *Start = Normalized->getStart();
1366  const SCEV *PostLoopOffset = nullptr;
1367  if (!SE.properlyDominates(Start, L->getHeader())) {
1368  PostLoopOffset = Start;
1369  Start = SE.getConstant(Normalized->getType(), 0);
1370  Normalized = cast<SCEVAddRecExpr>(
1371  SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1372  Normalized->getLoop(),
1373  Normalized->getNoWrapFlags(SCEV::FlagNW)));
1374  }
1375 
1376  // Strip off any non-loop-dominating component from the addrec step.
1377  const SCEV *Step = Normalized->getStepRecurrence(SE);
1378  const SCEV *PostLoopScale = nullptr;
1379  if (!SE.dominates(Step, L->getHeader())) {
1380  PostLoopScale = Step;
1381  Step = SE.getConstant(Normalized->getType(), 1);
1382  if (!Start->isZero()) {
1383  // The normalization below assumes that Start is constant zero, so if
1384  // it isn't, re-associate Start to PostLoopOffset.
1385  assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
1386  PostLoopOffset = Start;
1387  Start = SE.getConstant(Normalized->getType(), 0);
1388  }
1389  Normalized =
1390  cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1391  Start, Step, Normalized->getLoop(),
1392  Normalized->getNoWrapFlags(SCEV::FlagNW)));
1393  }
1394 
1395  // Expand the core addrec. If we need post-loop scaling, force it to
1396  // expand to an integer type to avoid the need for additional casting.
1397  Type *ExpandTy = PostLoopScale ? IntTy : STy;
1398  // We can't use a pointer type for the addrec if the pointer type is
1399  // non-integral.
1400  Type *AddRecPHIExpandTy =
1401  DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;
1402 
1403  // In some cases, we decide to reuse an existing phi node but need to truncate
1404  // it and/or invert the step.
1405  Type *TruncTy = nullptr;
1406  bool InvertStep = false;
1407  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1408  IntTy, TruncTy, InvertStep);
1409 
1410  // Accommodate post-inc mode, if necessary.
1411  Value *Result;
1412  if (!PostIncLoops.count(L))
1413  Result = PN;
1414  else {
1415  // In PostInc mode, use the post-incremented value.
1416  BasicBlock *LatchBlock = L->getLoopLatch();
1417  assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1418  Result = PN->getIncomingValueForBlock(LatchBlock);
1419 
1420  // We might be introducing a new use of the post-inc IV that is not poison
1421  // safe, in which case we should drop poison generating flags. Only keep
1422  // those flags for which SCEV has proven that they always hold.
1423  if (isa<OverflowingBinaryOperator>(Result)) {
1424  auto *I = cast<Instruction>(Result);
1425  if (!S->hasNoUnsignedWrap())
1426  I->setHasNoUnsignedWrap(false);
1427  if (!S->hasNoSignedWrap())
1428  I->setHasNoSignedWrap(false);
1429  }
1430 
1431  // For an expansion to use the postinc form, the client must call
1432  // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1433  // or dominated by IVIncInsertPos.
1434  if (isa<Instruction>(Result) &&
1435  !SE.DT.dominates(cast<Instruction>(Result),
1436  &*Builder.GetInsertPoint())) {
1437  // The induction variable's postinc expansion does not dominate this use.
1438  // IVUsers tries to prevent this case, so it is rare. However, it can
1439  // happen when an IVUser outside the loop is not dominated by the latch
1440  // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1441  // all cases. Consider a phi outside whose operand is replaced during
1442  // expansion with the value of the postinc user. Without fundamentally
1443  // changing the way postinc users are tracked, the only remedy is
1444  // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1445  // but hopefully expandCodeFor handles that.
1446  bool useSubtract =
1447  !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1448  if (useSubtract)
1449  Step = SE.getNegativeSCEV(Step);
1450  Value *StepV;
1451  {
1452  // Expand the step somewhere that dominates the loop header.
1453  SCEVInsertPointGuard Guard(Builder, this);
1454  StepV = expandCodeForImpl(
1455  Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1456  }
1457  Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1458  }
1459  }
1460 
1461  // We have decided to reuse an induction variable of a dominating loop. Apply
1462  // truncation and/or inversion of the step.
1463  if (TruncTy) {
1464  Type *ResTy = Result->getType();
1465  // Normalize the result type.
1466  if (ResTy != SE.getEffectiveSCEVType(ResTy))
1467  Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1468  // Truncate the result.
1469  if (TruncTy != Result->getType())
1470  Result = Builder.CreateTrunc(Result, TruncTy);
1471 
1472  // Invert the result.
1473  if (InvertStep)
1474  Result = Builder.CreateSub(
1475  expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
1476  }
1477 
1478  // Re-apply any non-loop-dominating scale.
1479  if (PostLoopScale) {
1480  assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1481  Result = InsertNoopCastOfTo(Result, IntTy);
1482  Result = Builder.CreateMul(Result,
1483  expandCodeForImpl(PostLoopScale, IntTy, false));
1484  }
1485 
1486  // Re-apply any non-loop-dominating offset.
1487  if (PostLoopOffset) {
1488  if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1489  if (Result->getType()->isIntegerTy()) {
1490  Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
1491  Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
1492  } else {
1493  Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1494  }
1495  } else {
1496  Result = InsertNoopCastOfTo(Result, IntTy);
1497  Result = Builder.CreateAdd(
1498  Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
1499  }
1500  }
1501 
1502  return Result;
1503 }
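// A rough sketch of the rewrite performed above: if the step S of {Start,+,S}
// does not dominate the loop header, the normalized recurrence {0,+,1} is the
// one expanded as a phi, and the scale and offset are re-applied only at the
// use, e.g. (names here are illustrative):
//
//   %iv     = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]   ; {0,+,1}
//   ...
//   %scaled = mul i64 %iv, %s          ; re-apply PostLoopScale
//   %result = add i64 %scaled, %start  ; re-apply PostLoopOffset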
1504 
1505 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1506  // In canonical mode we compute the addrec as an expression of a canonical IV
1507  // using evaluateAtIteration and expand the resulting SCEV expression. This
 1508  // way we avoid introducing new IVs to carry on the computation of the addrec
1509  // throughout the loop.
1510  //
1511  // For nested addrecs evaluateAtIteration might need a canonical IV of a
1512  // type wider than the addrec itself. Emitting a canonical IV of the
1513  // proper type might produce non-legal types, for example expanding an i64
1514  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1515  // back to non-canonical mode for nested addrecs.
1516  if (!CanonicalMode || (S->getNumOperands() > 2))
1517  return expandAddRecExprLiterally(S);
1518 
1519  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1520  const Loop *L = S->getLoop();
1521 
1522  // First check for an existing canonical IV in a suitable type.
1523  PHINode *CanonicalIV = nullptr;
1524  if (PHINode *PN = L->getCanonicalInductionVariable())
1525  if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1526  CanonicalIV = PN;
1527 
1528  // Rewrite an AddRec in terms of the canonical induction variable, if
 1529  // its type is narrower.
1530  if (CanonicalIV &&
1531  SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1532  !S->getType()->isPointerTy()) {
1533  SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1534  for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1535  NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1536  Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1537  S->getNoWrapFlags(SCEV::FlagNW)));
1538  BasicBlock::iterator NewInsertPt =
1539  findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1540  V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1541  &*NewInsertPt, false);
1542  return V;
1543  }
1544 
1545  // {X,+,F} --> X + {0,+,F}
1546  if (!S->getStart()->isZero()) {
1547  if (PointerType *PTy = dyn_cast<PointerType>(S->getType())) {
1548  Value *StartV = expand(SE.getPointerBase(S));
1549  assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1550  return expandAddToGEP(SE.removePointerBase(S), PTy, Ty, StartV);
1551  }
1552 
1553  SmallVector<const SCEV *, 4> NewOps(S->operands());
1554  NewOps[0] = SE.getConstant(Ty, 0);
1555  const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1556  S->getNoWrapFlags(SCEV::FlagNW));
1557 
1558  // Just do a normal add. Pre-expand the operands to suppress folding.
1559  //
1560  // The LHS and RHS values are factored out of the expand call to make the
1561  // output independent of the argument evaluation order.
1562  const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1563  const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1564  return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1565  }
1566 
1567  // If we don't yet have a canonical IV, create one.
1568  if (!CanonicalIV) {
1569  // Create and insert the PHI node for the induction variable in the
1570  // specified loop.
1571  BasicBlock *Header = L->getHeader();
1572  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1573  CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1574  &Header->front());
1575  rememberInstruction(CanonicalIV);
1576 
1577  SmallSet<BasicBlock *, 4> PredSeen;
1578  Constant *One = ConstantInt::get(Ty, 1);
1579  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1580  BasicBlock *HP = *HPI;
1581  if (!PredSeen.insert(HP).second) {
1582  // There must be an incoming value for each predecessor, even the
1583  // duplicates!
1584  CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1585  continue;
1586  }
1587 
1588  if (L->contains(HP)) {
1589  // Insert a unit add instruction right before the terminator
1590  // corresponding to the back-edge.
1591  Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1592  "indvar.next",
1593  HP->getTerminator());
1594  Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1595  rememberInstruction(Add);
1596  CanonicalIV->addIncoming(Add, HP);
1597  } else {
1598  CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1599  }
1600  }
1601  }
1602 
1603  // {0,+,1} --> Insert a canonical induction variable into the loop!
1604  if (S->isAffine() && S->getOperand(1)->isOne()) {
1605  assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1606  "IVs with types different from the canonical IV should "
1607  "already have been handled!");
1608  return CanonicalIV;
1609  }
1610 
1611  // {0,+,F} --> {0,+,1} * F
1612 
1613  // If this is a simple linear addrec, emit it now as a special case.
1614  if (S->isAffine()) // {0,+,F} --> i*F
1615  return
1616  expand(SE.getTruncateOrNoop(
1617  SE.getMulExpr(SE.getUnknown(CanonicalIV),
1618  SE.getNoopOrAnyExtend(S->getOperand(1),
1619  CanonicalIV->getType())),
1620  Ty));
1621 
1622  // If this is a chain of recurrences, turn it into a closed form, using the
1623  // folders, then expandCodeFor the closed form. This allows the folders to
1624  // simplify the expression without having to build a bunch of special code
1625  // into this folder.
1626  const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1627 
1628  // Promote S up to the canonical IV type, if the cast is foldable.
1629  const SCEV *NewS = S;
1630  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1631  if (isa<SCEVAddRecExpr>(Ext))
1632  NewS = Ext;
1633 
1634  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1635  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1636 
1637  // Truncate the result down to the original type, if needed.
1638  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1639  return expand(T);
1640 }
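// As a worked example of the closed form used above: evaluateAtIteration
// rewrites an addrec {A,+,B,+,C} at the symbolic iteration count i roughly as
//
//   A*C(i,0) + B*C(i,1) + C*C(i,2)  =  A + B*i + C*(i*(i-1)/2)
//
// (C(i,k) being the binomial coefficient), so that only ordinary arithmetic
// on the canonical IV i remains to be expanded.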
1641 
1642 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1643  Value *V =
1644  expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
1645  return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1646  GetOptimalInsertionPointForCastOf(V));
1647 }
1648 
1649 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1650  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1651  Value *V = expandCodeForImpl(
1652  S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1653  false);
1654  return Builder.CreateTrunc(V, Ty);
1655 }
1656 
1657 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1658  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1659  Value *V = expandCodeForImpl(
1660  S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1661  false);
1662  return Builder.CreateZExt(V, Ty);
1663 }
1664 
1665 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1666  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1667  Value *V = expandCodeForImpl(
1668  S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1669  false);
1670  return Builder.CreateSExt(V, Ty);
1671 }
1672 
1673 Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
1674  Intrinsic::ID IntrinID, Twine Name,
1675  bool IsSequential) {
1676  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1677  Type *Ty = LHS->getType();
1678  if (IsSequential)
1679  LHS = Builder.CreateFreeze(LHS);
1680  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1681  Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1682  if (IsSequential && i != 0)
1683  RHS = Builder.CreateFreeze(RHS);
1684  Value *Sel;
1685  if (Ty->isIntegerTy())
1686  Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1687  /*FMFSource=*/nullptr, Name);
1688  else {
1689  Value *ICmp =
1690  Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
1691  Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1692  }
1693  LHS = Sel;
1694  }
1695  return LHS;
1696 }
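// For instance, a three-operand sequential umin over i64 would come out of
// the loop above roughly as (illustrative names):
//
//   %r2 = freeze i64 %op2                            ; last operand
//   %r1 = freeze i64 %op1
//   %m  = call i64 @llvm.umin.i64(i64 %r2, i64 %r1)
//   %s  = call i64 @llvm.umin.i64(i64 %m, i64 %op0)  ; first operand, not frozen
//
// while the non-sequential forms skip the freezes entirely.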
1697 
1698 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1699  return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1700 }
1701 
1702 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1703  return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1704 }
1705 
1706 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1707  return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1708 }
1709 
1710 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1711  return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1712 }
1713 
1714 Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
1715  return expandMinMaxExpr(S, Intrinsic::umin, "umin", /*IsSequential*/true);
1716 }
1717 
1718 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1719  Instruction *IP, bool Root) {
1720  setInsertPoint(IP);
1721  Value *V = expandCodeForImpl(SH, Ty, Root);
1722  return V;
1723 }
1724 
1725 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
1726  // Expand the code for this SCEV.
1727  Value *V = expand(SH);
1728 
1729  if (PreserveLCSSA) {
1730  if (auto *Inst = dyn_cast<Instruction>(V)) {
 1731  // Create a temporary instruction at the current insertion point, so we
1732  // can hand it off to the helper to create LCSSA PHIs if required for the
1733  // new use.
1734  // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
 1735  // would accept an insertion point and return an LCSSA phi for that
1736  // insertion point, so there is no need to insert & remove the temporary
1737  // instruction.
1738  Instruction *Tmp;
1739  if (Inst->getType()->isIntegerTy())
1740  Tmp = cast<Instruction>(Builder.CreateIntToPtr(
1741  Inst, Inst->getType()->getPointerTo(), "tmp.lcssa.user"));
1742  else {
1743  assert(Inst->getType()->isPointerTy());
1744  Tmp = cast<Instruction>(Builder.CreatePtrToInt(
1745  Inst, Type::getInt32Ty(Inst->getContext()), "tmp.lcssa.user"));
1746  }
1747  V = fixupLCSSAFormFor(Tmp, 0);
1748 
1749  // Clean up temporary instruction.
1750  InsertedValues.erase(Tmp);
1751  InsertedPostIncValues.erase(Tmp);
1752  Tmp->eraseFromParent();
1753  }
1754  }
1755 
1756  InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
1757  if (Ty) {
1758  assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1759  "non-trivial casts should be done with the SCEVs directly!");
1760  V = InsertNoopCastOfTo(V, Ty);
1761  }
1762  return V;
1763 }
1764 
1765 Value *SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1766  const Instruction *InsertPt) {
1767  // If the expansion is not in CanonicalMode, and the SCEV contains any
1768  // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
1769  if (!CanonicalMode && SE.containsAddRecurrence(S))
1770  return nullptr;
1771 
1772  // If S is a constant, it may be worse to reuse an existing Value.
1773  if (isa<SCEVConstant>(S))
1774  return nullptr;
1775 
1776  // Choose a Value from the set which dominates the InsertPt.
1777  // InsertPt should be inside the Value's parent loop so as not to break
1778  // the LCSSA form.
1779  for (Value *V : SE.getSCEVValues(S)) {
1780  Instruction *EntInst = dyn_cast<Instruction>(V);
1781  if (!EntInst)
1782  continue;
1783 
1784  assert(EntInst->getFunction() == InsertPt->getFunction());
1785  if (S->getType() == V->getType() &&
1786  SE.DT.dominates(EntInst, InsertPt) &&
1787  (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1788  SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1789  return V;
1790  }
1791  return nullptr;
1792 }
1793 
1794 // The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1795 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1796 // and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1797 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1798 // the expansion will try to reuse Value from ExprValueMap, and only when it
1799 // fails, expand the SCEV literally.
1800 Value *SCEVExpander::expand(const SCEV *S) {
1801  // Compute an insertion point for this SCEV object. Hoist the instructions
1802  // as far out in the loop nest as possible.
1803  Instruction *InsertPt = &*Builder.GetInsertPoint();
1804 
 1805  // We can move the insertion point only if there are no div or rem operations;
 1806  // otherwise we risk hoisting it above the check for a zero denominator.
1807  auto SafeToHoist = [](const SCEV *S) {
1808  return !SCEVExprContains(S, [](const SCEV *S) {
1809  if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1810  if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1811  // Division by non-zero constants can be hoisted.
1812  return SC->getValue()->isZero();
1813  // All other divisions should not be moved as they may be
1814  // divisions by zero and should be kept within the
1815  // conditions of the surrounding loops that guard their
1816  // execution (see PR35406).
1817  return true;
1818  }
1819  return false;
1820  });
1821  };
1822  if (SafeToHoist(S)) {
1823  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1824  L = L->getParentLoop()) {
1825  if (SE.isLoopInvariant(S, L)) {
1826  if (!L) break;
1827  if (BasicBlock *Preheader = L->getLoopPreheader())
1828  InsertPt = Preheader->getTerminator();
1829  else
1830  // LSR sets the insertion point for AddRec start/step values to the
1831  // block start to simplify value reuse, even though it's an invalid
1832  // position. SCEVExpander must correct for this in all cases.
1833  InsertPt = &*L->getHeader()->getFirstInsertionPt();
1834  } else {
1835  // If the SCEV is computable at this level, insert it into the header
1836  // after the PHIs (and after any other instructions that we've inserted
1837  // there) so that it is guaranteed to dominate any user inside the loop.
1838  if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1839  InsertPt = &*L->getHeader()->getFirstInsertionPt();
1840 
1841  while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1842  (isInsertedInstruction(InsertPt) ||
1843  isa<DbgInfoIntrinsic>(InsertPt))) {
1844  InsertPt = &*std::next(InsertPt->getIterator());
1845  }
1846  break;
1847  }
1848  }
1849  }
1850 
1851  // Check to see if we already expanded this here.
1852  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1853  if (I != InsertedExpressions.end())
1854  return I->second;
1855 
1856  SCEVInsertPointGuard Guard(Builder, this);
1857  Builder.SetInsertPoint(InsertPt);
1858 
1859  // Expand the expression into instructions.
1860  Value *V = FindValueInExprValueMap(S, InsertPt);
1861  if (!V)
1862  V = visit(S);
1863  else {
1864  // If we're reusing an existing instruction, we are effectively CSEing two
1865  // copies of the instruction (with potentially different flags). As such,
1866  // we need to drop any poison generating flags unless we can prove that
1867  // said flags must be valid for all new users.
1868  if (auto *I = dyn_cast<Instruction>(V))
1869  if (I->hasPoisonGeneratingFlags() && !programUndefinedIfPoison(I))
1870  I->dropPoisonGeneratingFlags();
1871  }
1872  // Remember the expanded value for this SCEV at this location.
1873  //
1874  // This is independent of PostIncLoops. The mapped value simply materializes
1875  // the expression at this insertion point. If the mapped value happened to be
1876  // a postinc expansion, it could be reused by a non-postinc user, but only if
1877  // its insertion point was already at the head of the loop.
1878  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1879  return V;
1880 }
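// As a small example of the hoisting decision above: when expanding
// (%n /u %m) + %x with all three values loop-invariant, the add could be
// placed in the preheader, but because the divisor %m is not a known
// non-zero constant, SafeToHoist rejects the whole expression and it is
// emitted at the builder's current insertion point instead.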
1881 
1882 void SCEVExpander::rememberInstruction(Value *I) {
1883  auto DoInsert = [this](Value *V) {
1884  if (!PostIncLoops.empty())
1885  InsertedPostIncValues.insert(V);
1886  else
1887  InsertedValues.insert(V);
1888  };
1889  DoInsert(I);
1890 
1891  if (!PreserveLCSSA)
1892  return;
1893 
1894  if (auto *Inst = dyn_cast<Instruction>(I)) {
1895  // A new instruction has been added, which might introduce new uses outside
 1896  // a defining loop. Fix LCSSA for each operand of the new instruction,
1897  // if required.
1898  for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
1899  OpIdx++)
1900  fixupLCSSAFormFor(Inst, OpIdx);
1901  }
1902 }
1903 
1904 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1905 /// replace them with their most canonical representative. Return the number of
1906 /// phis eliminated.
1907 ///
1908 /// This does not depend on any SCEVExpander state but should be used in
1909 /// the same context that SCEVExpander is used.
1910 unsigned
 1911 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
 1912  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
 1913  const TargetTransformInfo *TTI) {
1914  // Find integer phis in order of increasing width.
 1915  SmallVector<PHINode *, 8> Phis;
 1916  for (PHINode &PN : L->getHeader()->phis())
1917  Phis.push_back(&PN);
1918 
1919  if (TTI)
1920  // Use stable_sort to preserve order of equivalent PHIs, so the order
1921  // of the sorted Phis is the same from run to run on the same loop.
1922  llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
1923  // Put pointers at the back and make sure pointer < pointer = false.
1924  if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1925  return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
 1926  return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
 1927  LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
 1928  });
1929 
1930  unsigned NumElim = 0;
 1931  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
 1932  // Process phis from wide to narrow. Map wide phis to their truncation
1933  // so narrow phis can reuse them.
1934  for (PHINode *Phi : Phis) {
1935  auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1936  if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1937  return V;
1938  if (!SE.isSCEVable(PN->getType()))
1939  return nullptr;
1940  auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1941  if (!Const)
1942  return nullptr;
1943  return Const->getValue();
1944  };
1945 
1946  // Fold constant phis. They may be congruent to other constant phis and
1947  // would confuse the logic below that expects proper IVs.
1948  if (Value *V = SimplifyPHINode(Phi)) {
1949  if (V->getType() != Phi->getType())
1950  continue;
1951  Phi->replaceAllUsesWith(V);
1952  DeadInsts.emplace_back(Phi);
1953  ++NumElim;
 1954  SCEV_DEBUG_WITH_TYPE(DebugType,
 1955  dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1956  << '\n');
1957  continue;
1958  }
1959 
1960  if (!SE.isSCEVable(Phi->getType()))
1961  continue;
1962 
1963  PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1964  if (!OrigPhiRef) {
1965  OrigPhiRef = Phi;
1966  if (Phi->getType()->isIntegerTy() && TTI &&
1967  TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1968  // This phi can be freely truncated to the narrowest phi type. Map the
1969  // truncated expression to it so it will be reused for narrow types.
1970  const SCEV *TruncExpr =
1971  SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1972  ExprToIVMap[TruncExpr] = Phi;
1973  }
1974  continue;
1975  }
1976 
1977  // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1978  // sense.
1979  if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1980  continue;
1981 
1982  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1983  Instruction *OrigInc = dyn_cast<Instruction>(
1984  OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1985  Instruction *IsomorphicInc =
1986  dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1987 
1988  if (OrigInc && IsomorphicInc) {
1989  // If this phi has the same width but is more canonical, replace the
1990  // original with it. As part of the "more canonical" determination,
1991  // respect a prior decision to use an IV chain.
1992  if (OrigPhiRef->getType() == Phi->getType() &&
1993  !(ChainedPhis.count(Phi) ||
1994  isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1995  (ChainedPhis.count(Phi) ||
1996  isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1997  std::swap(OrigPhiRef, Phi);
1998  std::swap(OrigInc, IsomorphicInc);
1999  }
2000  // Replacing the congruent phi is sufficient because acyclic
2001  // redundancy elimination, CSE/GVN, should handle the
2002  // rest. However, once SCEV proves that a phi is congruent,
2003  // it's often the head of an IV user cycle that is isomorphic
2004  // with the original phi. It's worth eagerly cleaning up the
2005  // common case of a single IV increment so that DeleteDeadPHIs
2006  // can remove cycles that had postinc uses.
2007  const SCEV *TruncExpr =
2008  SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
2009  if (OrigInc != IsomorphicInc &&
2010  TruncExpr == SE.getSCEV(IsomorphicInc) &&
2011  SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
2012  hoistIVInc(OrigInc, IsomorphicInc)) {
 2013  SCEV_DEBUG_WITH_TYPE(
 2014  DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
2015  << *IsomorphicInc << '\n');
2016  Value *NewInc = OrigInc;
2017  if (OrigInc->getType() != IsomorphicInc->getType()) {
2018  Instruction *IP = nullptr;
2019  if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
2020  IP = &*PN->getParent()->getFirstInsertionPt();
2021  else
2022  IP = OrigInc->getNextNode();
2023 
 2024  IRBuilder<> Builder(IP);
 2025  Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
2026  NewInc = Builder.CreateTruncOrBitCast(
2027  OrigInc, IsomorphicInc->getType(), IVName);
2028  }
2029  IsomorphicInc->replaceAllUsesWith(NewInc);
2030  DeadInsts.emplace_back(IsomorphicInc);
2031  }
2032  }
2033  }
 2034  SCEV_DEBUG_WITH_TYPE(DebugType,
 2035  dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
2036  << '\n');
 2037  SCEV_DEBUG_WITH_TYPE(
 2038  DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
2039  ++NumElim;
2040  Value *NewIV = OrigPhiRef;
2041  if (OrigPhiRef->getType() != Phi->getType()) {
 2042  IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
 2043  Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2044  NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
2045  }
2046  Phi->replaceAllUsesWith(NewIV);
2047  DeadInsts.emplace_back(Phi);
2048  }
2049  return NumElim;
2050 }
2051 
 2052 Value *SCEVExpander::getRelatedExistingExpansion(const SCEV *S,
 2053  const Instruction *At,
2054  Loop *L) {
2055  using namespace llvm::PatternMatch;
2056 
2057  SmallVector<BasicBlock *, 4> ExitingBlocks;
2058  L->getExitingBlocks(ExitingBlocks);
2059 
2060  // Look for suitable value in simple conditions at the loop exits.
2061  for (BasicBlock *BB : ExitingBlocks) {
2062  ICmpInst::Predicate Pred;
2063  Instruction *LHS, *RHS;
2064 
2065  if (!match(BB->getTerminator(),
 2066  m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
 2067  m_BasicBlock(), m_BasicBlock())))
2068  continue;
2069 
2070  if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2071  return LHS;
2072 
2073  if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2074  return RHS;
2075  }
2076 
2077  // Use expand's logic which is used for reusing a previous Value in
2078  // ExprValueMap. Note that we don't currently model the cost of
2079  // needing to drop poison generating flags on the instruction if we
2080  // want to reuse it. We effectively assume that has zero cost.
2081  return FindValueInExprValueMap(S, At);
2082 }
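// As an example of the pattern matched above, a loop exit such as
//
//   %cmp = icmp eq i64 %iv.next, %n
//   br i1 %cmp, label %exit, label %body
//
// lets %iv.next (or %n, if it is an instruction) be reused whenever its SCEV
// equals S and it dominates At.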
2083 
2084 template<typename T> static InstructionCost costAndCollectOperands(
2085  const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
2087  SmallVectorImpl<SCEVOperand> &Worklist) {
2088 
2089  const T *S = cast<T>(WorkItem.S);
2090  InstructionCost Cost = 0;
2091  // Object to help map SCEV operands to expanded IR instructions.
2092  struct OperationIndices {
2093  OperationIndices(unsigned Opc, size_t min, size_t max) :
2094  Opcode(Opc), MinIdx(min), MaxIdx(max) { }
2095  unsigned Opcode;
2096  size_t MinIdx;
2097  size_t MaxIdx;
2098  };
2099 
2100  // Collect the operations of all the instructions that will be needed to
2101  // expand the SCEVExpr. This is so that when we come to cost the operands,
2102  // we know what the generated user(s) will be.
 2103  SmallVector<OperationIndices, 2> Operations;
 2104 
2105  auto CastCost = [&](unsigned Opcode) -> InstructionCost {
2106  Operations.emplace_back(Opcode, 0, 0);
2107  return TTI.getCastInstrCost(Opcode, S->getType(),
2108  S->getOperand(0)->getType(),
 2109  TTI::CastContextHint::None, CostKind);
 2110  };
2111 
2112  auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
2113  unsigned MinIdx = 0,
2114  unsigned MaxIdx = 1) -> InstructionCost {
2115  Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2116  return NumRequired *
2117  TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
2118  };
2119 
2120  auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
2121  unsigned MaxIdx) -> InstructionCost {
2122  Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2123  Type *OpType = S->getOperand(0)->getType();
2124  return NumRequired * TTI.getCmpSelInstrCost(
2125  Opcode, OpType, CmpInst::makeCmpResultType(OpType),
 2126  CmpInst::BAD_ICMP_PREDICATE, CostKind);
 2127  };
2128 
2129  switch (S->getSCEVType()) {
2130  case scCouldNotCompute:
2131  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2132  case scUnknown:
2133  case scConstant:
2134  return 0;
2135  case scPtrToInt:
2136  Cost = CastCost(Instruction::PtrToInt);
2137  break;
2138  case scTruncate:
2139  Cost = CastCost(Instruction::Trunc);
2140  break;
2141  case scZeroExtend:
2142  Cost = CastCost(Instruction::ZExt);
2143  break;
2144  case scSignExtend:
2145  Cost = CastCost(Instruction::SExt);
2146  break;
2147  case scUDivExpr: {
2148  unsigned Opcode = Instruction::UDiv;
2149  if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2150  if (SC->getAPInt().isPowerOf2())
2151  Opcode = Instruction::LShr;
2152  Cost = ArithCost(Opcode, 1);
2153  break;
2154  }
2155  case scAddExpr:
2156  Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2157  break;
2158  case scMulExpr:
2159  // TODO: this is a very pessimistic cost modelling for Mul,
 2160  // because of the binary powering (Bin Pow) algorithm actually used by the expander,
2161  // see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
2162  Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2163  break;
2164  case scSMaxExpr:
2165  case scUMaxExpr:
2166  case scSMinExpr:
2167  case scUMinExpr:
2168  case scSequentialUMinExpr: {
 2169  // FIXME: should this ask for the cost of intrinsics?
2170  // The reduction tree.
2171  Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2172  Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2173  switch (S->getSCEVType()) {
2174  case scSequentialUMinExpr: {
2175  // The safety net against poison.
2176  // FIXME: this is broken.
2177  Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2178  Cost += ArithCost(Instruction::Or,
2179  S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2180  Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2181  break;
2182  }
2183  default:
2184  assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
2185  "Unhandled SCEV expression type?");
2186  break;
2187  }
2188  break;
2189  }
2190  case scAddRecExpr: {
 2191  // In this polynomial, we may have some zero operands, and we shouldn't
 2192  // really charge for those. So how many non-zero coefficients are there?
2193  int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
2194  return !Op->isZero();
2195  });
2196 
 2197  assert(NumTerms >= 1 && "Polynomial should have at least one term.");
2198  assert(!(*std::prev(S->operands().end()))->isZero() &&
2199  "Last operand should not be zero");
2200 
 2201  // Ignoring constant term (operand 0), how many of the coefficients are u> 1?
2202  int NumNonZeroDegreeNonOneTerms =
2203  llvm::count_if(S->operands(), [](const SCEV *Op) {
2204  auto *SConst = dyn_cast<SCEVConstant>(Op);
2205  return !SConst || SConst->getAPInt().ugt(1);
2206  });
2207 
 2208  // Much like with normal add expr, the polynomial will require
 2209  // one less addition than the number of its terms.
2210  InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
2211  /*MinIdx*/ 1, /*MaxIdx*/ 1);
2212  // Here, *each* one of those will require a multiplication.
2213  InstructionCost MulCost =
2214  ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2215  Cost = AddCost + MulCost;
2216 
 2217  // What is the degree of this polynomial?
2218  int PolyDegree = S->getNumOperands() - 1;
2219  assert(PolyDegree >= 1 && "Should be at least affine.");
2220 
2221  // The final term will be:
2222  // Op_{PolyDegree} * x ^ {PolyDegree}
2223  // Where x ^ {PolyDegree} will again require PolyDegree-1 mul operations.
2224  // Note that x ^ {PolyDegree} = x * x ^ {PolyDegree-1} so charging for
2225  // x ^ {PolyDegree} will give us x ^ {2} .. x ^ {PolyDegree-1} for free.
2226  // FIXME: this is conservatively correct, but might be overly pessimistic.
2227  Cost += MulCost * (PolyDegree - 1);
2228  break;
2229  }
2230  }
2231 
2232  for (auto &CostOp : Operations) {
2233  for (auto SCEVOp : enumerate(S->operands())) {
2234  // Clamp the index to account for multiple IR operations being chained.
2235  size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2236  size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2237  Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2238  }
2239  }
2240  return Cost;
2241 }
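// Rough worked example for the scAddRecExpr case above: a quadratic addrec
// {a,+,b,+,c} with all operands non-constant has NumTerms == 3 and
// NumNonZeroDegreeNonOneTerms == 3, so the estimate is 2 additions plus
// 3 multiplications, plus 3 more multiplications for the
// (PolyDegree - 1) == 1 extra power of x, i.e. 2 adds and 6 muls weighted
// by the TTI per-instruction costs.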
2242 
2243 bool SCEVExpander::isHighCostExpansionHelper(
2244  const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2245  InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
2246  SmallPtrSetImpl<const SCEV *> &Processed,
2247  SmallVectorImpl<SCEVOperand> &Worklist) {
2248  if (Cost > Budget)
2249  return true; // Already run out of budget, give up.
2250 
2251  const SCEV *S = WorkItem.S;
2252  // Was the cost of expansion of this expression already accounted for?
2253  if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2254  return false; // We have already accounted for this expression.
2255 
2256  // If we can find an existing value for this scev available at the point "At"
2257  // then consider the expression cheap.
2258  if (getRelatedExistingExpansion(S, &At, L))
2259  return false; // Consider the expression to be free.
2260 
 2261  TargetTransformInfo::TargetCostKind CostKind =
 2262  L->getHeader()->getParent()->hasMinSize()
 2263  ? TargetTransformInfo::TCK_CodeSize
 2264  : TargetTransformInfo::TCK_RecipThroughput;
 2265 
2266  switch (S->getSCEVType()) {
2267  case scCouldNotCompute:
2268  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2269  case scUnknown:
2270  // Assume to be zero-cost.
2271  return false;
2272  case scConstant: {
 2273  // Only evaluate the costs of constants when optimizing for size.
 2274  if (CostKind != TargetTransformInfo::TCK_CodeSize)
 2275  return false;
2276  const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2277  Type *Ty = S->getType();
2278  Cost += TTI.getIntImmCostInst(
2279  WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2280  return Cost > Budget;
2281  }
2282  case scTruncate:
2283  case scPtrToInt:
2284  case scZeroExtend:
2285  case scSignExtend: {
2286  Cost +=
2287  costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
2288  return false; // Will answer upon next entry into this function.
2289  }
2290  case scUDivExpr: {
2291  // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2292  // HowManyLessThans produced to compute a precise expression, rather than a
2293  // UDiv from the user's code. If we can't find a UDiv in the code with some
 2294  // simple searching, we need to account for its cost.
2295 
2296  // At the beginning of this function we already tried to find existing
2297  // value for plain 'S'. Now try to lookup 'S + 1' since it is common
2298  // pattern involving division. This is just a simple search heuristic.
2299  if (getRelatedExistingExpansion(
2300  SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2301  return false; // Consider it to be free.
2302 
2303  Cost +=
2304  costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2305  return false; // Will answer upon next entry into this function.
2306  }
2307  case scAddExpr:
2308  case scMulExpr:
2309  case scUMaxExpr:
2310  case scSMaxExpr:
2311  case scUMinExpr:
2312  case scSMinExpr:
2313  case scSequentialUMinExpr: {
2314  assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2315  "Nary expr should have more than 1 operand.");
2316  // The simple nary expr will require one less op (or pair of ops)
 2317  // than the number of its terms.
2318  Cost +=
2319  costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2320  return Cost > Budget;
2321  }
2322  case scAddRecExpr: {
2323  assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2324  "Polynomial should be at least linear");
2325  Cost += costAndCollectOperands<SCEVAddRecExpr>(
2326  WorkItem, TTI, CostKind, Worklist);
2327  return Cost > Budget;
2328  }
2329  }
2330  llvm_unreachable("Unknown SCEV kind!");
2331 }
2332 
 2333 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
 2334  Instruction *IP) {
2335  assert(IP);
2336  switch (Pred->getKind()) {
 2337  case SCEVPredicate::P_Union:
 2338  return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
 2339  case SCEVPredicate::P_Compare:
 2340  return expandComparePredicate(cast<SCEVComparePredicate>(Pred), IP);
2341  case SCEVPredicate::P_Wrap: {
2342  auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2343  return expandWrapPredicate(AddRecPred, IP);
2344  }
2345  }
2346  llvm_unreachable("Unknown SCEV predicate type");
2347 }
2348 
 2349 Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
 2350  Instruction *IP) {
2351  Value *Expr0 =
2352  expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
2353  Value *Expr1 =
2354  expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);
2355 
2356  Builder.SetInsertPoint(IP);
2357  auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
2358  auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2359  return I;
2360 }
2361 
 2362 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
 2363  Instruction *Loc, bool Signed) {
2364  assert(AR->isAffine() && "Cannot generate RT check for "
2365  "non-affine expression");
2366 
2367  // FIXME: It is highly suspicious that we're ignoring the predicates here.
 2368  SmallVector<const SCEVPredicate *, 4> Pred;
 2369  const SCEV *ExitCount =
2370  SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2371 
2372  assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2373 
2374  const SCEV *Step = AR->getStepRecurrence(SE);
2375  const SCEV *Start = AR->getStart();
2376 
2377  Type *ARTy = AR->getType();
2378  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2379  unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2380 
2381  // The expression {Start,+,Step} has nusw/nssw if
2382  // Step < 0, Start - |Step| * Backedge <= Start
2383  // Step >= 0, Start + |Step| * Backedge > Start
2384  // and |Step| * Backedge doesn't unsigned overflow.
2385 
2386  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2387  Builder.SetInsertPoint(Loc);
2388  Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);
2389 
2390  IntegerType *Ty =
2391  IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2392 
2393  Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
2394  Value *NegStepValue =
2395  expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
2396  Value *StartValue = expandCodeForImpl(Start, ARTy, Loc, false);
2397 
2398  ConstantInt *Zero =
2399  ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2400 
2401  Builder.SetInsertPoint(Loc);
2402  // Compute |Step|
2403  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2404  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2405 
2406  // Compute |Step| * Backedge
2407  // Compute:
2408  // 1. Start + |Step| * Backedge < Start
2409  // 2. Start - |Step| * Backedge > Start
2410  //
2411  // And select either 1. or 2. depending on whether step is positive or
2412  // negative. If Step is known to be positive or negative, only create
2413  // either 1. or 2.
2414  auto ComputeEndCheck = [&]() -> Value * {
2415  // Checking <u 0 is always false.
2416  if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
2417  return ConstantInt::getFalse(Loc->getContext());
2418 
 2419  // Get the backedge-taken count and truncate or extend it to the AR type.
2420  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2421 
2422  Value *MulV, *OfMul;
2423  if (Step->isOne()) {
2424  // Special-case Step of one. Potentially-costly `umul_with_overflow` isn't
2425  // needed, there is never an overflow, so to avoid artificially inflating
2426  // the cost of the check, directly emit the optimized IR.
2427  MulV = TruncTripCount;
2428  OfMul = ConstantInt::getFalse(MulV->getContext());
2429  } else {
2430  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2431  Intrinsic::umul_with_overflow, Ty);
2432  CallInst *Mul =
2433  Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2434  MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2435  OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2436  }
2437 
2438  Value *Add = nullptr, *Sub = nullptr;
2439  bool NeedPosCheck = !SE.isKnownNegative(Step);
2440  bool NeedNegCheck = !SE.isKnownPositive(Step);
2441 
2442  if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
2443  StartValue = InsertNoopCastOfTo(
2444  StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
2445  Value *NegMulV = Builder.CreateNeg(MulV);
2446  if (NeedPosCheck)
2447  Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
2448  if (NeedNegCheck)
2449  Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
2450  } else {
2451  if (NeedPosCheck)
2452  Add = Builder.CreateAdd(StartValue, MulV);
2453  if (NeedNegCheck)
2454  Sub = Builder.CreateSub(StartValue, MulV);
2455  }
2456 
2457  Value *EndCompareLT = nullptr;
2458  Value *EndCompareGT = nullptr;
2459  Value *EndCheck = nullptr;
2460  if (NeedPosCheck)
2461  EndCheck = EndCompareLT = Builder.CreateICmp(
2462  Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2463  if (NeedNegCheck)
2464  EndCheck = EndCompareGT = Builder.CreateICmp(
2465  Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2466  if (NeedPosCheck && NeedNegCheck) {
2467  // Select the answer based on the sign of Step.
2468  EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2469  }
2470  return Builder.CreateOr(EndCheck, OfMul);
2471  };
2472  Value *EndCheck = ComputeEndCheck();
2473 
2474  // If the backedge taken count type is larger than the AR type,
2475  // check that we don't drop any bits by truncating it. If we are
2476  // dropping bits, then we have overflow (unless the step is zero).
2477  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2478  auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2479  auto *BackedgeCheck =
2480  Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2481  ConstantInt::get(Loc->getContext(), MaxVal));
2482  BackedgeCheck = Builder.CreateAnd(
2483  BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2484 
2485  EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2486  }
2487 
2488  return EndCheck;
2489 }
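// Putting the pieces above together, the emitted check is roughly
//
//   umul.ov(|Step|, BTC)
//     | (Step >= 0 ? (Start + |Step|*BTC <u Start)
//                  : (Start - |Step|*BTC >u Start))
//
// using signed comparisons instead when Signed is set, plus the additional
// check that truncating the backedge-taken count did not drop any bits.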
2490 
 2491 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
 2492  Instruction *IP) {
2493  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2494  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2495 
2496  // Add a check for NUSW
 2497  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
 2498  NUSWCheck = generateOverflowCheck(A, IP, false);
2499 
2500  // Add a check for NSSW
 2501  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
 2502  NSSWCheck = generateOverflowCheck(A, IP, true);
2503 
2504  if (NUSWCheck && NSSWCheck)
2505  return Builder.CreateOr(NUSWCheck, NSSWCheck);
2506 
2507  if (NUSWCheck)
2508  return NUSWCheck;
2509 
2510  if (NSSWCheck)
2511  return NSSWCheck;
2512 
2513  return ConstantInt::getFalse(IP->getContext());
2514 }
2515 
 2516 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
 2517  Instruction *IP) {
2518  // Loop over all checks in this set.
2519  SmallVector<Value *> Checks;
2520  for (auto Pred : Union->getPredicates()) {
2521  Checks.push_back(expandCodeForPredicate(Pred, IP));
2522  Builder.SetInsertPoint(IP);
2523  }
2524 
2525  if (Checks.empty())
2526  return ConstantInt::getFalse(IP->getContext());
2527  return Builder.CreateOr(Checks);
2528 }
2529 
2530 Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
2531  assert(PreserveLCSSA);
 2532  SmallVector<Instruction *, 1> ToUpdate;
 2533 
2534  auto *OpV = User->getOperand(OpIdx);
2535  auto *OpI = dyn_cast<Instruction>(OpV);
2536  if (!OpI)
2537  return OpV;
2538 
2539  Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
2540  Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
2541  if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2542  return OpV;
2543 
2544  ToUpdate.push_back(OpI);
2545  SmallVector<PHINode *, 16> PHIsToRemove;
2546  formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
2547  for (PHINode *PN : PHIsToRemove) {
2548  if (!PN->use_empty())
2549  continue;
2550  InsertedValues.erase(PN);
2551  InsertedPostIncValues.erase(PN);
2552  PN->eraseFromParent();
2553  }
2554 
2555  return User->getOperand(OpIdx);
2556 }
2557 
2558 namespace {
2559 // Search for a SCEV subexpression that is not safe to expand. Any expression
2560 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2561 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2562 // instruction, but the important thing is that we prove the denominator is
2563 // nonzero before expansion.
2564 //
2565 // IVUsers already checks that IV-derived expressions are safe. So this check is
2566 // only needed when the expression includes some subexpression that is not IV
2567 // derived.
2568 //
2569 // Currently, we only allow division by a nonzero constant here. If this is
2570 // inadequate, we could easily allow division by SCEVUnknown by using
2571 // ValueTracking to check isKnownNonZero().
2572 //
2573 // We cannot generally expand recurrences unless the step dominates the loop
2574 // header. The expander handles the special case of affine recurrences by
2575 // scaling the recurrence outside the loop, but this technique isn't generally
2576 // applicable. Expanding a nested recurrence outside a loop requires computing
2577 // binomial coefficients. This could be done, but the recurrence has to be in a
2578 // perfectly reduced form, which can't be guaranteed.
2579 struct SCEVFindUnsafe {
2580  ScalarEvolution &SE;
2581  bool CanonicalMode;
2582  bool IsUnsafe = false;
2583 
2584  SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2585  : SE(SE), CanonicalMode(CanonicalMode) {}
2586 
2587  bool follow(const SCEV *S) {
2588  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2589  const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2590  if (!SC || SC->getValue()->isZero()) {
2591  IsUnsafe = true;
2592  return false;
2593  }
2594  }
2595  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2596  const SCEV *Step = AR->getStepRecurrence(SE);
2597  if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2598  IsUnsafe = true;
2599  return false;
2600  }
2601 
2602  // For non-affine addrecs or in non-canonical mode we need a preheader
2603  // to insert into.
2604  if (!AR->getLoop()->getLoopPreheader() &&
2605  (!CanonicalMode || !AR->isAffine())) {
2606  IsUnsafe = true;
2607  return false;
2608  }
2609  }
2610  return true;
2611  }
2612  bool isDone() const { return IsUnsafe; }
2613 };
2614 }
2615 
2616 namespace llvm {
2617 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE, bool CanonicalMode) {
2618  SCEVFindUnsafe Search(SE, CanonicalMode);
2619  visitAll(S, Search);
2620  return !Search.IsUnsafe;
2621 }
2622 
2623 bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
2624  ScalarEvolution &SE) {
2625  if (!isSafeToExpand(S, SE))
2626  return false;
2627  // We have to prove that the expanded site of S dominates InsertionPoint.
2628  // This is easy when not in the same block, but hard when S is an instruction
2629  // to be expanded somewhere inside the same block as our insertion point.
2630  // What we really need here is something analogous to an OrderedBasicBlock,
2631  // but for the moment, we paper over the problem by handling two common and
2632  // cheap to check cases.
2633  if (SE.properlyDominates(S, InsertionPoint->getParent()))
2634  return true;
2635  if (SE.dominates(S, InsertionPoint->getParent())) {
2636  if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2637  return true;
2638  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2639  if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2640  return true;
2641  }
2642  return false;
2643 }
2644 
 2645 void SCEVExpanderCleaner::cleanup() {
 2646  // Result is used, nothing to remove.
2647  if (ResultUsed)
2648  return;
2649 
2650  auto InsertedInstructions = Expander.getAllInsertedInstructions();
2651 #ifndef NDEBUG
2652  SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2653  InsertedInstructions.end());
2654  (void)InsertedSet;
2655 #endif
2656  // Remove sets with value handles.
2657  Expander.clear();
2658 
2659  // Remove all inserted instructions.
2660  for (Instruction *I : reverse(InsertedInstructions)) {
2661 #ifndef NDEBUG
2662  assert(all_of(I->users(),
2663  [&InsertedSet](Value *U) {
2664  return InsertedSet.contains(cast<Instruction>(U));
2665  }) &&
2666  "removed instruction should only be used by instructions inserted "
2667  "during expansion");
2668 #endif
2669  assert(!I->getType()->isVoidTy() &&
2670  "inserted instruction should have non-void types");
2671  I->replaceAllUsesWith(UndefValue::get(I->getType()));
2672  I->eraseFromParent();
2673  }
2674 }
2675 }
i
i
Definition: README.txt:29
llvm::InstructionCost
Definition: InstructionCost.h:29
llvm::ScalarEvolution::getContext
LLVMContext & getContext() const
Definition: ScalarEvolution.h:491
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
llvm::ScalarEvolution::getTruncateOrNoop
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
Definition: ScalarEvolution.cpp:4635
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:264
llvm::SCEVUDivExpr
This class represents a binary unsigned division operation.
Definition: ScalarEvolutionExpressions.h:295
llvm::TargetTransformInfo::TargetCostKind
TargetCostKind
The kind of cost model.
Definition: TargetTransformInfo.h:210
Signed
@ Signed
Definition: NVPTXISelLowering.cpp:4635
llvm::SCEVExpanderCleaner::cleanup
void cleanup()
Definition: ScalarEvolutionExpander.cpp:2645
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::SCEVUMinExpr
This class represents an unsigned minimum selection.
Definition: ScalarEvolutionExpressions.h:495
llvm::Type::getInt1Ty
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:236
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:65
llvm::drop_begin
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:280
llvm::ScalarEvolution::getEffectiveSCEVType
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
Definition: ScalarEvolution.cpp:4310
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::isSafeToExpand
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE, bool CanonicalMode=true)
Return true if the given expression is safe to expand in the sense that all materialized values are s...
Definition: ScalarEvolutionExpander.cpp:2617
llvm::SCEVComparePredicate::getLHS
const SCEV * getLHS() const
Returns the left hand side of the predicate.
Definition: ScalarEvolution.h:297
llvm::scSMinExpr
@ scSMinExpr
Definition: ScalarEvolutionExpressions.h:52
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1410
llvm::Type::getInt8PtrTy
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:291
llvm::CmpInst::Predicate
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:719
llvm::User::operands
op_range operands()
Definition: User.h:242
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:87
llvm::SCEVAddRecExpr::isAffine
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
Definition: ScalarEvolutionExpressions.h:370
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:104
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:218
ScalarEvolutionExpander.h
llvm::SCEVAddRecExpr::getStart
const SCEV * getStart() const
Definition: ScalarEvolutionExpressions.h:353
llvm::TargetTransformInfo::getCmpSelInstrCost
InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, const Instruction *I=nullptr) const
Definition: TargetTransformInfo.cpp:834
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:430
T
llvm::Loop
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:530
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
llvm::LoopBase::contains
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
Definition: LoopInfo.h:122
llvm::SCEVSMaxExpr
This class represents a signed maximum selection.
Definition: ScalarEvolutionExpressions.h:459
llvm::scConstant
@ scConstant
Definition: ScalarEvolutionExpressions.h:41
llvm::PointerType::get
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Definition: Type.cpp:727
Loops
Hexagon Hardware Loops
Definition: HexagonHardwareLoops.cpp:372
llvm::ilist_node_with_parent::getNextNode
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::RISCVFenceField::W
@ W
Definition: RISCVBaseInfo.h:241
llvm::SCEVPtrToIntExpr
This class represents a cast from a pointer to a pointer-sized integer value.
Definition: ScalarEvolutionExpressions.h:118
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:2045
llvm::scCouldNotCompute
@ scCouldNotCompute
Definition: ScalarEvolutionExpressions.h:56
llvm::ScalarEvolution::getAddRecExpr
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
Definition: ScalarEvolution.cpp:3576
llvm::APInt::getMaxValue
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition: APInt.h:186
llvm::CmpInst::makeCmpResultType
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition: InstrTypes.h:1044
llvm::TargetTransformInfo
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Definition: TargetTransformInfo.h:167
llvm::IRBuilder<>
llvm::PointerType::getAddressSpace
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:682
llvm::ScalarEvolution
The main scalar evolution driver.
Definition: ScalarEvolution.h:449
llvm::CmpInst::ICMP_NE
@ ICMP_NE
not equal
Definition: InstrTypes.h:741
llvm::CmpInst::getInversePredicate
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:833
R600_InstFlag::FC
@ FC
Definition: R600Defines.h:32
ValueTracking.h
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
llvm::TargetTransformInfo::TCK_CodeSize
@ TCK_CodeSize
Instruction code size.
Definition: TargetTransformInfo.h:213
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::PatternMatch::m_Br
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
Definition: PatternMatch.h:1741
llvm::SCEV::FlagNW
@ FlagNW
Definition: ScalarEvolution.h:132
llvm::programUndefinedIfPoison
bool programUndefinedIfPoison(const Instruction *Inst)
Definition: ValueTracking.cpp:5565
llvm::CmpInst::ICMP_SGT
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:746
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::reverse
auto reverse(ContainerTy &&C, std::enable_if_t< has_rbegin< ContainerTy >::value > *=nullptr)
Definition: STLExtras.h:380
llvm::scSMaxExpr
@ scSMaxExpr
Definition: ScalarEvolutionExpressions.h:50
llvm::ScalarEvolution::clearFlags
static LLVM_NODISCARD SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
Definition: ScalarEvolution.h:478
llvm::SmallSet
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:136
llvm::Instruction::comesBefore
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
Definition: Instruction.cpp:110
llvm::Instruction::setHasNoUnsignedWrap
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:123
llvm::CastInst::getCastOpcode
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
Definition: Instructions.cpp:3462
llvm::SmallPtrSet< const Loop *, 2 >
costAndCollectOperands
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
Definition: ScalarEvolutionExpander.cpp:2084
llvm::MipsISD::Ret
@ Ret
Definition: MipsISelLowering.h:119
llvm::SCEVSignExtendExpr
This class represents a sign extension of a small integer value to a larger integer value.
Definition: ScalarEvolutionExpressions.h:170
llvm::SCEVExpander::hoistIVInc
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos)
Utility for hoisting an IV increment.
Definition: ScalarEvolutionExpander.cpp:1024
STLExtras.h
llvm::scUnknown
@ scUnknown
Definition: ScalarEvolutionExpressions.h:55
SimplifyPHINode
static Value * SimplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
Definition: InstructionSimplify.cpp:4812
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::ArrayType
Class to represent array types.
Definition: DerivedTypes.h:357
llvm::TargetTransformInfo::getIntImmCostInst
InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const
Return the expected cost of materialization for the given integer immediate of the specified type for...
Definition: TargetTransformInfo.cpp:580
llvm::SCEVPredicate::P_Wrap
@ P_Wrap
Definition: ScalarEvolution.h:223
llvm::SCEVOperand
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
Definition: ScalarEvolutionExpander.h:46
llvm::Type::getNonOpaquePointerElementType
Type * getNonOpaquePointerElementType() const
Only use this method in code that is not reachable with opaque pointers, or part of deprecated method...
Definition: Type.h:382
llvm::Type::getInt8Ty
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:237
llvm::count_if
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition: STLExtras.h:1704
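A minimal sketch, assuming a small test container:

  SmallVector<int, 4> Vals = {1, 2, 3, 4};
  auto NumEven = llvm::count_if(Vals, [](int V) { return V % 2 == 0; }); // 2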
llvm::APInt::getZero
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:177
llvm::Type::getInt32Ty
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:239
llvm::SCEVZeroExtendExpr
This class represents a zero extension of a small integer value to a larger integer value.
Definition: ScalarEvolutionExpressions.h:156
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2149
llvm::Instruction::mayHaveSideEffects
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
Definition: Instruction.cpp:695
llvm::SCEVPredicate
This class represents an assumption made using SCEV expressions which can be checked at run-time.
Definition: ScalarEvolution.h:215
llvm::SCEVOperand::OperandIdx
int OperandIdx
The use index of an expanded instruction.
Definition: ScalarEvolutionExpander.h:52
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:55
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ScalarEvolution::getMulExpr
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
Definition: ScalarEvolution.cpp:3051
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
llvm::ScalarEvolution::properlyDominates
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV properly dominate the specified basic block.
Definition: ScalarEvolution.cpp:13344
CommandLine.h
LHS
Value * LHS
Definition: X86PartialReduction.cpp:75
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::LoopBase::getParentLoop
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Definition: LoopInfo.h:113
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:157
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1605
llvm::visitAll
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
Definition: ScalarEvolutionExpressions.h:723
llvm::PPCISD::SC
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
Definition: PPCISelLowering.h:418
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
llvm::COFF::DebugType
DebugType
Definition: COFF.h:651
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:667
llvm::Instruction::setHasNoSignedWrap
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
Definition: Instruction.cpp:127
llvm::User
Definition: User.h:44
C
Definition: README_ALTIVEC.txt:86
llvm::scAddRecExpr
@ scAddRecExpr
Definition: ScalarEvolutionExpressions.h:48
llvm::SCEVTruncateExpr
This class represents a truncation of an integer value to a smaller integer value.
Definition: ScalarEvolutionExpressions.h:144
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:297
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::scUMaxExpr
@ scUMaxExpr
Definition: ScalarEvolutionExpressions.h:49
UINT64_MAX
#define UINT64_MAX
Definition: DataTypes.h:77
IP
Definition: NVPTXLowerArgs.cpp:167
llvm::Type::isVectorTy
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:227
llvm::PatternMatch::m_Instruction
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:726
llvm::BasicBlock::getFirstInsertionPt
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:246
false
Definition: StackSlotColoring.cpp:141
FactorOutConstant
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder, const SCEV *Factor, ScalarEvolution &SE, const DataLayout &DL)
FactorOutConstant - Test if S is divisible by Factor, using signed division.
Definition: ScalarEvolutionExpander.cpp:290
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::SCEVMulExpr
This node represents multiplication of some number of SCEVs.
Definition: ScalarEvolutionExpressions.h:281
llvm::PHINode::getIncomingValueForBlock
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: Instructions.h:2849
llvm::IntegerType
Class to represent integer types.
Definition: DerivedTypes.h:40
llvm::Instruction::CastOps
CastOps
Definition: Instruction.h:800
llvm::SCEVWrapPredicate::IncrementNSSW
@ IncrementNSSW
Definition: ScalarEvolution.h:345
llvm::SCEVUMaxExpr
This class represents an unsigned maximum selection.
Definition: ScalarEvolutionExpressions.h:471
llvm::Instruction
Definition: Instruction.h:42
llvm::SimplifyInstruction
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
Definition: InstructionSimplify.cpp:6465
llvm::SCEVExprContains
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Definition: ScalarEvolutionExpressions.h:730
llvm::BasicBlock::phis
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:355
llvm::LoopBase::getExitingBlocks
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
Definition: LoopInfoImpl.h:33
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1769
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:919
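An illustrative use, assuming Ctx is an LLVMContext in scope:

  Constant *FortyTwo = ConstantInt::get(Type::getInt32Ty(Ctx), 42); // i32 42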
LoopUtils.h
PickMostRelevantLoop
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
Definition: ScalarEvolutionExpander.cpp:659
llvm::SCEVComparePredicate::getRHS
const SCEV * getRHS() const
Returns the right hand side of the predicate.
Definition: ScalarEvolution.h:300
llvm::SCEVNAryExpr
This node is a base class providing common functionality for n'ary operators.
Definition: ScalarEvolutionExpressions.h:184
PatternMatch.h
llvm::Type::getInt1PtrTy
static PointerType * getInt1PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:287
llvm::PointerType::isOpaque
bool isOpaque() const
Definition: DerivedTypes.h:673
llvm::Type::getIntegerBitWidth
unsigned getIntegerBitWidth() const
Definition: DerivedTypes.h:97
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:344
X
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
llvm::SCEVOperand::S
const SCEV * S
The SCEV operand to be costed.
Definition: ScalarEvolutionExpander.h:54
LoopInfo.h
llvm::SCEV::isNonConstantNegative
bool isNonConstantNegative() const
Return true if the specified SCEV is negated, but not a constant.
Definition: ScalarEvolution.cpp:443
llvm::PatternMatch::m_Power2
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:544
llvm::Type::isIntegerTy
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:191
llvm::ScalarEvolution::dominates
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV dominate the specified basic block.
Definition: ScalarEvolution.cpp:13340
llvm::SCEVExpander::generateOverflowCheck
Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
Definition: ScalarEvolutionExpander.cpp:2362
llvm::cl::opt
Definition: CommandLine.h:1392
llvm::PHINode::isComplete
bool isComplete() const
If the PHI node is complete, which means all of its parent's predecessors have an incoming value in this ...
Definition: Instructions.h:2879
llvm::SCEV
This class represents an analyzed expression in the program.
Definition: ScalarEvolution.h:75
llvm::MipsISD::Ext
@ Ext
Definition: MipsISelLowering.h:159
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::SCEVExpander::findInsertPointAfter
BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
Definition: ScalarEvolutionExpander.cpp:98
llvm::Instruction::eraseFromParent
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:77
expand
static Expected< BitVector > expand(StringRef S, StringRef Original)
Definition: GlobPattern.cpp:27
uint64_t
llvm::StructLayout
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:622
llvm::SCEVPredicate::getKind
SCEVPredicateKind getKind() const
Definition: ScalarEvolution.h:234
D
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Scaled
@ Scaled
Definition: ARCInstrInfo.cpp:35
llvm::scZeroExtend
@ scZeroExtend
Definition: ScalarEvolutionExpressions.h:43
llvm::SCEVExpander::expandUnionPredicate
Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Definition: ScalarEvolutionExpander.cpp:2516
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2814
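A sketch of wiring a two-input phi; Header, Preheader and Latch are assumed BasicBlock pointers, Start and Next assumed Values, Ctx an LLVMContext:

  PHINode *IV = PHINode::Create(Type::getInt64Ty(Ctx), /*NumReservedValues=*/2,
                                "iv", &Header->front());
  IV->addIncoming(Start, Preheader); // value on entry to the loop
  IV->addIncoming(Next, Latch);      // value after each iteration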
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::SCEVComparePredicate
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
Definition: ScalarEvolution.h:278
llvm::scAddExpr
@ scAddExpr
Definition: ScalarEvolutionExpressions.h:45
llvm::DenseMap
Definition: DenseMap.h:716
llvm::SCEV::FlagNSW
@ FlagNSW
Definition: ScalarEvolution.h:134
llvm::SCEV::FlagAnyWrap
@ FlagAnyWrap
Definition: ScalarEvolution.h:131
llvm::ConstantExpr::get
static Constant * get(unsigned Opcode, Constant *C1, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a unary operator constant expression, folding if possible.
Definition: Constants.cpp:2292
IsIncrementNSW
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
Definition: ScalarEvolutionExpander.cpp:1134
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::StructLayout::getElementContainingOffset
unsigned getElementContainingOffset(uint64_t Offset) const
Given a valid byte offset into the structure, returns the structure index that contains it.
Definition: DataLayout.cpp:83
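A hedged example of mapping a byte offset back to a field, assuming DL is a DataLayout, STy a StructType*, and Offset a valid byte offset into the struct:

  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned FieldIdx   = SL->getElementContainingOffset(Offset);
  uint64_t FieldStart = SL->getElementOffset(FieldIdx); // byte offset of that field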
llvm::LoopBase::getLoopPreheader
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
Definition: LoopInfoImpl.h:166
llvm::SCEVCheapExpansionBudget
cl::opt< unsigned > SCEVCheapExpansionBudget
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:632
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1670
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:364
llvm::SCEVWrapPredicate::IncrementNUSW
@ IncrementNUSW
Definition: ScalarEvolution.h:344
llvm::SCEVConstant
This class represents a constant integer value.
Definition: ScalarEvolutionExpressions.h:60
llvm::LoopBase::getLoopLatch
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
Definition: LoopInfoImpl.h:215
llvm::scUDivExpr
@ scUDivExpr
Definition: ScalarEvolutionExpressions.h:47
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
llvm::ConstantExpr::getCast
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
Definition: Constants.cpp:2002
llvm::CmpInst::BAD_ICMP_PREDICATE
@ BAD_ICMP_PREDICATE
Definition: InstrTypes.h:752
llvm::WinEH::EncodingType::CE
@ CE
Windows NT (Windows on ARM)
llvm::scSignExtend
@ scSignExtend
Definition: ScalarEvolutionExpressions.h:44
llvm::MinMaxIntrinsic::getPredicate
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
Definition: IntrinsicInst.h:585
llvm::TargetTransformInfo::getCastInstrCost
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
Definition: TargetTransformInfo.cpp:806
llvm::SCEVPredicate::P_Compare
@ P_Compare
Definition: ScalarEvolution.h:223
SimplifyAddOperands
static void SimplifyAddOperands(SmallVectorImpl< const SCEV * > &Ops, Type *Ty, ScalarEvolution &SE)
SimplifyAddOperands - Sort and simplify a list of add operands.
Definition: ScalarEvolutionExpander.cpp:363
SplitAddRecs
static void SplitAddRecs(SmallVectorImpl< const SCEV * > &Ops, Type *Ty, ScalarEvolution &SE)
SplitAddRecs - Flatten a list of add operands, moving addrec start values out to the top level.
Definition: ScalarEvolutionExpander.cpp:392
llvm::FloatStyle::Exponent
@ Exponent
Builder
Definition: AssumeBundleBuilder.cpp:651
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::TargetTransformInfo::isTruncateFree
bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
Definition: TargetTransformInfo.cpp:462
llvm::CmpInst::ICMP_SLT
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:748
llvm::APIntOps::smin
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
Definition: APInt.h:2139
llvm::SCEVNAryExpr::getNoWrapFlags
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
Definition: ScalarEvolutionExpressions.h:213
llvm::SmallPtrSetImplBase::clear
void clear()
Definition: SmallPtrSet.h:95
llvm::ScalarEvolution::getSignExtendExpr
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
Definition: ScalarEvolution.cpp:1897
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
Mul
BinaryOperator * Mul
Definition: X86PartialReduction.cpp:70
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1612
DataLayout.h
llvm::StructType
Class to represent struct types.
Definition: DerivedTypes.h:213
llvm::scTruncate
@ scTruncate
Definition: ScalarEvolutionExpressions.h:42
llvm::StructLayout::getSizeInBytes
uint64_t getSizeInBytes() const
Definition: DataLayout.h:629
llvm::SCEVSequentialUMinExpr
This class represents a sequential/in-order unsigned minimum selection.
Definition: ScalarEvolutionExpressions.h:554
llvm::ScalarEvolution::getConstant
const SCEV * getConstant(ConstantInt *V)
Definition: ScalarEvolution.cpp:462
llvm::CmpInst::ICMP_ULT
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:744
llvm::Loop::getCanonicalInductionVariable
PHINode * getCanonicalInductionVariable() const
Check to see if the loop has a canonical induction variable: an integer recurrence that starts at 0 a...
Definition: LoopInfo.cpp:146
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::ConstantInt::isZero
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:194
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:69
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:529
CostKind
static cl::opt< TargetTransformInfo::TargetCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(TargetTransformInfo::TCK_RecipThroughput), cl::values(clEnumValN(TargetTransformInfo::TCK_RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(TargetTransformInfo::TCK_Latency, "latency", "Instruction latency"), clEnumValN(TargetTransformInfo::TCK_CodeSize, "code-size", "Code size"), clEnumValN(TargetTransformInfo::TCK_SizeAndLatency, "size-latency", "Code size and latency")))
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:991
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:82
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::SCEV::FlagNUW
@ FlagNUW
Definition: ScalarEvolution.h:133
S
Definition: README.txt:210
llvm::CastInst
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:429
llvm::SmallSet::insert
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:182
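The returned pair's bool reports whether the element was newly inserted, e.g. (L is an assumed Loop pointer):

  SmallSet<const Loop *, 2> Visited;
  if (Visited.insert(L).second) {
    // first time this loop has been seen
  }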
llvm::PredIterator
Definition: CFG.h:42
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:305
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
llvm::TargetTransformInfo::CastContextHint::None
@ None
The cast is not used with a load/store of any kind.
llvm::ConstantInt::getFalse
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:874
llvm::scPtrToInt
@ scPtrToInt
Definition: ScalarEvolutionExpressions.h:54
llvm::BasicBlock::front
const Instruction & front() const
Definition: BasicBlock.h:309
llvm::APInt::zext
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:973
SCEV_DEBUG_WITH_TYPE
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
Definition: ScalarEvolutionExpander.cpp:33
llvm::SCEV::NoWrapFlags
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Definition: ScalarEvolution.h:130
llvm::MCID::Select
@ Select
Definition: MCInstrDesc.h:164
llvm::SCEVExpander::getRelatedExistingExpansion
Value * getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find the ValueOffsetPair for S.
Definition: ScalarEvolutionExpander.cpp:2052
llvm::APIntOps::umax
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
Definition: APInt.h:2154
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:83
llvm::stable_sort
void stable_sort(R &&Range)
Definition: STLExtras.h:1749
llvm::ConstantExpr
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:971
llvm::SCEVAddRecExpr::getLoop
const Loop * getLoop() const
Definition: ScalarEvolutionExpressions.h:354
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:50
llvm::Constant::getNullValue
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:350
llvm::ScalarEvolution::getMinusSCEV
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
Definition: ScalarEvolution.cpp:4524
llvm::SCEVSMinExpr
This class represents a signed minimum selection.
Definition: ScalarEvolutionExpressions.h:483
llvm::normalizeForPostIncUse
const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE)
Normalize S to be post-increment for all loops present in Loops.
Definition: ScalarEvolutionNormalization.cpp:97
llvm::SCEVWrapPredicate::getFlags
IncrementWrapFlags getFlags() const
Returns the set of assumed no-overflow flags.
Definition: ScalarEvolution.h:393
llvm::SCEVExpander::replaceCongruentIVs
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
Definition: ScalarEvolutionExpander.cpp:1911
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:341
llvm::SCEVUnionPredicate
This class represents a composition of other SCEV predicates, and is the class that most clients will...
Definition: ScalarEvolution.h:413
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2706
llvm::StructLayout::getElementOffset
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:652
llvm::SCEVCastExpr
This is the base class for unary cast operator classes.
Definition: ScalarEvolutionExpressions.h:86
llvm::pred_end
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:112
llvm::SCEVAddRecExpr
This node represents a polynomial recurrence on the trip count of the specified loop.
Definition: ScalarEvolutionExpressions.h:342
llvm::SCEVWrapPredicate
This class represents an assumption made on an AddRec expression.
Definition: ScalarEvolution.h:318
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::SCEVUnknown
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
Definition: ScalarEvolutionExpressions.h:571
llvm::LoopBase::getHeader
BlockT * getHeader() const
Definition: LoopInfo.h:104
llvm::SCEV::isOne
bool isOne() const
Return true if the expression is a constant one.
Definition: ScalarEvolution.cpp:431
llvm::User::operand_values
iterator_range< value_op_iterator > operand_values()
Definition: User.h:266
IsIncrementNUW
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
Definition: ScalarEvolutionExpander.cpp:1148
llvm::ConstantExpr::getGetElementPtr
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1243
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:591
llvm::SCEVOperand::ParentOpcode
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
Definition: ScalarEvolutionExpander.h:50
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:185
llvm::SCEVExpander::expandComparePredicate
Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Definition: ScalarEvolutionExpander.cpp:2349
llvm::Instruction::BinaryOps
BinaryOps
Definition: Instruction.h:786
llvm::pred_begin
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:109
llvm::formLCSSAForInstructions
bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, IRBuilderBase &Builder, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
Definition: LCSSA.cpp:78
llvm::ScalarEvolution::getZeroExtendExpr
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
Definition: ScalarEvolution.cpp:1595
llvm::SCEVWrapPredicate::getExpr
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
Definition: ScalarEvolution.cpp:14031
llvm::SCEVAddExpr
This node represents an addition of some number of SCEVs.
Definition: ScalarEvolutionExpressions.h:257
llvm::isSafeToExpandAt
bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint, ScalarEvolution &SE)
Return true if the given expression is safe to expand in the sense that all materialized values are d...
Definition: ScalarEvolutionExpander.cpp:2623
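A sketch of guarding expansion with this check; S, InsertPt, SE and DL are assumed to be in scope, and the expander name string is arbitrary:

  if (isSafeToExpandAt(S, InsertPt, SE)) {
    SCEVExpander Expander(SE, DL, "scev.expand");
    Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
  }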
llvm::User::getNumOperands
unsigned getNumOperands() const
Definition: User.h:191
llvm::scMulExpr
@ scMulExpr
Definition: ScalarEvolutionExpressions.h:46
llvm::SCEVExpander::expandCodeForPredicate
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
Definition: ScalarEvolutionExpander.cpp:2333
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:367
llvm::SCEVPredicate::P_Union
@ P_Union
Definition: ScalarEvolution.h:223
llvm::PatternMatch::m_ICmp
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Definition: PatternMatch.h:1404
llvm::CmpInst::ICMP_UGT
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:742
Dominators.h
N
#define N
llvm::CastInst::getOpcode
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:676
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:91
InstructionSimplify.h
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
TargetTransformInfo.h
llvm::PHINode
Definition: Instructions.h:2664
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:119
llvm::PatternMatch
Definition: PatternMatch.h:47
llvm::SCEVExpander::getIVIncOperand
Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
Definition: ScalarEvolutionExpander.cpp:957
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
llvm::Function::hasMinSize
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:660
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
llvm::SCEV::getType
Type * getType() const
Return the LLVM type of this SCEV expression.
Definition: ScalarEvolution.cpp:393
llvm::SCEVExpander::expandWrapPredicate
Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
Definition: ScalarEvolutionExpander.cpp:2491
llvm::ScalarEvolution::getAddExpr
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
Definition: ScalarEvolution.cpp:2454
llvm::IntegerType::get
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1474
BB
Definition: README.txt:39
GEP
Hexagon Common GEP
Definition: HexagonCommonGEP.cpp:172
llvm::scSequentialUMinExpr
@ scSequentialUMinExpr
Definition: ScalarEvolutionExpressions.h:53
CreateAdd
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Definition: Reassociate.cpp:231
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::PatternMatch::m_BasicBlock
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
Definition: PatternMatch.h:160
llvm::TargetTransformInfo::getArithmeticInstrCost
InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueKind Opd1Info=OK_AnyValue, OperandValueKind Opd2Info=OK_AnyValue, OperandValueProperties Opd1PropInfo=OP_None, OperandValueProperties Opd2PropInfo=OP_None, ArrayRef< const Value * > Args=ArrayRef< const Value * >(), const Instruction *CxtI=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
Definition: TargetTransformInfo.cpp:741
llvm::scUMinExpr
@ scUMinExpr
Definition: ScalarEvolutionExpressions.h:51
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::cl::desc
Definition: CommandLine.h:405
canBeCheaplyTransformed
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
Definition: ScalarEvolutionExpander.cpp:1100
raw_ostream.h
llvm::SCEV::isZero
bool isZero() const
Return true if the expression is a constant zero.
Definition: ScalarEvolution.cpp:425
llvm::SCEVAddRecExpr::getType
Type * getType() const
Definition: ScalarEvolutionExpressions.h:352
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::TargetTransformInfo::TCK_RecipThroughput
@ TCK_RecipThroughput
Reciprocal throughput.
Definition: TargetTransformInfo.h:211
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:421
llvm::APIntOps::smax
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
Definition: APInt.h:2144
llvm::SCEVAddRecExpr::getStepRecurrence
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
Definition: ScalarEvolutionExpressions.h:360
llvm::sampleprof::Base
@ Base
Definition: Discriminator.h:58
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:164
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:927
llvm::SCEVComparePredicate::getPredicate
ICmpInst::Predicate getPredicate() const
Definition: ScalarEvolution.h:294
SmallSet.h
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:37