//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///   if (Stride1 == 1 && Stride2 == 1) {
///     for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///   } else
///     ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));
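
// Illustrative usage of the options above (not part of the original file;
// the exact pass spelling is an assumption based on typical LAA test RUN
// lines):
//   opt -passes='print<access-info>' -runtime-memory-check-threshold=16 \
//       -disable-output input.ll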

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}
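
// Illustrative example (not part of the original file): given IR such as
//   %stride.ext = sext i32 %stride to i64
// stripIntegerCast(%stride.ext) returns %stride; any value that is not a
// cast of an integer is returned unchanged.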

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  Value *StrideVal = stripIntegerCast(SI->second);

  ScalarEvolution *SE = PSE.getSE();
  const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
  const auto *CT =
      static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

  PSE.addPredicate(*SE->getEqualPredicate(U, CT));
  auto *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
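
// Illustrative example (not part of the original file): for a pointer with
// SCEV {%A,+,(4 * %Stride)}<%loop> and %Stride registered in PtrToStride,
// this adds the predicate "%Stride == 1" to PSE and returns the simplified
// expression {%A,+,4}<%loop>.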

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is the calculated back-edge taken count:
///   N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
///   Start = UMIN(A, B); End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of a single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
///   NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
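///
/// Illustrative example (not part of the original comment): for i32 accesses
/// to A[i] with Step = +4 and back-edge taken count N = 99, the interval is
/// [A, A + 4*99 + 4) = [A, A + 400).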
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(PtrExpr, Lp)) {
    ScStart = ScEnd = PtrExpr;
  } else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }
  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  Type *IdxTy = DL.getIndexType(Ptr->getType());
  const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
                        NeedsFreeze);
}

void RuntimePointerChecking::tryToCreateDiffCheck(
    const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
  if (!CanUseDiffCheck)
    return;

  // If either group contains multiple different pointers, bail out.
  // TODO: Support multiple pointers by using the minimum or maximum pointer,
  // depending on src & sink.
  if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
    CanUseDiffCheck = false;
    return;
  }

  PointerInfo *Src = &Pointers[CGI.Members[0]];
  PointerInfo *Sink = &Pointers[CGJ.Members[0]];

  // If either pointer is read and written, multiple checks may be needed. Bail
  // out.
  if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
      !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
    CanUseDiffCheck = false;
    return;
  }

  ArrayRef<unsigned> AccSrc =
      DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
  ArrayRef<unsigned> AccSink =
      DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
  // If either pointer is accessed multiple times, there may not be a clear
  // src/sink relation. Bail out for now.
  if (AccSrc.size() != 1 || AccSink.size() != 1) {
    CanUseDiffCheck = false;
    return;
  }
  // If the sink is accessed before src, swap src/sink.
  if (AccSink[0] < AccSrc[0])
    std::swap(Src, Sink);

  auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
  auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
  if (!SrcAR || !SinkAR) {
    CanUseDiffCheck = false;
    return;
  }

  const DataLayout &DL =
      SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return;
  unsigned AllocSize =
      std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
  IntegerType *IntTy =
      IntegerType::get(Src->PointerValue->getContext(),
                       DL.getPointerSizeInBits(CGI.AddressSpace));

  // Only constant steps matching the AllocSize are supported at the moment.
  // This simplifies the difference computation. Can be extended in the
  // future.
  auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
  if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
      Step->getAPInt().abs() != AllocSize) {
    CanUseDiffCheck = false;
    return;
  }

  // When counting down, the dependence distance needs to be swapped.
  if (Step->getValue()->isNegative())
    std::swap(SinkAR, SrcAR);

  const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
  const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
  if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
      isa<SCEVCouldNotCompute>(SrcStartInt)) {
    CanUseDiffCheck = false;
    return;
  }
  DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
                          Src->NeedsFreeze || Sink->NeedsFreeze);
}
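
// Illustrative note (not part of the original file): a recorded DiffCheck
// lets the caller emit a single unsigned comparison, roughly of the form
//   (SinkStart - SrcStart) u< VF * IC * AllocSize  ==>  conflict,
// which is cheaper than the two-comparison interval-overlap test that
// insert() above prepares for.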

SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ)) {
        tryToCreateDiffCheck(CGI, CGJ);
        Checks.push_back(std::make_pair(&CGI, &CGJ));
      }
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}
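
// Illustrative example (not part of the original file): with I = %A and
// J = (%A + 8), the difference J - I is the constant 8 (non-negative), so
// the function returns I; when the difference does not fold to a constant,
// it returns nullptr.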

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         bool NeedsFreeze,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
  return true;
}
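
// Illustrative example (not part of the original file): a group with bounds
// [%A, %A + 16) absorbing a pointer with range [%A + 8, %A + 24) widens its
// bounds to [%A, %A + 24); if either distance to the group bounds were
// non-constant, addPointer would return false and the pointer would start a
// new group.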

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
    auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
    Iter.first->second.push_back(Index);
  }

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      for (unsigned Pointer : PointerI->second) {
        bool Merged = false;
        // Mark this pointer as seen.
        Seen.insert(Pointer);

        // Go through all the existing sets and see if we can find one
        // which can include this pointer.
        for (RuntimeCheckingPtrGroup &Group : Groups) {
          // Don't perform more than a certain amount of comparisons.
          // This should limit the cost of grouping the pointers to something
          // reasonable. If we do end up hitting this threshold, the algorithm
          // will create separate groups for all remaining pointers.
          if (TotalComparisons > MemoryCheckMergeThreshold)
            break;

          TotalComparisons++;

          if (Group.addPointer(Pointer, *this)) {
            Merged = true;
            break;
          }
        }

        if (!Merged)
          // We couldn't add this pointer to any existing set or the threshold
          // for the number of comparisons has been reached. Create a new group
          // to hold the current pointer.
          Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
      }
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}
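
// Sample output (illustrative, based on the printing code above):
//   Run-time memory checks:
//   Check 0:
//     Comparing group (0x...):
//       %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idx
//     Against group (0x...):
//       %arrayidx4 = getelementptr inbounds i32, ptr %b, i64 %idx
//   Grouped accesses:
//     Group 0x...:
//       (Low: %a High: (400 + %a))
//         Member: {%a,+,4}<%loop>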

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc, Type *AccessTy) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access, Type *AccessTy,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride, bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       Value *&UncomputablePtr, bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Map of all accesses. Values are the types used to access memory pointed
  /// to by the pointer.
  PtrAccessMap Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that, this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded = false;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Type *AccessTy,
                     Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
                          function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
  WorkList.push_back(StartPtr);

  while (!WorkList.empty()) {
    Value *Ptr = WorkList.pop_back_val();
    if (!Visited.insert(Ptr).second)
      continue;
    auto *PN = dyn_cast<PHINode>(Ptr);
    // SCEV does not look through non-header PHIs inside the loop. Such phis
    // can be analyzed by adding separate accesses for each incoming pointer
    // value.
    if (PN && InnermostLoop.contains(PN->getParent()) &&
        PN->getParent() != InnermostLoop.getHeader()) {
      for (const Use &Inc : PN->incoming_values())
        WorkList.push_back(Inc);
    } else
      AddPointer(Ptr);
  }
}
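
// Illustrative example (not part of the original file): for a loop body with
//   %p = phi ptr [ %a, %if.then ], [ %b, %if.else ]  ; non-header phi
// visitPointers(%p, L, AddPointer) invokes AddPointer(%a) and AddPointer(%b)
// instead of giving up on the unanalyzable phi.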

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access, Type *AccessTy,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  ScalarEvolution &SE = *PSE.getSE();
  SmallVector<std::pair<const SCEV *, bool>> TranslatedPtrs;
  auto *SI = dyn_cast<SelectInst>(Ptr);
  // Look through selects in the current loop.
  if (SI && !TheLoop->isLoopInvariant(SI)) {
    TranslatedPtrs = {
        std::make_pair(SE.getSCEV(SI->getOperand(1)),
                       !isGuaranteedNotToBeUndefOrPoison(SI->getOperand(1))),
        std::make_pair(SE.getSCEV(SI->getOperand(2)),
                       !isGuaranteedNotToBeUndefOrPoison(SI->getOperand(2)))};
  } else
    TranslatedPtrs = {
        std::make_pair(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false)};

  for (auto &P : TranslatedPtrs) {
    const SCEV *PtrExpr = P.first;
    if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
      return false;

    // When we run after a failing dependency check we have to make sure
    // we don't have wrapping pointers.
    if (ShouldCheckWrap) {
      // Skip wrap checking when translating pointers.
      if (TranslatedPtrs.size() > 1)
        return false;

      if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
        auto *Expr = PSE.getSCEV(Ptr);
        if (!Assume || !isa<SCEVAddRecExpr>(Expr))
          return false;
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      }
    }
    // If there's only one option for Ptr, look it up after bounds and wrap
    // checking, because assumptions might have been added to PSE.
    if (TranslatedPtrs.size() == 1)
      TranslatedPtrs[0] = std::make_pair(
          replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false);
  }

  for (auto &P : TranslatedPtrs) {
    const SCEV *PtrExpr = P.first;

    // The id of the dependence set.
    unsigned DepId;

    if (isDependencyCheckNeeded()) {
      Value *Leader = DepCands.getLeaderValue(Access).getPointer();
      unsigned &LeaderId = DepSetId[Leader];
      if (!LeaderId)
        LeaderId = RunningDepId++;
      DepId = LeaderId;
    } else
      // Each access has its own dependence set.
      DepId = RunningDepId++;

    bool IsWrite = Access.getInt();
    RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
                   P.second);
    LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
  }

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     Value *&UncomputablePtr,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((AS.size() <= 1 ||
              all_of(AS,
                     [this](auto AC) {
                       MemAccessInfo AccessWrite(AC.getValue(), true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      for (auto &AccessTy : Accesses[Access]) {
        if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                  DepSetId, TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, false)) {
          LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                            << *Access.getPointer() << '\n');
          Retries.push_back(Access);
          CanDoAliasSetRT = false;
        }
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means
    // that we have a pointer for which we couldn't find the bounds but we
    // don't actually need to emit any checks so it does not matter.
    //
    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
    // any bound checks (because in that case the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries) {
        for (auto &AccessTy : Accesses[Access]) {
          if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
                                    DepSetId, TheLoop, RunningDepId, ASId,
                                    ShouldCheckWrap, /*Assume=*/true)) {
            CanDoAliasSetRT = false;
            UncomputablePtr = Access.getPointer();
            break;
          }
        }
      }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.first.getPointer() << " ("
             << (A.first.getInt()
                     ? "write"
                     : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
                                                                : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessMap DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const auto &AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &AC : S) {
          if (AC.first.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.first.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            // We only use the pointer keys, the types vector values don't
            // matter.
            DeferredAccesses.insert({Access, {}});
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          getUnderlyingObjects(Ptr, TempObjects, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}
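
// Illustrative example (not part of the original file): for
//   %mul = mul nsw i32 %i, 4
//   %gep = getelementptr inbounds i32, ptr %A, i32 %mul
// where %i is an NSW AddRec over the loop, the nsw on %mul plus the inbounds
// GEP let isNoWrapAddRec prove that %gep is monotonic even though SCEV did
// not mark the pointer AddRec itself as non-wrapping.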

/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
                           Value *Ptr, const Loop *Lp,
                           const ValueToValueMap &StridesMap, bool Assume,
                           bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  if (isa<ScalableVectorType>(AccessTy)) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
                      << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  unsigned AddrSpace = Ty->getPointerAddressSpace();
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec =
      !ShouldCheckWrap ||
      PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
      isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace)) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
  int64_t Size = AllocSize.getFixedSize();
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              AddrSpace))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}
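
// Illustrative example (not part of the original file): an i32 access whose
// pointer SCEV is {%A,+,8}<%loop> has StepVal = 8 and Size = 4, so
// getPtrStride returns 2 (every other element); a step of 6 would leave a
// non-zero remainder and return 0.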

Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
                                    Value *PtrB, const DataLayout &DL,
                                    ScalarEvolution &SE, bool StrictCheck,
                                    bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");
  assert(cast<PointerType>(PtrA->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
  assert(cast<PointerType>(PtrB->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return None;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return None;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  int Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks through
    // `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return None;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    const auto *Diff =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
    if (!Diff)
      return None;
    Val = Diff->getAPInt().getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts have been stripped from the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return None;
}
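
// Illustrative example (not part of the original file): with PtrA = &A[1] and
// PtrB = &A[3] over i32 elements, the byte difference is 8 and the element
// size is 4, so getPointersDiff returns 2; a 5-byte difference would fail the
// StrictCheck divisibility test and yield None.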

bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, int>;
  auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
    return L.first < R.first;
  };
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  int Cnt = 1;
  bool IsConsecutive = true;
  for (auto *Ptr : VL.drop_front()) {
    Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                         /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = *Diff;
    auto Res = Offsets.emplace(Offset, Cnt);
    if (!Res.second)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
    ++Cnt;
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices array only if it is non-consecutive.
    SortedIndices.resize(VL.size());
    Cnt = 0;
    for (const std::pair<int64_t, int> &Pair : Offsets) {
      SortedIndices[Cnt] = Pair.second;
      ++Cnt;
    }
  }
  return true;
}
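
// Illustrative example (not part of the original file): for VL = {&A[0],
// &A[2], &A[1]}, the computed offsets are {0, 2, 1}; the accesses are not
// consecutive in program order, so SortedIndices is filled with {0, 2, 1},
// the element order that sorts the pointers by address.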

/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  if (!PtrA || !PtrB)
    return false;
  Type *ElemTyA = getLoadStoreType(A);
  Type *ElemTyB = getLoadStoreType(B);
  Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                                       /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
}

void MemoryDepChecker::addAccess(StoreInst *SI) {
  visitPointers(SI->getPointerOperand(), *InnermostLoop,
                [this, SI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
                  InstMap.push_back(SI);
                  ++AccessIdx;
                });
}

void MemoryDepChecker::addAccess(LoadInst *LI) {
  visitPointers(LI->getPointerOperand(), *InnermostLoop,
                [this, LI](Value *Ptr) {
                  Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
                  InstMap.push_back(LI);
                  ++AccessIdx;
                });
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding, making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  // hence on your typical architecture store-load forwarding does not take
  // place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
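
// Illustrative example (not part of the original file): Distance = 12 bytes,
// TypeByteSize = 4. The loop tries VF = 8 bytes first: 12 % 8 != 0 and
// 12 / 8 = 1 < 32 iterations, so the maximum safe VF drops to 4 bytes, i.e.
// a single element, and the function reports a forwarding conflict.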
1537 
1538 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1539  if (Status < S)
1540  Status = S;
1541 }
1542 
1543 /// Given a non-constant (unknown) dependence-distance \p Dist between two
1544 /// memory accesses, that have the same stride whose absolute value is given
1545 /// in \p Stride, and that have the same type size \p TypeByteSize,
1546 /// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
1547 /// possible to prove statically that the dependence distance is larger
1548 /// than the range that the accesses will travel through the execution of
1549 /// the loop. If so, return true; false otherwise. This is useful for
1550 /// example in loops such as the following (PR31098):
1551 /// for (i = 0; i < D; ++i) {
1552 /// = out[i];
1553 /// out[i+D] =
1554 /// }
1556  const SCEV &BackedgeTakenCount,
1557  const SCEV &Dist, uint64_t Stride,
1558  uint64_t TypeByteSize) {
1559 
1560  // If we can prove that
1561  // (**) |Dist| > BackedgeTakenCount * Step
1562  // where Step is the absolute stride of the memory accesses in bytes,
1563  // then there is no dependence.
1564  //
1565  // Rationale:
1566  // We basically want to check if the absolute distance (|Dist/Step|)
1567  // is >= the loop iteration count (or > BackedgeTakenCount).
1568  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1569  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1570  // that the dependence distance is >= VF; This is checked elsewhere.
1571  // But in some cases we can prune unknown dependence distances early, and
1572  // even before selecting the VF, and without a runtime test, by comparing
1573  // the distance against the loop iteration count. Since the vectorized code
1574  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1575  // also guarantees that distance >= VF.
1576  //
1577  const uint64_t ByteStride = Stride * TypeByteSize;
1578  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1579  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1580 
1581  const SCEV *CastedDist = &Dist;
1582  const SCEV *CastedProduct = Product;
1583  uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1584  uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1585 
1586  // The dependence distance can be positive/negative, so we sign extend Dist;
1587  // The multiplication of the absolute stride in bytes and the
1588  // backedgeTakenCount is non-negative, so we zero extend Product.
1589  if (DistTypeSizeBits > ProductTypeSizeBits)
1590  CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1591  else
1592  CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1593 
1594  // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1595  // (If so, then we have proven (**) because |Dist| >= Dist)
1596  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1597  if (SE.isKnownPositive(Minus))
1598  return true;
1599 
1600  // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1601  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1602  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1603  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1604  if (SE.isKnownPositive(Minus))
1605  return true;
1606 
1607  return false;
1608 }
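// Editor's note: the sketch below (not part of the LLVM source) restates the
// (**) test with plain integers, assuming the distance and backedge-taken
// count are already constants; the real code performs the same comparison
// symbolically through SCEV. All names are hypothetical.
#include <cstdint>

static bool isSafeDepDistanceConst(int64_t Dist, uint64_t BackedgeTakenCount,
                                   uint64_t Stride, uint64_t TypeByteSize) {
  // (**) |Dist| > BackedgeTakenCount * Step, with Step = Stride * TypeByteSize.
  uint64_t AbsDist = Dist < 0 ? static_cast<uint64_t>(-Dist)
                              : static_cast<uint64_t>(Dist);
  return AbsDist > BackedgeTakenCount * (Stride * TypeByteSize);
}
// For the PR31098 loop above with 4-byte elements: Dist = 4 * D,
// BackedgeTakenCount = D - 1, Step = 4, and 4 * D > 4 * (D - 1) always holds,
// so the two accesses never overlap within one execution of the loop.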
1609 
1610 /// Check the dependence for two accesses with the same stride \p Stride.
1611 /// \p Distance is the positive distance and \p TypeByteSize is the type size
1612 /// in bytes.
1613 ///
1614 /// \returns true if they are independent.
1615 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1616  uint64_t TypeByteSize) {
1617  assert(Stride > 1 && "The stride must be greater than 1");
1618  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1619  assert(Distance > 0 && "The distance must be non-zero");
1620 
1621  // Skip if the distance is not a multiple of the type byte size.
1622  if (Distance % TypeByteSize)
1623  return false;
1624 
1625  uint64_t ScaledDist = Distance / TypeByteSize;
1626 
1627  // No dependence if the scaled distance is not a multiple of the stride.
1628  // E.g.
1629  // for (i = 0; i < 1024 ; i += 4)
1630  // A[i+2] = A[i] + 1;
1631  //
1632  // Two accesses in memory (scaled distance is 2, stride is 4):
1633  // | A[0] | | | | A[4] | | | |
1634  // | | | A[2] | | | | A[6] | |
1635  //
1636  // E.g.
1637  // for (i = 0; i < 1024 ; i += 3)
1638  // A[i+4] = A[i] + 1;
1639  //
1640  // Two accesses in memory (scaled distance is 4, stride is 3):
1641  // | A[0] | | | A[3] | | | A[6] | | |
1642  // | | | | | A[4] | | | A[7] | |
1643  return ScaledDist % Stride;
1644 }
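// Editor's note: an illustrative, standalone restatement (not part of the
// LLVM source) of the test above, with the two examples from the comment
// checked explicitly; names are hypothetical.
#include <cassert>
#include <cstdint>

static bool stridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                       uint64_t TypeByteSize) {
  if (Distance % TypeByteSize) // distance not a multiple of the element size
    return false;
  uint64_t ScaledDist = Distance / TypeByteSize;
  // Independent iff the two access streams never land on the same element.
  return ScaledDist % Stride != 0;
}

static void checkStridedExamples() {
  // A[i+2] = A[i] with i += 4: scaled distance 2, stride 4 -> independent.
  assert(stridedAccessesIndependent(/*Distance=*/8, /*Stride=*/4,
                                    /*TypeByteSize=*/4));
  // A[i+4] = A[i] with i += 3: scaled distance 4, stride 3 -> independent.
  assert(stridedAccessesIndependent(/*Distance=*/16, /*Stride=*/3,
                                    /*TypeByteSize=*/4));
}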
1645 
1646 MemoryDepChecker::Dependence::DepType
1647 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1648  const MemAccessInfo &B, unsigned BIdx,
1649  const ValueToValueMap &Strides) {
1650  assert(AIdx < BIdx && "Must pass arguments in program order");
1651 
1652  Value *APtr = A.getPointer();
1653  Value *BPtr = B.getPointer();
1654  bool AIsWrite = A.getInt();
1655  bool BIsWrite = B.getInt();
1656  Type *ATy = getLoadStoreType(InstMap[AIdx]);
1657  Type *BTy = getLoadStoreType(InstMap[BIdx]);
1658 
1659  // Two reads are independent.
1660  if (!AIsWrite && !BIsWrite)
1661  return Dependence::NoDep;
1662 
1663  // We cannot check pointers in different address spaces.
1664  if (APtr->getType()->getPointerAddressSpace() !=
1665  BPtr->getType()->getPointerAddressSpace())
1666  return Dependence::Unknown;
1667 
1668  int64_t StrideAPtr =
1669  getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true);
1670  int64_t StrideBPtr =
1671  getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true);
1672 
1673  const SCEV *Src = PSE.getSCEV(APtr);
1674  const SCEV *Sink = PSE.getSCEV(BPtr);
1675 
1676  // If the induction step is negative we have to invert source and sink of the
1677  // dependence.
1678  if (StrideAPtr < 0) {
1679  std::swap(APtr, BPtr);
1680  std::swap(ATy, BTy);
1681  std::swap(Src, Sink);
1682  std::swap(AIsWrite, BIsWrite);
1683  std::swap(AIdx, BIdx);
1684  std::swap(StrideAPtr, StrideBPtr);
1685  }
1686 
1687  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1688 
1689  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1690  << "(Induction step: " << StrideAPtr << ")\n");
1691  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1692  << *InstMap[BIdx] << ": " << *Dist << "\n");
1693 
1694  // Need accesses with constant stride. We don't want to vectorize
1695  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1696  // the address space.
1697  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
1698  LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1699  return Dependence::Unknown;
1700  }
1701 
1702  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1703  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1704  bool HasSameSize =
1705  DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1706  uint64_t Stride = std::abs(StrideAPtr);
1707  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1708  if (!C) {
1709  if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1710  isSafeDependenceDistance(DL, *(PSE.getSE()),
1711  *(PSE.getBackedgeTakenCount()), *Dist, Stride,
1712  TypeByteSize))
1713  return Dependence::NoDep;
1714 
1715  LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1716  FoundNonConstantDistanceDependence = true;
1717  return Dependence::Unknown;
1718  }
1719 
1720  const APInt &Val = C->getAPInt();
1721  int64_t Distance = Val.getSExtValue();
1722 
1723  // Attempt to prove strided accesses independent.
1724  if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
1725  areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1726  LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1727  return Dependence::NoDep;
1728  }
1729 
1730  // Negative distances are not plausible dependencies.
1731  if (Val.isNegative()) {
1732  bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1733  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1734  (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1735  !HasSameSize)) {
1736  LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1737  return Dependence::ForwardButPreventsForwarding;
1738  }
1739 
1740  LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1741  return Dependence::Forward;
1742  }
1743 
1744  // Write to the same location with the same size.
1745  if (Val == 0) {
1746  if (HasSameSize)
1747  return Dependence::Forward;
1748  LLVM_DEBUG(
1749  dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1750  return Dependence::Unknown;
1751  }
1752 
1753  assert(Val.isStrictlyPositive() && "Expect a positive value");
1754 
1755  if (!HasSameSize) {
1756  LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1757  "different type sizes\n");
1758  return Dependence::Unknown;
1759  }
1760 
1761  // Bail out early if passed-in parameters make vectorization not feasible.
1762  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1763  VectorizerParams::VectorizationFactor : 1);
1764  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1765  VectorizerParams::VectorizationInterleave : 1);
1766  // The minimum number of iterations for a vectorized/unrolled version.
1767  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1768 
1769  // It's not vectorizable if the distance is smaller than the minimum distance
1770  // needed for a vectorized/unrolled version. Vectorizing one iteration in
1771  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1772  // TypeByteSize (no need to add the last gap distance).
1773  //
1774  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1775  // foo(int *A) {
1776  // int *B = (int *)((char *)A + 14);
1777  // for (i = 0 ; i < 1024 ; i += 2)
1778  // B[i] = A[i] + 1;
1779  // }
1780  //
1781  // Two accesses in memory (stride is 2):
1782  // | A[0] | | A[2] | | A[4] | | A[6] | |
1783  // | B[0] | | B[2] | | B[4] |
1784  //
1785  // Distance needed for vectorizing all iterations except the last:
1786  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
1787  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1788  //
1789  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1790  // 12, which is less than the distance (14).
1791  //
1792  // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1793  // the minimum distance needed is 28, which is greater than distance. It is
1794  // not safe to do vectorization.
1795  uint64_t MinDistanceNeeded =
1796  TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1797  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1798  LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1799  << Distance << '\n');
1800  return Dependence::Backward;
1801  }
1802 
1803  // Unsafe if the minimum distance needed is greater than max safe distance.
1804  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1805  LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1806  << MinDistanceNeeded << " size in bytes");
1807  return Dependence::Backward;
1808  }
1809 
1810  // Positive distance bigger than max vectorization factor.
1811  // FIXME: Should use max factor instead of max distance in bytes, which
1812  // cannot handle different types.
1813  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1814  // void foo (int *A, char *B) {
1815  // for (unsigned i = 0; i < 1024; i++) {
1816  // A[i+2] = A[i] + 1;
1817  // B[i+2] = B[i] + 1;
1818  // }
1819  // }
1820  //
1821  // This case is currently unsafe according to the max safe distance. If we
1822  // analyze the two accesses on array B, the max safe dependence distance
1823  // is 2. Then we analyze the accesses on array A, where the minimum distance
1824  // needed is 8, which exceeds the max safe distance of 2 and so forbids
1825  // vectorization. But actually both A and B could be vectorized by 2 iterations.
1826  MaxSafeDepDistBytes =
1827  std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1828 
1829  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
1830  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1831  couldPreventStoreLoadForward(Distance, TypeByteSize))
1832  return Dependence::BackwardVectorizableButPreventsForwarding;
1833 
1834  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
1835  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
1836  << " with max VF = " << MaxVF << '\n');
1837  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1838  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
1839  return Dependence::BackwardVectorizable;
1840 }
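// Editor's note: a worked, standalone instance (not part of the LLVM source)
// of the distance bookkeeping at the tail of isDependent(), using hypothetical
// numbers: a backward dependence of 64 bytes between 4-byte, unit-stride
// accesses with no user-forced VF or interleave count.
#include <cstdint>

static uint64_t backwardDepExampleMaxVFBits() {
  const uint64_t TypeByteSize = 4, Stride = 1, Distance = 64;
  const unsigned MinNumIter = 2;
  // Minimum distance needed: 4 * 1 * (2 - 1) + 4 = 8 <= 64, so the dependence
  // is BackwardVectorizable rather than plain Backward.
  const uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > Distance)
    return 0; // not vectorizable at all
  // The distance caps the safe dependence distance, and hence the VF:
  const uint64_t MaxSafeDepDistBytes = Distance; // min(64, any earlier cap)
  const uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride); // 16
  return MaxVF * TypeByteSize * 8; // 512 bits of safe vector width
}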
1841 
1842 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
1843  MemAccessInfoList &CheckDeps,
1844  const ValueToValueMap &Strides) {
1845 
1846  MaxSafeDepDistBytes = -1;
1847  SmallPtrSet<MemAccessInfo, 8> Visited;
1848  for (MemAccessInfo CurAccess : CheckDeps) {
1849  if (Visited.count(CurAccess))
1850  continue;
1851 
1852  // Get the relevant memory access set.
1853  EquivalenceClasses<MemAccessInfo>::iterator I =
1854  AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
1855 
1856  // Check accesses within this set.
1857  EquivalenceClasses<MemAccessInfo>::member_iterator AI =
1858  AccessSets.member_begin(I);
1859  EquivalenceClasses<MemAccessInfo>::member_iterator AE =
1860  AccessSets.member_end();
1861 
1862  // Check every access pair.
1863  while (AI != AE) {
1864  Visited.insert(*AI);
1865  bool AIIsWrite = AI->getInt();
1866  // Check loads only against the next equivalence class, but stores also
1867  // against other stores in the same equivalence class, i.e. to the same address.
1868  EquivalenceClasses<MemAccessInfo>::member_iterator OI =
1869  (AIIsWrite ? AI : std::next(AI));
1870  while (OI != AE) {
1871  // Check every accessing instruction pair in program order.
1872  for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
1873  I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
1874  // Scan all accesses of the other equivalence class, but only the
1875  // subsequent accesses of the same equivalence class.
1876  for (std::vector<unsigned>::iterator
1877  I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
1878  I2E = (OI == AI ? I1E : Accesses[*OI].end());
1879  I2 != I2E; ++I2) {
1880  auto A = std::make_pair(&*AI, *I1);
1881  auto B = std::make_pair(&*OI, *I2);
1882 
1883  assert(*I1 != *I2);
1884  if (*I1 > *I2)
1885  std::swap(A, B);
1886 
1887  Dependence::DepType Type =
1888  isDependent(*A.first, A.second, *B.first, B.second, Strides);
1889  mergeInStatus(Dependence::isSafeForVectorization(Type));
1890 
1891  // Gather dependences unless we accumulated MaxDependences
1892  // dependences. In that case return as soon as we find the first
1893  // unsafe dependence. This puts a limit on this quadratic
1894  // algorithm.
1895  if (RecordDependences) {
1896  if (Type != Dependence::NoDep)
1897  Dependences.push_back(Dependence(A.second, B.second, Type));
1898 
1899  if (Dependences.size() >= MaxDependences) {
1900  RecordDependences = false;
1901  Dependences.clear();
1902  LLVM_DEBUG(dbgs()
1903  << "Too many dependences, stopped recording\n");
1904  }
1905  }
1906  if (!RecordDependences && !isSafeForVectorization())
1907  return false;
1908  }
1909  ++OI;
1910  }
1911  AI++;
1912  }
1913  }
1914 
1915  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
1916  return isSafeForVectorization();
1917 }
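// Editor's note: the pair enumeration above, reduced to plain indices in an
// illustrative, standalone helper (not part of the LLVM source): every access
// is paired with every later access of the relevant classes, and each pair is
// passed to the dependence test in program order.
#include <utility>
#include <vector>

static std::vector<std::pair<unsigned, unsigned>>
pairsInProgramOrder(const std::vector<unsigned> &Indices) {
  std::vector<std::pair<unsigned, unsigned>> Pairs;
  for (size_t I = 0; I < Indices.size(); ++I)
    for (size_t J = I + 1; J < Indices.size(); ++J) {
      unsigned A = Indices[I], B = Indices[J];
      if (A > B)
        std::swap(A, B); // isDependent() requires program order (AIdx < BIdx)
      Pairs.emplace_back(A, B);
    }
  return Pairs;
}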
1918 
1919 SmallVector<Instruction *, 4>
1920 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
1921  MemAccessInfo Access(Ptr, isWrite);
1922  auto &IndexVector = Accesses.find(Access)->second;
1923 
1924  SmallVector<Instruction *, 4> Insts;
1925  transform(IndexVector,
1926  std::back_inserter(Insts),
1927  [&](unsigned Idx) { return this->InstMap[Idx]; });
1928  return Insts;
1929 }
1930 
1931 const char *MemoryDepChecker::Dependence::DepName[] = {
1932  "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
1933  "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
1934 
1935 void MemoryDepChecker::Dependence::print(
1936  raw_ostream &OS, unsigned Depth,
1937  const SmallVectorImpl<Instruction *> &Instrs) const {
1938  OS.indent(Depth) << DepName[Type] << ":\n";
1939  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
1940  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
1941 }
1942 
1943 bool LoopAccessInfo::canAnalyzeLoop() {
1944  // We need to have a loop header.
1945  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
1946  << TheLoop->getHeader()->getParent()->getName() << ": "
1947  << TheLoop->getHeader()->getName() << '\n');
1948 
1949  // We can only analyze innermost loops.
1950  if (!TheLoop->isInnermost()) {
1951  LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
1952  recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
1953  return false;
1954  }
1955 
1956  // We must have a single backedge.
1957  if (TheLoop->getNumBackEdges() != 1) {
1958  LLVM_DEBUG(
1959  dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1960  recordAnalysis("CFGNotUnderstood")
1961  << "loop control flow is not understood by analyzer";
1962  return false;
1963  }
1964 
1965  // ScalarEvolution needs to be able to find the exit count.
1966  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
1967  if (isa<SCEVCouldNotCompute>(ExitCount)) {
1968  recordAnalysis("CantComputeNumberOfIterations")
1969  << "could not determine number of loop iterations";
1970  LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
1971  return false;
1972  }
1973 
1974  return true;
1975 }
1976 
1977 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
1978  const TargetLibraryInfo *TLI,
1979  DominatorTree *DT) {
1980  // Holds the Load and Store instructions.
1981  SmallVector<LoadInst *, 16> Loads;
1982  SmallVector<StoreInst *, 16> Stores;
1983 
1984  // Holds all the different accesses in the loop.
1985  unsigned NumReads = 0;
1986  unsigned NumReadWrites = 0;
1987 
1988  bool HasComplexMemInst = false;
1989 
1990  // A runtime check is only legal to insert if there are no convergent calls.
1991  HasConvergentOp = false;
1992 
1993  PtrRtChecking->Pointers.clear();
1994  PtrRtChecking->Need = false;
1995 
1996  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
1997 
1998  const bool EnableMemAccessVersioningOfLoop =
1999  EnableMemAccessVersioning &&
2000  !TheLoop->getHeader()->getParent()->hasOptSize();
2001 
2002  // For each block.
2003  for (BasicBlock *BB : TheLoop->blocks()) {
2004  // Scan the BB and collect legal loads and stores. Also detect any
2005  // convergent instructions.
2006  for (Instruction &I : *BB) {
2007  if (auto *Call = dyn_cast<CallBase>(&I)) {
2008  if (Call->isConvergent())
2009  HasConvergentOp = true;
2010  }
2011 
2012  // If we have found both a non-vectorizable memory instruction and a
2013  // convergent operation in this loop, there is no reason to continue the search.
2014  if (HasComplexMemInst && HasConvergentOp) {
2015  CanVecMem = false;
2016  return;
2017  }
2018 
2019  // Avoid hitting recordAnalysis multiple times.
2020  if (HasComplexMemInst)
2021  continue;
2022 
2023  // If this is a load, save it. If this instruction can read from memory
2024  // but is not a load, then we quit. Notice that we don't handle function
2025  // calls that read or write.
2026  if (I.mayReadFromMemory()) {
2027  // Many math library functions read the rounding mode. We will only
2028  // vectorize a loop if it contains known function calls that don't set
2029  // the flag. Therefore, it is safe to ignore this read from memory.
2030  auto *Call = dyn_cast<CallInst>(&I);
2031  if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2032  continue;
2033 
2034  // If the function has an explicit vectorized counterpart, we can safely
2035  // assume that it can be vectorized.
2036  if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2037  !VFDatabase::getMappings(*Call).empty())
2038  continue;
2039 
2040  auto *Ld = dyn_cast<LoadInst>(&I);
2041  if (!Ld) {
2042  recordAnalysis("CantVectorizeInstruction", Ld)
2043  << "instruction cannot be vectorized";
2044  HasComplexMemInst = true;
2045  continue;
2046  }
2047  if (!Ld->isSimple() && !IsAnnotatedParallel) {
2048  recordAnalysis("NonSimpleLoad", Ld)
2049  << "read with atomic ordering or volatile read";
2050  LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2051  HasComplexMemInst = true;
2052  continue;
2053  }
2054  NumLoads++;
2055  Loads.push_back(Ld);
2056  DepChecker->addAccess(Ld);
2057  if (EnableMemAccessVersioningOfLoop)
2058  collectStridedAccess(Ld);
2059  continue;
2060  }
2061 
2062  // Save 'store' instructions. Abort if other instructions write to memory.
2063  if (I.mayWriteToMemory()) {
2064  auto *St = dyn_cast<StoreInst>(&I);
2065  if (!St) {
2066  recordAnalysis("CantVectorizeInstruction", St)
2067  << "instruction cannot be vectorized";
2068  HasComplexMemInst = true;
2069  continue;
2070  }
2071  if (!St->isSimple() && !IsAnnotatedParallel) {
2072  recordAnalysis("NonSimpleStore", St)
2073  << "write with atomic ordering or volatile write";
2074  LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2075  HasComplexMemInst = true;
2076  continue;
2077  }
2078  NumStores++;
2079  Stores.push_back(St);
2080  DepChecker->addAccess(St);
2081  if (EnableMemAccessVersioningOfLoop)
2082  collectStridedAccess(St);
2083  }
2084  } // Next instr.
2085  } // Next block.
2086 
2087  if (HasComplexMemInst) {
2088  CanVecMem = false;
2089  return;
2090  }
2091 
2092  // Now we have two lists that hold the loads and the stores.
2093  // Next, we find the pointers that they use.
2094 
2095  // Check if we see any stores. If there are no stores, then we don't
2096  // care if the pointers are *restrict*.
2097  if (!Stores.size()) {
2098  LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2099  CanVecMem = true;
2100  return;
2101  }
2102 
2103  MemoryDepChecker::DepCandidates DependentAccesses;
2104  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
2105 
2106  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2107  // multiple times on the same object. If the ptr is accessed twice, once
2108  // for read and once for write, it will only appear once (on the write
2109  // list). This is okay, since we are going to check for conflicts between
2110  // writes and between reads and writes, but not between reads and reads.
2111  SmallSet<std::pair<Value *, Type *>, 16> Seen;
2112 
2113  // Record uniform store addresses to identify if we have multiple stores
2114  // to the same address.
2115  SmallPtrSet<Value *, 16> UniformStores;
2116 
2117  for (StoreInst *ST : Stores) {
2118  Value *Ptr = ST->getPointerOperand();
2119 
2120  if (isUniform(Ptr)) {
2121  // Record store instructions to loop invariant addresses
2122  StoresToInvariantAddresses.push_back(ST);
2123  HasDependenceInvolvingLoopInvariantAddress |=
2124  !UniformStores.insert(Ptr).second;
2125  }
2126 
2127  // If we did *not* see this pointer before, insert it into the read-write
2128  // list. At this phase it is only a 'write' list.
2129  Type *AccessTy = getLoadStoreType(ST);
2130  if (Seen.insert({Ptr, AccessTy}).second) {
2131  ++NumReadWrites;
2132 
2133  MemoryLocation Loc = MemoryLocation::get(ST);
2134  // The TBAA metadata could have a control dependency on the predication
2135  // condition, so we cannot rely on it when determining whether or not we
2136  // need runtime pointer checks.
2137  if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2138  Loc.AATags.TBAA = nullptr;
2139 
2140  visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2141  [&Accesses, AccessTy, Loc](Value *Ptr) {
2142  MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2143  Accesses.addStore(NewLoc, AccessTy);
2144  });
2145  }
2146  }
2147 
2148  if (IsAnnotatedParallel) {
2149  LLVM_DEBUG(
2150  dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2151  << "checks.\n");
2152  CanVecMem = true;
2153  return;
2154  }
2155 
2156  for (LoadInst *LD : Loads) {
2157  Value *Ptr = LD->getPointerOperand();
2158  // If we did *not* see this pointer before, insert it into the
2159  // read list. If we *did* see it before, then it is already in
2160  // the read-write list. This allows us to vectorize expressions
2161  // such as A[i] += x, because the address of A[i] is a read-write
2162  // pointer. This only works if the index of A[i] is consecutive.
2163  // If the address of i is unknown (for example A[B[i]]) then we may
2164  // read a few words, modify, and write a few words, and some of the
2165  // words may be written to the same address.
2166  bool IsReadOnlyPtr = false;
2167  Type *AccessTy = getLoadStoreType(LD);
2168  if (Seen.insert({Ptr, AccessTy}).second ||
2169  !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides)) {
2170  ++NumReads;
2171  IsReadOnlyPtr = true;
2172  }
2173 
2174  // See if there is an unsafe dependency between a load from a uniform
2175  // address and a store to the same uniform address.
2176  if (UniformStores.count(Ptr)) {
2177  LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2178  "load and uniform store to the same address!\n");
2179  HasDependenceInvolvingLoopInvariantAddress = true;
2180  }
2181 
2182  MemoryLocation Loc = MemoryLocation::get(LD);
2183  // The TBAA metadata could have a control dependency on the predication
2184  // condition, so we cannot rely on it when determining whether or not we
2185  // need runtime pointer checks.
2186  if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2187  Loc.AATags.TBAA = nullptr;
2188 
2189  visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2190  [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2191  MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2192  Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2193  });
2194  }
2195 
2196  // If we write (or read-write) to a single destination and there are no
2197  // other reads in this loop then it is safe to vectorize.
2198  if (NumReadWrites == 1 && NumReads == 0) {
2199  LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2200  CanVecMem = true;
2201  return;
2202  }
2203 
2204  // Build dependence sets and check whether we need a runtime pointer bounds
2205  // check.
2206  Accesses.buildDependenceSets();
2207 
2208  // Find pointers with computable bounds. We are going to use this information
2209  // to place a runtime bound check.
2210  Value *UncomputablePtr = nullptr;
2211  bool CanDoRTIfNeeded =
2212  Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2213  SymbolicStrides, UncomputablePtr, false);
2214  if (!CanDoRTIfNeeded) {
2215  auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2216  recordAnalysis("CantIdentifyArrayBounds", I)
2217  << "cannot identify array bounds";
2218  LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2219  << "the array bounds.\n");
2220  CanVecMem = false;
2221  return;
2222  }
2223 
2224  LLVM_DEBUG(
2225  dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2226 
2227  CanVecMem = true;
2228  if (Accesses.isDependencyCheckNeeded()) {
2229  LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2230  CanVecMem = DepChecker->areDepsSafe(
2231  DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2232  MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
2233 
2234  if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2235  LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2236 
2237  // Clear the dependency checks. We assume they are not needed.
2238  Accesses.resetDepChecks(*DepChecker);
2239 
2240  PtrRtChecking->reset();
2241  PtrRtChecking->Need = true;
2242 
2243  auto *SE = PSE->getSE();
2244  UncomputablePtr = nullptr;
2245  CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2246  *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2247 
2248  // Check that we found the bounds for the pointer.
2249  if (!CanDoRTIfNeeded) {
2250  auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2251  recordAnalysis("CantCheckMemDepsAtRunTime", I)
2252  << "cannot check memory dependencies at runtime";
2253  LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2254  CanVecMem = false;
2255  return;
2256  }
2257 
2258  CanVecMem = true;
2259  }
2260  }
2261 
2262  if (HasConvergentOp) {
2263  recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2264  << "cannot add control dependency to convergent operation";
2265  LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2266  "would be needed with a convergent operation\n");
2267  CanVecMem = false;
2268  return;
2269  }
2270 
2271  if (CanVecMem)
2272  LLVM_DEBUG(
2273  dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2274  << (PtrRtChecking->Need ? "" : " don't")
2275  << " need runtime memory checks.\n");
2276  else
2277  emitUnsafeDependenceRemark();
2278 }
2279 
2280 void LoopAccessInfo::emitUnsafeDependenceRemark() {
2281  auto Deps = getDepChecker().getDependences();
2282  if (!Deps)
2283  return;
2284  auto Found = std::find_if(
2285  Deps->begin(), Deps->end(), [](const MemoryDepChecker::Dependence &D) {
2286  return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2287  MemoryDepChecker::VectorizationSafetyStatus::Safe;
2288  });
2289  if (Found == Deps->end())
2290  return;
2291  MemoryDepChecker::Dependence Dep = *Found;
2292 
2293  LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2294 
2295  // Emit remark for first unsafe dependence
2296  OptimizationRemarkAnalysis &R =
2297  recordAnalysis("UnsafeDep", Dep.getDestination(*this))
2298  << "unsafe dependent memory operations in loop. Use "
2299  "#pragma loop distribute(enable) to allow loop distribution "
2300  "to attempt to isolate the offending operations into a separate "
2301  "loop";
2302 
2303  switch (Dep.Type) {
2304  case MemoryDepChecker::Dependence::NoDep:
2305  case MemoryDepChecker::Dependence::Forward:
2306  case MemoryDepChecker::Dependence::BackwardVectorizable:
2307  llvm_unreachable("Unexpected dependence");
2308  case MemoryDepChecker::Dependence::Backward:
2309  R << "\nBackward loop carried data dependence.";
2310  break;
2311  case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2312  R << "\nForward loop carried data dependence that prevents "
2313  "store-to-load forwarding.";
2314  break;
2315  case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2316  R << "\nBackward loop carried data dependence that prevents "
2317  "store-to-load forwarding.";
2318  break;
2319  case MemoryDepChecker::Dependence::Unknown:
2320  R << "\nUnknown data dependence.";
2321  break;
2322  }
2323 
2324  if (Instruction *I = Dep.getSource(*this)) {
2325  DebugLoc SourceLoc = I->getDebugLoc();
2326  if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2327  SourceLoc = DD->getDebugLoc();
2328  if (SourceLoc)
2329  R << " Memory location is the same as accessed at "
2330  << ore::NV("Location", SourceLoc);
2331  }
2332 }
2333 
2334 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2335  DominatorTree *DT) {
2336  assert(TheLoop->contains(BB) && "Unknown block used");
2337 
2338  // Blocks that do not dominate the latch need predication.
2339  BasicBlock* Latch = TheLoop->getLoopLatch();
2340  return !DT->dominates(BB, Latch);
2341 }
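// Editor's note: an illustrative example (not part of the LLVM source) of the
// dominance test above. In a loop of the form
//
//   for (i = 0; i < n; i++) {   // header: dominates the latch
//     if (cond[i])
//       a[i] = 0;               // conditional block: does NOT dominate the
//                               // latch, so its accesses need predication
//   }                           // latch
//
// the conditional store executes only on some iterations, so turning it into
// straight-line vector code requires masking/predicating the access.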
2342 
2343 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2344  Instruction *I) {
2345  assert(!Report && "Multiple reports generated");
2346 
2347  Value *CodeRegion = TheLoop->getHeader();
2348  DebugLoc DL = TheLoop->getStartLoc();
2349 
2350  if (I) {
2351  CodeRegion = I->getParent();
2352  // If there is no debug location attached to the instruction, fall back to
2353  // using the loop's.
2354  if (I->getDebugLoc())
2355  DL = I->getDebugLoc();
2356  }
2357 
2358  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2359  CodeRegion);
2360  return *Report;
2361 }
2362 
2363 bool LoopAccessInfo::isUniform(Value *V) const {
2364  auto *SE = PSE->getSE();
2365  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
2366  // never considered uniform.
2367  // TODO: Is this really what we want? Even without FP SCEV, we may want some
2368  // trivially loop-invariant FP values to be considered uniform.
2369  if (!SE->isSCEVable(V->getType()))
2370  return false;
2371  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
2372 }
2373 
2374 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2375  Value *Ptr = getLoadStorePointerOperand(MemAccess);
2376  if (!Ptr)
2377  return;
2378 
2379  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2380  if (!Stride)
2381  return;
2382 
2383  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2384  "versioning:");
2385  LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
2386 
2387  // Avoid adding the "Stride == 1" predicate when we know that
2388  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2389  // or zero iteration loop, as Trip-Count <= Stride == 1.
2390  //
2391  // TODO: We are currently not making a very informed decision on when it is
2392  // beneficial to apply stride versioning. It might make more sense that the
2393  // users of this analysis (such as the vectorizer) will trigger it, based on
2394  // their specific cost considerations; For example, in cases where stride
2395  // versioning does not help resolving memory accesses/dependences, the
2396  // vectorizer should evaluate the cost of the runtime test, and the benefit
2397  // of various possible stride specializations, considering the alternatives
2398  // of using gather/scatters (if available).
2399 
2400  const SCEV *StrideExpr = PSE->getSCEV(Stride);
2401  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2402 
2403  // Match the types so we can compare the stride and the BETakenCount.
2404  // The Stride can be positive/negative, so we sign extend Stride;
2405  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2406  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2407  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2408  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
2409  const SCEV *CastedStride = StrideExpr;
2410  const SCEV *CastedBECount = BETakenCount;
2411  ScalarEvolution *SE = PSE->getSE();
2412  if (BETypeSizeBits >= StrideTypeSizeBits)
2413  CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2414  else
2415  CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2416  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2417  // Since TripCount == BackEdgeTakenCount + 1, checking:
2418  // "Stride >= TripCount" is equivalent to checking:
2419  // Stride - BETakenCount > 0
2420  if (SE->isKnownPositive(StrideMinusBETaken)) {
2421  LLVM_DEBUG(
2422  dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2423  "Stride==1 predicate will imply that the loop executes "
2424  "at most once.\n");
2425  return;
2426  }
2427  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2428 
2429  SymbolicStrides[Ptr] = Stride;
2430  StrideSet.insert(Stride);
2431 }
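// Editor's note: an illustrative, standalone restatement (not part of the
// LLVM source) of the "no point in versioning" test above, assuming the
// stride and backedge-taken count are already known constants.
#include <cstdint>

static bool strideVersioningPointless(int64_t Stride, int64_t BETakenCount) {
  // TripCount == BackedgeTakenCount + 1, so Stride >= TripCount is the same
  // as Stride - BETakenCount > 0.
  return Stride - BETakenCount > 0;
}
// E.g. Stride = 100 in a loop running 64 iterations (BETakenCount = 63):
// 100 - 63 > 0, so a "Stride == 1" predicate would imply the loop executes
// at most once, and versioning is skipped.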
2432 
2433 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2434  const TargetLibraryInfo *TLI, AAResults *AA,
2435  DominatorTree *DT, LoopInfo *LI)
2436  : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2437  PtrRtChecking(nullptr),
2438  DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
2439  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
2440  if (canAnalyzeLoop()) {
2441  analyzeLoop(AA, LI, TLI, DT);
2442  }
2443 }
2444 
2445 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2446  if (CanVecMem) {
2447  OS.indent(Depth) << "Memory dependences are safe";
2448  if (MaxSafeDepDistBytes != -1ULL)
2449  OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
2450  << " bytes";
2451  if (PtrRtChecking->Need)
2452  OS << " with run-time checks";
2453  OS << "\n";
2454  }
2455 
2456  if (HasConvergentOp)
2457  OS.indent(Depth) << "Has convergent operation in loop\n";
2458 
2459  if (Report)
2460  OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2461 
2462  if (auto *Dependences = DepChecker->getDependences()) {
2463  OS.indent(Depth) << "Dependences:\n";
2464  for (auto &Dep : *Dependences) {
2465  Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2466  OS << "\n";
2467  }
2468  } else
2469  OS.indent(Depth) << "Too many dependences, not recorded\n";
2470 
2471  // List the pairs of accesses that need run-time checks to prove independence.
2472  PtrRtChecking->print(OS, Depth);
2473  OS << "\n";
2474 
2475  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2476  << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2477  << "found in loop.\n";
2478 
2479  OS.indent(Depth) << "SCEV assumptions:\n";
2480  PSE->getPredicate().print(OS, Depth);
2481 
2482  OS << "\n";
2483 
2484  OS.indent(Depth) << "Expressions re-written:\n";
2485  PSE->print(OS, Depth);
2486 }
2487 
2488 LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
2489  initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
2490 }
2491 
2492 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
2493  auto &LAI = LoopAccessInfoMap[L];
2494 
2495  if (!LAI)
2496  LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);
2497 
2498  return *LAI;
2499 }
2500 
2501 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
2502  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);
2503 
2504  for (Loop *TopLevelLoop : *LI)
2505  for (Loop *L : depth_first(TopLevelLoop)) {
2506  OS.indent(2) << L->getHeader()->getName() << ":\n";
2507  auto &LAI = LAA.getInfo(L);
2508  LAI.print(OS, 4);
2509  }
2510 }
2511 
2512 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
2513  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2514  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2515  TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2516  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2517  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2518  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2519 
2520  return false;
2521 }
2522 
2523 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
2524  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2525  AU.addRequiredTransitive<AAResultsWrapperPass>();
2526  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2527  AU.addRequiredTransitive<LoopInfoWrapperPass>();
2528 
2529  AU.setPreservesAll();
2530 }
2531 
2532 char LoopAccessLegacyAnalysis::ID = 0;
2533 static const char laa_name[] = "Loop Access Analysis";
2534 #define LAA_NAME "loop-accesses"
2535 
2536 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2537 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2538 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
2539 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2540 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2541 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2542 
2543 AnalysisKey LoopAccessAnalysis::Key;
2544 
2545 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
2546  LoopStandardAnalysisResults &AR) {
2547  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
2548 }
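// Editor's note: a minimal usage sketch (not part of the LLVM source) showing
// how a new-pass-manager loop pass might query this analysis; the pass name
// and surrounding boilerplate are hypothetical.
//
//   PreservedAnalyses MyLoopPass::run(Loop &L, LoopAnalysisManager &AM,
//                                     LoopStandardAnalysisResults &AR,
//                                     LPMUpdater &) {
//     const LoopAccessInfo &LAI = AM.getResult<LoopAccessAnalysis>(L, AR);
//     if (LAI.canVectorizeMemory()) {
//       // Memory dependences are safe (possibly guarded by runtime checks).
//     }
//     return PreservedAnalyses::all();
//   }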
2549 
2550 namespace llvm {
2551 
2552  Pass *createLAAPass() {
2553  return new LoopAccessLegacyAnalysis();
2554  }
2555 
2556 } // end namespace llvm
llvm::sortPtrAccesses
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
Definition: LoopAccessAnalysis.cpp:1362
i
i
Definition: README.txt:29
llvm::MemoryDepChecker::clearDependences
void clearDependences()
Definition: LoopAccessAnalysis.h:224
llvm::APInt::isStrictlyPositive
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition: APInt.h:339
set
We currently generate a but we really shouldn eax ecx xorl edx divl ecx eax divl ecx movl eax ret A similar code sequence works for division We currently compile i32 v2 eax eax jo LBB1_2 atomic and others It is also currently not done for read modify write instructions It is also current not done if the OF or CF flags are needed The shift operators have the complication that when the shift count is EFLAGS is not set
Definition: README.txt:1277
llvm::LoopAccessLegacyAnalysis::ID
static char ID
Definition: LoopAccessAnalysis.h:773
llvm::MemoryDepChecker::Dependence::isBackward
bool isBackward() const
Lexically backward dependence.
Definition: LoopAccessAnalysis.cpp:1458
llvm::MemoryDepChecker
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
Definition: LoopAccessAnalysis.h:86
llvm::Loop::isLoopInvariant
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
Definition: LoopInfo.cpp:60
llvm::LoopAccessLegacyAnalysis
This analysis provides dependence information for the memory accesses of a loop.
Definition: LoopAccessAnalysis.h:771
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:104
llvm::MemoryLocation::get
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
Definition: MemoryLocation.cpp:35
llvm::ScalarEvolution::getNegativeSCEV
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
Definition: ScalarEvolution.cpp:4437
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:17
llvm::sys::path::const_iterator::end
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:235
getMinFromExprs
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
Definition: LoopAccessAnalysis.cpp:359
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::MemoryDepChecker::VectorizationSafetyStatus::Safe
@ Safe
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::LoopAccessInfo::isUniform
bool isUniform(Value *V) const
Returns true if the value V is uniform within the loop.
Definition: LoopAccessAnalysis.cpp:2363
llvm::EquivalenceClasses::getLeaderValue
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
Definition: EquivalenceClasses.h:191
llvm::SCEVAddRecExpr::isAffine
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
Definition: ScalarEvolutionExpressions.h:370
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:104
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:218
llvm::MemoryLocation::Ptr
const Value * Ptr
The address of the start of the location.
Definition: MemoryLocation.h:218
llvm::SCEVAddRecExpr::getStart
const SCEV * getStart() const
Definition: ScalarEvolutionExpressions.h:353
llvm::getVectorIntrinsicIDForCall
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
Definition: VectorUtils.cpp:135
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:444
llvm::RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
Definition: LoopAccessAnalysis.cpp:171
llvm::Function
Definition: Function.h:60
llvm::cl::location
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:447
llvm::Loop
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:546
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
Pass.h
llvm::LoopBase::contains
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
Definition: LoopInfo.h:138
EnableMemAccessVersioning
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
llvm::ARM_MB::LD
@ LD
Definition: ARMBaseInfo.h:72
llvm::isGuaranteedNotToBeUndefOrPoison
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Definition: ValueTracking.cpp:5404
llvm::PredicatedScalarEvolution
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
Definition: ScalarEvolution.h:2176
High
uint64_t High
Definition: NVVMIntrRange.cpp:61
llvm::MemoryDepChecker::Dependence::getSource
Instruction * getSource(const LoopAccessInfo &LAI) const
Return the source instruction of the dependence.
Definition: LoopAccessAnalysis.h:824
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1185
llvm::EquivalenceClasses::member_end
member_iterator member_end() const
Definition: EquivalenceClasses.h:178
CheckType
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
Definition: SelectionDAGISel.cpp:2485
llvm::APInt::getSExtValue
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1478
llvm::EquivalenceClasses
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
Definition: EquivalenceClasses.h:60
llvm::PredicatedScalarEvolution::getBackedgeTakenCount
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
Definition: ScalarEvolution.cpp:14146
ErrorHandling.h
LoopAccessAnalysis.h
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:729
llvm::Loop::getStartLoc
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:630
llvm::ScalarEvolution
The main scalar evolution driver.
Definition: ScalarEvolution.h:449
llvm::VectorizerParams::VectorizationInterleave
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Definition: LoopAccessAnalysis.h:43
llvm::LoopAccessLegacyAnalysis::getInfo
const LoopAccessInfo & getInfo(Loop *L)
Query the result of the loop access information for the loop L.
Definition: LoopAccessAnalysis.cpp:2492
MaxDependences
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
ValueTracking.h
OptimizationRemarkEmitter.h
llvm::SCEV::NoWrapMask
@ NoWrapMask
Definition: ScalarEvolution.h:135
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
llvm::MemoryDepChecker::Dependence::isPossiblyBackward
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Definition: LoopAccessAnalysis.cpp:1474
llvm::AliasSetTracker
Definition: AliasSetTracker.h:322
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::LoopAccessAnalysis
This analysis provides dependence information for the memory accesses of a loop.
Definition: LoopAccessAnalysis.h:813
APInt.h
llvm::getLoadStoreType
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Definition: Instructions.h:5362
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:36
true
basic Basic Alias true
Definition: BasicAliasAnalysis.cpp:1886
llvm::DenseMapIterator
Definition: DenseMap.h:57
ScalarEvolution.h
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
llvm::getPtrStride
int64_t getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const ValueToValueMap &StridesMap=ValueToValueMap(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
Definition: LoopAccessAnalysis.cpp:1186
DenseMap.h
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1411
llvm::RuntimePointerChecking::needsChecking
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
Definition: LoopAccessAnalysis.cpp:348
llvm::sys::path::begin
const_iterator begin(StringRef path, Style style=Style::native)
Get begin iterator over path.
Definition: Path.cpp:226
llvm::LoopInfoWrapperPass
The legacy pass manager's analysis pass to compute loop information.
Definition: LoopInfo.h:1287
llvm::LoopStandardAnalysisResults
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Definition: LoopAnalysisManager.h:51
llvm::copy
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1668
llvm::MemoryDepChecker::areDepsSafe
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const ValueToValueMap &Strides)
Check whether the dependencies between the accesses are safe.
Definition: LoopAccessAnalysis.cpp:1842
llvm::SmallSet
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:136
llvm::isConsecutiveAccess
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
Definition: LoopAccessAnalysis.cpp:1409
llvm::Optional< int >
llvm::MapVector
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:37
llvm::SmallPtrSet< Value *, 16 >
llvm::ore::NV
DiagnosticInfoOptimizationBase::Argument NV
Definition: OptimizationRemarkEmitter.h:136
llvm::RuntimePointerChecking::getNumberOfChecks
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
Definition: LoopAccessAnalysis.h:466
Operator.h
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::EquivalenceClasses::insert
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
Definition: EquivalenceClasses.h:220
STLExtras.h
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:654
llvm::LoadInst::getPointerOperand
Value * getPointerOperand()
Definition: Instructions.h:260
llvm::PredicatedScalarEvolution::getAsAddRec
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
Definition: ScalarEvolution.cpp:14213
llvm::MemoryDepChecker::Dependence::NoDep
@ NoDep
Definition: LoopAccessAnalysis.h:111
llvm::ArrayRef::empty
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:159
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:101
DepthFirstIterator.h
llvm::stripIntegerCast
Value * stripIntegerCast(Value *V)
Definition: LoopAccessAnalysis.cpp:137
I1
@ I1
Definition: DXILOpLowering.cpp:37
F
#define F(x, y, z)
Definition: MD5.cpp:55
llvm::RISCVFenceField::R
@ R
Definition: RISCVBaseInfo.h:240
llvm::MemoryDepChecker::Dependence::DepType
DepType
The type of the dependence.
Definition: LoopAccessAnalysis.h:109
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:55
LoopAnalysisManager.h
llvm::MemoryDepChecker::Dependence::isForward
bool isForward() const
Lexically forward dependence.
Definition: LoopAccessAnalysis.cpp:1478
AliasAnalysis.h
PointerIntPair.h
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ScalarEvolution::getMulExpr
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
Definition: ScalarEvolution.cpp:3050
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
Instruction.h
CommandLine.h
llvm::MemoryLocation::AATags
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
Definition: MemoryLocation.h:231
llvm::Intrinsic::getType
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Definition: Function.cpp:1374
llvm::LoopAccessLegacyAnalysis::print
void print(raw_ostream &OS, const Module *M=nullptr) const override
Print the result of the analysis when invoked with -analyze.
Definition: LoopAccessAnalysis.cpp:2501
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1617
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:312
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::ScalarEvolution::getOne
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
Definition: ScalarEvolution.h:645
Constants.h
llvm::MemoryDepChecker::VectorizationSafetyStatus
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
Definition: LoopAccessAnalysis.h:96
llvm::AAResults
Definition: AliasAnalysis.h:511
isNoWrapAddRec
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
Definition: LoopAccessAnalysis.cpp:1139
llvm::LoopAccessInfo::blockNeedsPredication
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
Definition: LoopAccessAnalysis.cpp:2334
llvm::MemoryDepChecker::Dependence::BackwardVectorizable
@ BackwardVectorizable
Definition: LoopAccessAnalysis.h:131
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::Loop::isAnnotatedParallel
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:563
llvm::ARM_PROC::A
@ A
Definition: ARMBaseInfo.h:34
llvm::getStrideFromPointer
Value * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
Definition: VectorUtils.cpp:209
llvm::ScalarEvolution::getUMaxExpr
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
Definition: ScalarEvolution.cpp:4191
InstrTypes.h
llvm::MemoryDepChecker::getOrderForAccess
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
Definition: LoopAccessAnalysis.h:249
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::MemoryDepChecker::Dependence::print
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
Definition: LoopAccessAnalysis.cpp:1935
llvm::PredicatedScalarEvolution::hasNoOverflow
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
Definition: ScalarEvolution.cpp:14197
Check
#define Check(C,...)
Definition: Lint.cpp:170
llvm::LoopBase::blocks
iterator_range< block_iterator > blocks() const
Definition: LoopInfo.h:194
llvm::AMDGPU::PALMD::Key
Key
PAL metadata keys.
Definition: AMDGPUMetadata.h:486
TargetLibraryInfo.h
llvm::RuntimeCheckingPtrGroup::AddressSpace
unsigned AddressSpace
Address space of the involved pointers.
Definition: LoopAccessAnalysis.h:357
false
Definition: StackSlotColoring.cpp:141
llvm::M68kBeads::DA
@ DA
Definition: M68kBaseInfo.h:59
llvm::dwarf::Index
Index
Definition: Dwarf.h:472
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
First
into llvm powi allowing the code generator to produce balanced multiplication trees First
Definition: README.txt:54
llvm::IntegerType
Class to represent integer types.
Definition: DerivedTypes.h:40
llvm::EquivalenceClasses::end
iterator end() const
Definition: EquivalenceClasses.h:168
llvm::Instruction
Definition: Instruction.h:42
isInBoundsGep
static bool isInBoundsGep(Value *Ptr)
Definition: LoopAccessAnalysis.cpp:1131
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:302
llvm::MemoryDepChecker::addAccess
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
Definition: LoopAccessAnalysis.cpp:1422
llvm::APInt::getZExtValue
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1466
llvm::MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding
@ BackwardVectorizableButPreventsForwarding
Definition: LoopAccessAnalysis.h:133
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:54
llvm::SmallVectorImpl::resize
void resize(size_type N)
Definition: SmallVector.h:619
llvm::ThreadPriority::Low
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
llvm::ScalarEvolutionWrapperPass
Definition: ScalarEvolution.h:2145
DebugLoc.h
SmallPtrSet.h
llvm::BasicBlock::getModule
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:147
PatternMatch.h
llvm::MCID::Call
@ Call
Definition: MCInstrDesc.h:155
llvm::getPointersDiff
Optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
Definition: LoopAccessAnalysis.cpp:1296
llvm::ScalarEvolution::getEqualPredicate
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
Definition: ScalarEvolution.cpp:13796
llvm::AddressSpace
AddressSpace
Definition: NVPTXBaseInfo.h:21
VectorizationInterleave
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
llvm::RuntimePointerChecking::reset
void reset()
Reset the state of the pointer runtime information.
Definition: LoopAccessAnalysis.h:419
llvm::None
const NoneType None
Definition: None.h:24
llvm::CallingConv::ID
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition: CallingConv.h:24
Type.h
llvm::RuntimePointerChecking::PointerInfo::AliasSetId
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
Definition: LoopAccessAnalysis.h:401
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::dxil::PointerTypeAnalysis::run
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
Definition: PointerTypeAnalysis.cpp:101
LoopInfo.h
isNoWrap
static bool isNoWrap(PredicatedScalarEvolution &PSE, const ValueToValueMap &Strides, Value *Ptr, Type *AccessTy, Loop *L)
Check whether a pointer address cannot wrap.
Definition: LoopAccessAnalysis.cpp:744
llvm::MemoryDepChecker::getInstructionsForAccess
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
Definition: LoopAccessAnalysis.cpp:1920
llvm::ScalarEvolution::getSCEV
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
Definition: ScalarEvolution.cpp:4406
isSafeDependenceDistance
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &BackedgeTakenCount, const SCEV &Dist, uint64_t Stride, uint64_t TypeByteSize)
Given a non-constant (unknown) dependence-distance Dist between two memory accesses that have the same stride and type size, in a loop whose backedge-taken count is BackedgeTakenCount, check whether it can be proven statically that Dist is large enough that the accesses cannot overlap during execution of the loop.
Definition: LoopAccessAnalysis.cpp:1555
llvm::function_ref
An efficient, type-erasing, non-owning reference to a callable.
Definition: STLFunctionalExtras.h:36
llvm::RuntimePointerChecking::PointerInfo
Definition: LoopAccessAnalysis.h:386
VectorUtils.h
llvm::MemoryDepChecker::VectorizationSafetyStatus::Unsafe
@ Unsafe
llvm::initializeLoopAccessLegacyAnalysisPass
void initializeLoopAccessLegacyAnalysisPass(PassRegistry &)
BasicBlock.h
llvm::cl::opt
Definition: CommandLine.h:1392
llvm::SCEV
This class represents an analyzed expression in the program.
Definition: ScalarEvolution.h:75
llvm::EquivalenceClasses::iterator
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
Definition: EquivalenceClasses.h:165
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:297
llvm::getUnderlyingObjects
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions.
Definition: ValueTracking.cpp:4500
llvm::SmallSet::count
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:203
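A minimal sketch of the drop_front idiom; the function name is illustrative:

#include "llvm/ADT/ArrayRef.h"

int sumTail(llvm::ArrayRef<int> Vals) {
  if (Vals.empty())
    return 0;
  int Sum = 0;
  for (int V : Vals.drop_front()) // View without the first element; no copy.
    Sum += V;
  return Sum;
}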
llvm::RuntimePointerChecking
Holds information about the memory runtime legality checks to verify that a group of pointers do not overlap.
Definition: LoopAccessAnalysis.h:382
llvm::VectorizerParams::VectorizationFactor
static unsigned VectorizationFactor
VF as overridden by the user.
Definition: LoopAccessAnalysis.h:41
llvm::RuntimePointerChecking::Need
bool Need
This flag indicates if we need to add the runtime check.
Definition: LoopAccessAnalysis.h:477
llvm::getPointerOperand
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
Definition: Instructions.h:5331
uint64_t
llvm::MemoryDepChecker::Dependence
Dependence between memory access instructions.
Definition: LoopAccessAnalysis.h:107
llvm::VFDatabase::getMappings
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated with the CallInst CI.
Definition: VectorUtils.h:249
llvm::PredicatedScalarEvolution::setNoOverflow
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
Definition: ScalarEvolution.cpp:14181
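Taken together with hasNoOverflow above, the pattern is: query first, and only add a wrap predicate when nothing is proven yet. A minimal sketch with an illustrative function name:

#include "llvm/Analysis/ScalarEvolution.h"

// Assumes Ptr is analyzable by PSE; the added predicate must later be
// validated by a runtime check when the loop is versioned.
void assumeNoUnsignedWrap(llvm::PredicatedScalarEvolution &PSE,
                          llvm::Value *Ptr) {
  if (PSE.hasNoOverflow(Ptr, llvm::SCEVWrapPredicate::IncrementNUSW))
    return; // Already proven under the current predicate set.
  PSE.setNoOverflow(Ptr, llvm::SCEVWrapPredicate::IncrementNUSW);
}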
llvm::MemoryDepChecker::Dependence::Backward
@ Backward
Definition: LoopAccessAnalysis.h:128
llvm::ARM_MB::ST
@ ST
Definition: ARMBaseInfo.h:73
llvm::RuntimePointerChecking::PointerInfo::DependencySetId
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
Definition: LoopAccessAnalysis.h:399
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::RuntimePointerChecking::CheckingGroups
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
Definition: LoopAccessAnalysis.h:483
laa_name
static const char laa_name[]
Definition: LoopAccessAnalysis.cpp:2533
MemoryLocation.h
llvm::DenseMap< const Value *, Value * >
llvm::SCEV::FlagNSW
@ FlagNSW
Definition: ScalarEvolution.h:134
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Definition: PassManager.h:69
llvm::RuntimeCheckingPtrGroup
A grouping of pointers.
Definition: LoopAccessAnalysis.h:334
llvm::ScalarEvolution::getPtrToIntExpr
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
Definition: ScalarEvolution.cpp:1193
llvm::GetElementPtrInst
An instruction for type-safe pointer arithmetic to access elements of arrays and structs.
Definition: Instructions.h:916
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:432
llvm::LoopAccessInfo
Drive the analysis of memory accesses in the loop.
Definition: LoopAccessAnalysis.h:559
llvm::VectorizerParams::MaxVectorWidth
static const unsigned MaxVectorWidth
Maximum SIMD width.
Definition: LoopAccessAnalysis.h:38
llvm::SCEVWrapPredicate::IncrementNUSW
@ IncrementNUSW
Definition: ScalarEvolution.h:344
llvm::SCEVConstant
This class represents a constant integer value.
Definition: ScalarEvolutionExpressions.h:60
visitPointers
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
Definition: LoopAccessAnalysis.cpp:758
llvm::LoopBase::getLoopLatch
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
Definition: LoopInfoImpl.h:215
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:152
llvm::PredicatedScalarEvolution::getSCEV
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
Definition: ScalarEvolution.cpp:14127
llvm::MemoryDepChecker::isSafeForVectorization
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
Definition: LoopAccessAnalysis.h:190
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
llvm::VectorizerParams::RuntimeMemoryCheckThreshold
static unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime, do not make more than this number of comparisons.
Definition: LoopAccessAnalysis.h:49
iterator_range.h
llvm::SPIRV::Capability::Groups
@ Groups
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
LAA_NAME
#define LAA_NAME
Definition: LoopAccessAnalysis.cpp:2534
llvm::ScalarEvolution::getNoopOrSignExtend
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
Definition: ScalarEvolution.cpp:4610
llvm::RuntimeCheckingPtrGroup::High
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
Definition: LoopAccessAnalysis.h:350
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
llvm::RuntimeCheckingPtrGroup::Low
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
Definition: LoopAccessAnalysis.h:353
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
llvm::SCEVNAryExpr::getNoWrapFlags
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
Definition: ScalarEvolutionExpressions.h:213
llvm::Sched::Source
@ Source
Definition: TargetLowering.h:99
llvm::ArrayRef< unsigned >
llvm::LoopInfo
Definition: LoopInfo.h:1102
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
llvm::replaceSymbolicStrideSCEV
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const ValueToValueMap &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one, assuming the SCEV predicate associated with PSE is true.
Definition: LoopAccessAnalysis.cpp:144
DataLayout.h
EnableForwardingConflictDetection
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:58
llvm::EquivalenceClasses::member_begin
member_iterator member_begin(iterator I) const
Definition: EquivalenceClasses.h:174
llvm::MemoryDepChecker::MemAccessInfo
PointerIntPair< Value *, 1, bool > MemAccessInfo
Definition: LoopAccessAnalysis.h:88
llvm::ScalarEvolution::getConstant
const SCEV * getConstant(ConstantInt *V)
Definition: ScalarEvolution.cpp:461
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:143
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::transform
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1723
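A minimal sketch of llvm::transform applied to a range, with illustrative names:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

llvm::SmallVector<int, 4> doubled(llvm::ArrayRef<int> In) {
  llvm::SmallVector<int, 4> Out(In.size()); // Pre-size the destination.
  llvm::transform(In, Out.begin(), [](int X) { return 2 * X; });
  return Out;
}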
llvm::cl::Sink
@ Sink
Definition: CommandLine.h:167
llvm::ScalarEvolution::getStoreSizeOfExpr
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
Definition: ScalarEvolution.cpp:4243
MemoryCheckMergeThreshold
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
llvm::RuntimeCheckingPtrGroup::addPointer
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
Definition: LoopAccessAnalysis.cpp:371
DEBUG_TYPE
#define DEBUG_TYPE
Definition: LoopAccessAnalysis.cpp:71
llvm::ScalarEvolution::getUMinExpr
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Definition: ScalarEvolution.cpp:4210
llvm::OptimizationRemarkAnalysis
Diagnostic information for optimization analysis remarks.
Definition: DiagnosticInfo.h:781
llvm::SmallSet::insert
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:182
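A minimal sketch of the SmallSet de-duplication idiom; recordOnce is an illustrative name:

#include "llvm/ADT/SmallSet.h"

bool recordOnce(llvm::SmallSet<unsigned, 8> &Seen, unsigned Idx) {
  // insert() returns a pair whose bool is true only on first insertion.
  return Seen.insert(Idx).second;
}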
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:305
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:173
ValueHandle.h
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:209
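A minimal sketch of the DenseMap insert/find pairing used for such Value-to-Value maps; names are illustrative:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Value.h"

llvm::Value *getOrRecordStride(
    llvm::DenseMap<const llvm::Value *, llvm::Value *> &Strides,
    const llvm::Value *Ptr, llvm::Value *Stride) {
  Strides.insert({Ptr, Stride}); // No-op if Ptr is already mapped.
  auto It = Strides.find(Ptr);
  return It == Strides.end() ? nullptr : It->second;
}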
llvm::VectorizerParams::isInterleaveForced
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
Definition: LoopAccessAnalysis.cpp:133
llvm::RuntimePointerChecking::RuntimeCheckingPtrGroup
friend struct RuntimeCheckingPtrGroup
Definition: LoopAccessAnalysis.h:383
llvm::LoopAccessLegacyAnalysis::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
Definition: LoopAccessAnalysis.cpp:2523
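A minimal sketch of the legacy-pass hooks documented here; MyAnalysisPass and its ID are illustrative, not part of this file:

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Pass.h"

struct MyAnalysisPass : public llvm::FunctionPass {
  static char ID;
  MyAnalysisPass() : llvm::FunctionPass(ID) {}

  void getAnalysisUsage(llvm::AnalysisUsage &AU) const override {
    AU.addRequiredTransitive<llvm::ScalarEvolutionWrapperPass>();
    AU.addRequiredTransitive<llvm::DominatorTreeWrapperPass>();
    AU.setPreservesAll(); // Analysis only: transforms nothing.
  }
  bool runOnFunction(llvm::Function &F) override { return false; }
};
char MyAnalysisPass::ID = 0;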
llvm::Value::stripAndAccumulateInBoundsConstantOffsets
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to false.
Definition: Value.h:727
llvm::Function::hasOptSize
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:664
llvm::ScalarEvolution::isLoopInvariant
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
Definition: ScalarEvolution.cpp:13240
llvm::find_if
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1644
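A minimal sketch of range-based find_if; firstNegative is an illustrative name:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

const int *firstNegative(llvm::ArrayRef<int> Vals) {
  auto It = llvm::find_if(Vals, [](int V) { return V < 0; });
  return It == Vals.end() ? nullptr : &*It;
}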
RuntimeMemoryCheckThreshold
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
llvm::ScalarEvolution::isKnownPositive
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
Definition: ScalarEvolution.cpp:10325
llvm::depth_first
iterator_range< df_iterator< T > > depth_first(const T &G)
Definition: DepthFirstIterator.h:230
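A minimal sketch of a depth-first CFG walk with this utility; the function name is illustrative:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"

unsigned countReachableBlocks(llvm::Function &F) {
  unsigned Count = 0;
  for (llvm::BasicBlock *BB : llvm::depth_first(&F.getEntryBlock())) {
    (void)BB; // Each block reachable from the entry is visited once.
    ++Count;
  }
  return Count;
}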
llvm::MemoryDepChecker::Dependence::Unknown
@ Unknown
Definition: LoopAccessAnalysis.h:113
llvm::createLAAPass
Pass * createLAAPass()
Definition: LoopAccessAnalysis.cpp:2552
llvm::RuntimePointerChecking::Pointers
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
Definition: LoopAccessAnalysis.h:480
llvm::LoopBase::isInnermost
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
Definition: LoopInfo.h:181
llvm::RuntimePointerChecking::arePointersInSamePartition
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
Definition: LoopAccessAnalysis.cpp:536
llvm::SCEVAddRecExpr::getLoop
const Loop * getLoop() const
Definition: ScalarEvolutionExpressions.h:354
llvm::LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis
LoopAccessLegacyAnalysis()
Definition: LoopAccessAnalysis.cpp:2488
hasComputableBounds
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)
Check whether a pointer can participate in a runtime bounds check.
Definition: LoopAccessAnalysis.cpp:726
llvm::ScalarEvolution::getMinusSCEV
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
Definition: ScalarEvolution.cpp:4523
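A minimal sketch of the Dist = Sink - Src shape that dependence checking builds with these APIs; assumes both values are SCEV-analyzable, and names are illustrative:

#include "llvm/Analysis/ScalarEvolution.h"

const llvm::SCEV *pointerDistance(llvm::ScalarEvolution &SE,
                                  llvm::Value *Src, llvm::Value *Sink) {
  const llvm::SCEV *SrcS = SE.getSCEV(Src);
  const llvm::SCEV *SinkS = SE.getSCEV(Sink);
  // The checker then asks whether this difference folds to a SCEVConstant.
  return SE.getMinusSCEV(SinkS, SrcS);
}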
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition: PassAnalysisSupport.h:130
llvm::LocationSize::beforeOrAfterPointer
constexpr static LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:130
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:84
VectorizationFactor
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
llvm::EquivalenceClasses::findValue
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
Definition: EquivalenceClasses.h:184
llvm::APInt::sextOrTrunc
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1002
llvm::RuntimePointerChecking::printChecks
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
Definition: LoopAccessAnalysis.cpp:562
llvm::TypeSize
Definition: TypeSize.h:435
llvm::SCEVAddRecExpr
This node represents a polynomial recurrence on the trip count of the specified loop.
Definition: ScalarEvolutionExpressions.h:342
Casting.h
DiagnosticInfo.h
Function.h
llvm::LoopAccessInfo::LoopAccessInfo
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Definition: LoopAccessAnalysis.cpp:2433
llvm::MemoryDepChecker::Dependence::DepName
static const char * DepName[]
String version of the types.
Definition: LoopAccessAnalysis.h:137
llvm::LoopBase::getHeader
BlockT * getHeader() const
Definition: LoopInfo.h:104
llvm::RuntimePointerChecking::PointerInfo::IsWritePtr
bool IsWritePtr
Holds whether this pointer is used for writing to memory.
Definition: LoopAccessAnalysis.h:396
llvm::ScalarEvolution::isSCEVable
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Definition: ScalarEvolution.cpp:4292
PassManager.h
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:222
EquivalenceClasses.h
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:591
llvm::PredicatedScalarEvolution::addPredicate
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
Definition: ScalarEvolution.cpp:14156
llvm::LoopBase::getNumBackEdges
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
Definition: LoopInfo.h:266
llvm::AAMDNodes::TBAA
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:669
llvm::MemoryDepChecker::Dependence::Forward
@ Forward
Definition: LoopAccessAnalysis.h:123
areStridedAccessesIndependent
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
Definition: LoopAccessAnalysis.cpp:1615
llvm::APInt::abs
APInt abs() const
Get the absolute value.
Definition: APInt.h:1686
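A minimal sketch combining the APInt operations indexed here (sextOrTrunc, abs, getZExtValue) to normalize a signed distance; the function name is illustrative:

#include "llvm/ADT/APInt.h"
#include <cstdint>

uint64_t distanceMagnitude(const llvm::APInt &Dist) {
  // Sign-extend or truncate to 64 bits, then take the absolute value.
  return Dist.sextOrTrunc(64).abs().getZExtValue();
}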
ScalarEvolutionExpressions.h
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
llvm::ScalarEvolution::getZeroExtendExpr
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
Definition: ScalarEvolution.cpp:1594
Instructions.h
llvm::PointerIntPair< Value *, 1, bool >
INITIALIZE_PASS_BEGIN
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:51
llvm::RuntimePointerChecking::insert
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
Definition: LoopAccessAnalysis.cpp:194
SmallVector.h
llvm::raw_ostream::indent
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
Definition: raw_ostream.cpp:496
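A minimal sketch of the Depth/indent convention that the print() methods listed on this page follow; the function name is illustrative:

#include "llvm/Support/raw_ostream.h"

void printCheckHeader(llvm::raw_ostream &OS, unsigned Depth, unsigned N) {
  OS.indent(Depth) << "Check " << N << ":\n"; // Depth leading spaces.
}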
Dominators.h
llvm::MemoryDepChecker::Dependence::Type
DepType Type
The type of the dependence.
Definition: LoopAccessAnalysis.h:144
llvm::AAResultsWrapperPass
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Definition: AliasAnalysis.h:1351
llvm::LoopAccessInfo::print
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
Definition: LoopAccessAnalysis.cpp:2445
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:164
llvm::PatternMatch
Definition: PatternMatch.h:47
llvm::SmallVectorImpl< int >
llvm::MemoryDepChecker::Dependence::isSafeForVectorization
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
Definition: LoopAccessAnalysis.cpp:1441
llvm::EquivalenceClasses::unionSets
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not already exist in the equivalence set.
Definition: EquivalenceClasses.h:238
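A minimal sketch of the EquivalenceClasses pattern (unionSets/findValue/member_begin) that the dependence checker uses to group pointers sharing an underlying object; the function name is illustrative:

#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/IR/Value.h"

void groupAndVisit(llvm::EquivalenceClasses<llvm::Value *> &EC,
                   llvm::Value *A, llvm::Value *B) {
  EC.unionSets(A, B);       // A and B now belong to one equivalence class.
  auto I = EC.findValue(A); // Iterator to A's entry, if present.
  if (I != EC.end())
    for (auto MI = EC.member_begin(I); MI != EC.member_end(); ++MI)
      (void)*MI;            // Visit every member of the merged class.
}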
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition: PassAnalysisSupport.h:81
llvm::GlobalValue::getType
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:276
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:398
DerivedTypes.h
llvm::SCEV::getType
Type * getType() const
Return the LLVM type of this SCEV expression.
Definition: ScalarEvolution.cpp:392
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:42
llvm::getLoadStorePointerOperand
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Definition: Instructions.h:5317
llvm::ScalarEvolution::getAddExpr
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
Definition: ScalarEvolution.cpp:2453
llvm::IntegerType::get
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
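A minimal sketch of building a pointer-width integer type, the step that precedes getPtrToIntExpr/getStoreSizeOfExpr queries; pointerIndexType is an illustrative name:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

llvm::IntegerType *pointerIndexType(llvm::LLVMContext &Ctx,
                                    const llvm::DataLayout &DL,
                                    unsigned AddrSpace) {
  // Wide enough to hold any index in the given address space, e.g. i64.
  return llvm::IntegerType::get(Ctx, DL.getIndexSizeInBits(AddrSpace));
}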
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
AliasSetTracker.h
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::cl::desc
Definition: CommandLine.h:405
raw_ostream.h
llvm::SI::KernelInputOffsets::Offsets
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:1312
llvm::NullPointerIsDefined
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:2047
llvm::PredicatedScalarEvolution::getSE
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
Definition: ScalarEvolution.h:2208
llvm::EquivalenceClasses::member_iterator
Definition: EquivalenceClasses.h:272
Value.h
llvm::RuntimePointerChecking::print
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list of run-time memory checks necessary.
Definition: LoopAccessAnalysis.cpp:581
llvm::abs
APFloat abs(APFloat X)
Returns the absolute value of the argument.
Definition: APFloat.h:1282
llvm::RuntimeCheckingPtrGroup::Members
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
Definition: LoopAccessAnalysis.h:355
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
Debug.h
llvm::MemoryDepChecker::Dependence::ForwardButPreventsForwarding
@ ForwardButPreventsForwarding
Definition: LoopAccessAnalysis.h:126
llvm::RuntimeCheckingPtrGroup::NeedsFreeze
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g. because it may be poison outside the loop.
Definition: LoopAccessAnalysis.h:360
llvm::RuntimePointerChecking::generateChecks
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store them.
Definition: LoopAccessAnalysis.cpp:341
llvm::MemoryLocation
Representation for a specific memory location.
Definition: MemoryLocation.h:210
llvm::MemoryDepChecker::Dependence::getDestination
Instruction * getDestination(const LoopAccessInfo &LAI) const
Return the destination instruction of the dependence.
Definition: LoopAccessAnalysis.h:829
llvm::SCEVAddRecExpr::getStepRecurrence
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
Definition: ScalarEvolutionExpressions.h:360
llvm::LoopAccessLegacyAnalysis::runOnFunction
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
Definition: LoopAccessAnalysis.cpp:2512
SetVector.h
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::SCEVAddRecExpr::evaluateAtIteration
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
Definition: ScalarEvolution.cpp:1042
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:927
SmallSet.h
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
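A minimal sketch of the SmallPtrSet visited-set idiom used when walking underlying objects; markVisited is an illustrative name:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"

bool markVisited(llvm::SmallPtrSetImpl<const llvm::Value *> &Visited,
                 const llvm::Value *V) {
  // insert().second is true only the first time V is seen.
  return Visited.insert(V).second;
}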
llvm::MemoryDepChecker::VectorizationSafetyStatus::PossiblySafeWithRtChecks
@ PossiblySafeWithRtChecks