1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
57#include "llvm/Support/Debug.h"
60#include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
73static cl::opt<unsigned, true>
74VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
76 cl::location(VectorizerParams::VectorizationFactor));
77
78unsigned VectorizerParams::VectorizationFactor;
79static cl::opt<unsigned, true>
80VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
83 cl::location(
84 VectorizerParams::VectorizationInterleave));
85
86unsigned VectorizerParams::VectorizationInterleave;
88 "runtime-memory-check-threshold", cl::Hidden,
89 cl::desc("When performing memory disambiguation checks at runtime do not "
90 "generate more than this number of comparisons (default = 8)."),
93
94/// The maximum iterations used to merge memory checks
96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
105static cl::opt<unsigned>
106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
144 "hoist-runtime-checks", cl::Hidden,
145 cl::desc(
146 "Hoist inner loop runtime memory checks to outer loop if possible"),
149
150bool VectorizerParams::isInterleaveForced() {
151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
153
154const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
161 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const SCEV *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 const SCEV *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
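// Illustrative sketch of the rewrite above (IR names are made up): for
//   for (i = 0; i < N; ++i)
//     A[i * Stride] += ...;
// the original pointer SCEV is roughly {%A,+,(4 * %Stride)}<%loop>. Once the
// predicate "%Stride == 1" has been added to PSE, re-querying the pointer
// yields {%A,+,4}<%loop>, i.e. a unit-stride access that downstream analyses
// can reason about.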
182
183RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
184 unsigned Index, const RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Calculate Start and End points of memory access.
194/// Let's assume A is the first access and B is a memory access on N-th loop
195/// iteration. Then B is calculated as:
196/// B = A + Step*N .
197/// Step value may be positive or negative.
198/// N is a calculated back-edge taken count:
199/// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
206static std::pair<const SCEV *, const SCEV *> getStartAndEndForAccess(
207 const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
208 PredicatedScalarEvolution &PSE,
209 DenseMap<std::pair<const SCEV *, Type *>,
210 std::pair<const SCEV *, const SCEV *>> &PointerBounds) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 auto [Iter, Ins] = PointerBounds.insert(
214 {{PtrExpr, AccessTy},
215 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
216 if (!Ins)
217 return Iter->second;
218
219 const SCEV *ScStart;
220 const SCEV *ScEnd;
221
222 if (SE->isLoopInvariant(PtrExpr, Lp)) {
223 ScStart = ScEnd = PtrExpr;
224 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
225 const SCEV *Ex = PSE.getSymbolicMaxBackedgeTakenCount();
226
227 ScStart = AR->getStart();
228 ScEnd = AR->evaluateAtIteration(Ex, *SE);
229 const SCEV *Step = AR->getStepRecurrence(*SE);
230
231 // For expressions with negative step, the upper bound is ScStart and the
232 // lower bound is ScEnd.
233 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
234 if (CStep->getValue()->isNegative())
235 std::swap(ScStart, ScEnd);
236 } else {
237 // Fallback case: the step is not constant, but we can still
238 // get the upper and lower bounds of the interval by using min/max
239 // expressions.
240 ScStart = SE->getUMinExpr(ScStart, ScEnd);
241 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
242 }
243 } else
244 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
245
246 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
247 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
248
249 // Add the size of the pointed element to ScEnd.
250 auto &DL = Lp->getHeader()->getDataLayout();
251 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
252 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
253 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
254
255 Iter->second = {ScStart, ScEnd};
256 return Iter->second;
257}
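// Worked example (assuming a positive constant step): for an i32 access with
// AddRec {%A,+,4}<%loop> and symbolic max backedge-taken count BTC,
//   ScStart = %A
//   ScEnd   = %A + 4 * BTC + 4   // last accessed address plus SizeOfElt
// For a negative step the two bounds are swapped before the element size is
// added, so [Start, End) always covers every byte the access can touch.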
258
259/// Calculate Start and End points of memory access using
260/// getStartAndEndForAccess.
261void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
262 Type *AccessTy, bool WritePtr,
263 unsigned DepSetId, unsigned ASId,
264 PredicatedScalarEvolution &PSE,
265 bool NeedsFreeze) {
266 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
267 Lp, PtrExpr, AccessTy, PSE, DC.getPointerBounds());
268 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
269 !isa<SCEVCouldNotCompute>(ScEnd) &&
270 "must be able to compute both start and end expressions");
271 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
272 NeedsFreeze);
273}
274
275bool RuntimePointerChecking::tryToCreateDiffCheck(
276 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
277 // If either group contains multiple different pointers, bail out.
278 // TODO: Support multiple pointers by using the minimum or maximum pointer,
279 // depending on src & sink.
280 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
281 return false;
282
283 const PointerInfo *Src = &Pointers[CGI.Members[0]];
284 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
285
286 // If either pointer is read and written, multiple checks may be needed. Bail
287 // out.
288 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
289 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
290 return false;
291
292 ArrayRef<unsigned> AccSrc =
293 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
294 ArrayRef<unsigned> AccSink =
295 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
296 // If either pointer is accessed multiple times, there may not be a clear
297 // src/sink relation. Bail out for now.
298 if (AccSrc.size() != 1 || AccSink.size() != 1)
299 return false;
300
301 // If the sink is accessed before src, swap src/sink.
302 if (AccSink[0] < AccSrc[0])
303 std::swap(Src, Sink);
304
305 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
306 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
307 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
308 SinkAR->getLoop() != DC.getInnermostLoop())
309 return false;
310
311 SmallVector<Instruction *, 4> SrcInsts =
312 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
313 SmallVector<Instruction *, 4> SinkInsts =
314 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
315 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
316 Type *DstTy = getLoadStoreType(SinkInsts[0]);
317 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
318 return false;
319
320 const DataLayout &DL =
321 SinkAR->getLoop()->getHeader()->getDataLayout();
322 unsigned AllocSize =
323 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
324
325 // Only matching constant steps matching the AllocSize are supported at the
326 // moment. This simplifies the difference computation. Can be extended in the
327 // future.
328 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
329 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
330 Step->getAPInt().abs() != AllocSize)
331 return false;
332
333 IntegerType *IntTy =
334 IntegerType::get(Src->PointerValue->getContext(),
335 DL.getPointerSizeInBits(CGI.AddressSpace));
336
337 // When counting down, the dependence distance needs to be swapped.
338 if (Step->getValue()->isNegative())
339 std::swap(SinkAR, SrcAR);
340
341 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
342 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
343 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
344 isa<SCEVCouldNotCompute>(SrcStartInt))
345 return false;
346
347 const Loop *InnerLoop = SrcAR->getLoop();
348 // If the start values for both Src and Sink also vary according to an outer
349 // loop, then it's probably better to avoid creating diff checks because
350 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
351 // do the expanded full range overlap checks, which can be hoisted.
352 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
353 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
354 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
355 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
356 const Loop *StartARLoop = SrcStartAR->getLoop();
357 if (StartARLoop == SinkStartAR->getLoop() &&
358 StartARLoop == InnerLoop->getParentLoop() &&
359 // If the diff check would already be loop invariant (due to the
360 // recurrences being the same), then we prefer to keep the diff checks
361 // because they are cheaper.
362 SrcStartAR->getStepRecurrence(*SE) !=
363 SinkStartAR->getStepRecurrence(*SE)) {
364 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
365 "cannot be hoisted out of the outer loop\n");
366 return false;
367 }
368 }
369
370 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
371 << "SrcStart: " << *SrcStartInt << '\n'
372 << "SinkStartInt: " << *SinkStartInt << '\n');
373 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
374 Src->NeedsFreeze || Sink->NeedsFreeze);
375 return true;
376}
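// Sketch of what the recorded PointerDiffInfo is used for: the caller later
// expands (SrcStart, SinkStart, AllocSize) into roughly
//   (SinkStartInt - SrcStartInt) >= AllocSize * VF * IC
// i.e. a single subtract-and-compare per pair instead of the two-interval
// overlap test, which is why this path is preferred when a clear src/sink
// relationship exists.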
377
378SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
379 SmallVector<RuntimePointerCheck, 4> Checks;
380
381 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
382 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
383 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
384 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
385
386 if (needsChecking(CGI, CGJ)) {
387 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
388 Checks.emplace_back(&CGI, &CGJ);
389 }
390 }
391 }
392 return Checks;
393}
394
395void RuntimePointerChecking::generateChecks(
396 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
397 assert(Checks.empty() && "Checks is not empty");
398 groupChecks(DepCands, UseDependencies);
399 Checks = generateChecks();
400}
401
402bool RuntimePointerChecking::needsChecking(
403 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
404 for (const auto &I : M.Members)
405 for (const auto &J : N.Members)
406 if (needsChecking(I, J))
407 return true;
408 return false;
409}
410
411/// Compare \p I and \p J and return the minimum.
412/// Return nullptr in case we couldn't find an answer.
413static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
414 ScalarEvolution *SE) {
415 std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
416 if (!Diff)
417 return nullptr;
418 return Diff->isNegative() ? J : I;
419}
420
421bool RuntimeCheckingPtrGroup::addPointer(
422 unsigned Index, const RuntimePointerChecking &RtCheck) {
423 return addPointer(
424 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
425 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
426 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
427}
428
429bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
430 const SCEV *End, unsigned AS,
431 bool NeedsFreeze,
432 ScalarEvolution &SE) {
433 assert(AddressSpace == AS &&
434 "all pointers in a checking group must be in the same address space");
435
436 // Compare the starts and ends with the known minimum and maximum
437 // of this set. We need to know how we compare against the min/max
438 // of the set in order to be able to emit memchecks.
439 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
440 if (!Min0)
441 return false;
442
443 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
444 if (!Min1)
445 return false;
446
447 // Update the low bound expression if we've found a new min value.
448 if (Min0 == Start)
449 Low = Start;
450
451 // Update the high bound expression if we've found a new max value.
452 if (Min1 != End)
453 High = End;
454
455 Members.push_back(Index);
456 this->NeedsFreeze |= NeedsFreeze;
457 return true;
458}
459
460void RuntimePointerChecking::groupChecks(
461 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
462 // We build the groups from dependency candidates equivalence classes
463 // because:
464 // - We know that pointers in the same equivalence class share
465 // the same underlying object and therefore there is a chance
466 // that we can compare pointers
467 // - We wouldn't be able to merge two pointers for which we need
468 // to emit a memcheck. The classes in DepCands are already
469 // conveniently built such that no two pointers in the same
470 // class need checking against each other.
471
472 // We use the following (greedy) algorithm to construct the groups
473 // For every pointer in the equivalence class:
474 // For each existing group:
475 // - if the difference between this pointer and the min/max bounds
476 // of the group is a constant, then make the pointer part of the
477 // group and update the min/max bounds of that group as required.
478
479 CheckingGroups.clear();
480
481 // If we need to check two pointers to the same underlying object
482 // with a non-constant difference, we shouldn't perform any pointer
483 // grouping with those pointers. This is because we can easily get
484 // into cases where the resulting check would return false, even when
485 // the accesses are safe.
486 //
487 // The following example shows this:
488 // for (i = 0; i < 1000; ++i)
489 // a[5000 + i * m] = a[i] + a[i + 9000]
490 //
491 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
492 // (0, 10000) which is always false. However, if m is 1, there is no
493 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
494 // us to perform an accurate check in this case.
495 //
496 // The above case requires that we have an UnknownDependence between
497 // accesses to the same underlying object. This cannot happen unless
498 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
499 // is also false. In this case we will use the fallback path and create
500 // separate checking groups for all pointers.
501
502 // If we don't have the dependency partitions, construct a new
503 // checking pointer group for each pointer. This is also required
504 // for correctness, because in this case we can have checking between
505 // pointers to the same underlying object.
506 if (!UseDependencies) {
507 for (unsigned I = 0; I < Pointers.size(); ++I)
508 CheckingGroups.emplace_back(I, *this);
509 return;
510 }
511
512 unsigned TotalComparisons = 0;
513
514 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
515 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
516 auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
517 It->second.push_back(Index);
518 }
519
520 // We need to keep track of what pointers we've already seen so we
521 // don't process them twice.
522 SmallSet<unsigned, 2> Seen;
523
524 // Go through all equivalence classes, get the "pointer check groups"
525 // and add them to the overall solution. We use the order in which accesses
526 // appear in 'Pointers' to enforce determinism.
527 for (unsigned I = 0; I < Pointers.size(); ++I) {
528 // We've seen this pointer before, and therefore already processed
529 // its equivalence class.
530 if (Seen.count(I))
531 continue;
532
533 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
534 Pointers[I].IsWritePtr);
535
536 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
537 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
538
539 // Because DepCands is constructed by visiting accesses in the order in
540 // which they appear in alias sets (which is deterministic) and the
541 // iteration order within an equivalence class member is only dependent on
542 // the order in which unions and insertions are performed on the
543 // equivalence class, the iteration order is deterministic.
544 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
545 MI != ME; ++MI) {
546 auto PointerI = PositionMap.find(MI->getPointer());
547 assert(PointerI != PositionMap.end() &&
548 "pointer in equivalence class not found in PositionMap");
549 for (unsigned Pointer : PointerI->second) {
550 bool Merged = false;
551 // Mark this pointer as seen.
552 Seen.insert(Pointer);
553
554 // Go through all the existing sets and see if we can find one
555 // which can include this pointer.
556 for (RuntimeCheckingPtrGroup &Group : Groups) {
557 // Don't perform more than a certain amount of comparisons.
558 // This should limit the cost of grouping the pointers to something
559 // reasonable. If we do end up hitting this threshold, the algorithm
560 // will create separate groups for all remaining pointers.
561 if (TotalComparisons > MemoryCheckMergeThreshold)
562 break;
563
564 TotalComparisons++;
565
566 if (Group.addPointer(Pointer, *this)) {
567 Merged = true;
568 break;
569 }
570 }
571
572 if (!Merged)
573 // We couldn't add this pointer to any existing set or the threshold
574 // for the number of comparisons has been reached. Create a new group
575 // to hold the current pointer.
576 Groups.emplace_back(Pointer, *this);
577 }
578 }
579
580 // We've computed the grouped checks for this partition.
581 // Save the results and continue with the next one.
582 llvm::copy(Groups, std::back_inserter(CheckingGroups));
583 }
584}
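// Small worked example of the greedy grouping above (constant offsets
// assumed): with one equivalence class containing %a = A, %b = A+4 and
// %c = A+400, %a opens a group, %b and %c each have a constant difference to
// the group's current Low/High bounds and are merged in, extending High.
// A pointer whose distance to the bounds is not a compile-time constant would
// instead start a new group.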
585
586bool RuntimePointerChecking::arePointersInSamePartition(
587 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
588 unsigned PtrIdx2) {
589 return (PtrToPartition[PtrIdx1] != -1 &&
590 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
591}
592
593bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
594 const PointerInfo &PointerI = Pointers[I];
595 const PointerInfo &PointerJ = Pointers[J];
596
597 // No need to check if two readonly pointers intersect.
598 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
599 return false;
600
601 // Only need to check pointers between two different dependency sets.
602 if (PointerI.DependencySetId == PointerJ.DependencySetId)
603 return false;
604
605 // Only need to check pointers in the same alias set.
606 return PointerI.AliasSetId == PointerJ.AliasSetId;
607}
608
609void RuntimePointerChecking::printChecks(
610 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
611 unsigned Depth) const {
612 unsigned N = 0;
613 for (const auto &[Check1, Check2] : Checks) {
614 const auto &First = Check1->Members, &Second = Check2->Members;
615
616 OS.indent(Depth) << "Check " << N++ << ":\n";
617
618 OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
619 for (unsigned K : First)
620 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
621
622 OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
623 for (unsigned K : Second)
624 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
625 }
626}
627
628void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
629
630 OS.indent(Depth) << "Run-time memory checks:\n";
631 printChecks(OS, Checks, Depth);
632
633 OS.indent(Depth) << "Grouped accesses:\n";
634 for (const auto &CG : CheckingGroups) {
635 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
636 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
637 << ")\n";
638 for (unsigned Member : CG.Members) {
639 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
640 }
641 }
642}
643
644namespace {
645
646/// Analyses memory accesses in a loop.
647///
648/// Checks whether run time pointer checks are needed and builds sets for data
649/// dependence checking.
650class AccessAnalysis {
651public:
652 /// Read or write access location.
653 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
654 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
655
656 AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
657 MemoryDepChecker::DepCandidates &DA,
658 PredicatedScalarEvolution &PSE,
659 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
660 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
661 LoopAliasScopes(LoopAliasScopes) {
662 // We're analyzing dependences across loop iterations.
663 BAA.enableCrossIterationMode();
664 }
665
666 /// Register a load and whether it is only read from.
667 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
668 Value *Ptr = const_cast<Value *>(Loc.Ptr);
669 AST.add(adjustLoc(Loc));
670 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
671 if (IsReadOnly)
672 ReadOnlyPtr.insert(Ptr);
673 }
674
675 /// Register a store.
676 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
677 Value *Ptr = const_cast<Value *>(Loc.Ptr);
678 AST.add(adjustLoc(Loc));
679 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
680 }
681
682 /// Check if we can emit a run-time no-alias check for \p Access.
683 ///
684 /// Returns true if we can emit a run-time no alias check for \p Access.
685 /// If we can check this access, this also adds it to a dependence set and
686 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
687 /// we will attempt to use additional run-time checks in order to get
688 /// the bounds of the pointer.
689 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
690 MemAccessInfo Access, Type *AccessTy,
691 const DenseMap<Value *, const SCEV *> &Strides,
692 DenseMap<MemAccessInfo, unsigned> &DepSetId,
693 Loop *TheLoop, unsigned &RunningDepId,
694 unsigned ASId, bool ShouldCheckStride, bool Assume);
695
696 /// Check whether we can check the pointers at runtime for
697 /// non-intersection.
698 ///
699 /// Returns true if we need no check or if we do and we can generate them
700 /// (i.e. the pointers have computable bounds).
701 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
702 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
703 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
704
705 /// Goes over all memory accesses, checks whether a RT check is needed
706 /// and builds sets of dependent accesses.
707 void buildDependenceSets() {
708 processMemAccesses();
709 }
710
711 /// Initial processing of memory accesses determined that we need to
712 /// perform dependency checking.
713 ///
714 /// Note that this can later be cleared if we retry memcheck analysis without
715 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
716 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
717
718 /// We decided that no dependence analysis would be used. Reset the state.
719 void resetDepChecks(MemoryDepChecker &DepChecker) {
720 CheckDeps.clear();
721 DepChecker.clearDependences();
722 }
723
724 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
725
726private:
727 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
728
729 /// Adjust the MemoryLocation so that it represents accesses to this
730 /// location across all iterations, rather than a single one.
731 MemoryLocation adjustLoc(MemoryLocation Loc) const {
732 // The accessed location varies within the loop, but remains within the
733 // underlying object.
734 Loc.Size = LocationSize::beforeOrAfterPointer();
735 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
736 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
737 return Loc;
738 }
739
740 /// Drop alias scopes that are only valid within a single loop iteration.
741 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
742 if (!ScopeList)
743 return nullptr;
744
745 // For the sake of simplicity, drop the whole scope list if any scope is
746 // iteration-local.
747 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
748 return LoopAliasScopes.contains(cast<MDNode>(Scope));
749 }))
750 return nullptr;
751
752 return ScopeList;
753 }
754
755 /// Go over all memory access and check whether runtime pointer checks
756 /// are needed and build sets of dependency check candidates.
757 void processMemAccesses();
758
759 /// Map of all accesses. Values are the types used to access memory pointed to
760 /// by the pointer.
761 PtrAccessMap Accesses;
762
763 /// The loop being checked.
764 const Loop *TheLoop;
765
766 /// List of accesses that need a further dependence check.
767 MemAccessInfoList CheckDeps;
768
769 /// Set of pointers that are read only.
770 SmallPtrSet<Value*, 16> ReadOnlyPtr;
771
772 /// Batched alias analysis results.
773 BatchAAResults BAA;
774
775 /// An alias set tracker to partition the access set by underlying object and
776 /// intrinsic property (such as TBAA metadata).
777 AliasSetTracker AST;
778
779 /// The LoopInfo of the loop being checked.
780 const LoopInfo *LI;
781
782 /// Sets of potentially dependent accesses - members of one set share an
783 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
784 /// dependence check.
785 MemoryDepChecker::DepCandidates &DepCands;
786
787 /// Initial processing of memory accesses determined that we may need
788 /// to add memchecks. Perform the analysis to determine the necessary checks.
789 ///
790 /// Note that this is different from isDependencyCheckNeeded. When we retry
791 /// memcheck analysis without dependency checking
792 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
793 /// cleared while this remains set if we have potentially dependent accesses.
794 bool IsRTCheckAnalysisNeeded = false;
795
796 /// The SCEV predicate containing all the SCEV-related assumptions.
797 PredicatedScalarEvolution &PSE;
798
799 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
800
801 /// Alias scopes that are declared inside the loop, and as such not valid
802 /// across iterations.
803 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
804};
805
806} // end anonymous namespace
807
808/// Check whether a pointer can participate in a runtime bounds check.
809/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
810/// by adding run-time checks (overflow checks) if necessary.
811static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
812 const SCEV *PtrScev, Loop *L, bool Assume) {
813 // The bounds for a loop-invariant pointer are trivial.
814 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
815 return true;
816
817 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
818
819 if (!AR && Assume)
820 AR = PSE.getAsAddRec(Ptr);
821
822 if (!AR)
823 return false;
824
825 return AR->isAffine();
826}
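// For example, %p = getelementptr i32, ptr %A, i64 %iv (with %iv the
// induction variable) has the affine AddRec {%A,+,4}<%loop> and therefore
// computable bounds, whereas a pointer reloaded from memory each iteration
// (a[b[i]]-style indirection) does not, unless \p Assume lets PSE add a
// predicate that turns it into an affine AddRec.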
827
828/// Check whether a pointer address cannot wrap.
829static bool isNoWrap(PredicatedScalarEvolution &PSE,
830 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
831 Loop *L) {
832 const SCEV *PtrScev = PSE.getSCEV(Ptr);
833 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
834 return true;
835
836 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
837 return Stride == 1 ||
838 PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
839}
840
841static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
842 function_ref<void(Value *)> AddPointer) {
843 SmallPtrSet<Value *, 8> Visited;
844 SmallVector<Value *> WorkList;
845 WorkList.push_back(StartPtr);
846
847 while (!WorkList.empty()) {
848 Value *Ptr = WorkList.pop_back_val();
849 if (!Visited.insert(Ptr).second)
850 continue;
851 auto *PN = dyn_cast<PHINode>(Ptr);
852 // SCEV does not look through non-header PHIs inside the loop. Such phis
853 // can be analyzed by adding separate accesses for each incoming pointer
854 // value.
855 if (PN && InnermostLoop.contains(PN->getParent()) &&
856 PN->getParent() != InnermostLoop.getHeader()) {
857 for (const Use &Inc : PN->incoming_values())
858 WorkList.push_back(Inc);
859 } else
860 AddPointer(Ptr);
861 }
862}
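// Example of the non-header phi handled above (sketch):
//   loop.latch:
//     %p = phi ptr [ %a, %then ], [ %b, %else ]
//     store i32 0, ptr %p
// SCEV cannot summarise %p directly, so visitPointers registers %a and %b as
// two separate accesses instead.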
863
864// Walk back through the IR for a pointer, looking for a select like the
865// following:
866//
867// %offset = select i1 %cmp, i64 %a, i64 %b
868// %addr = getelementptr double, double* %base, i64 %offset
869// %ld = load double, double* %addr, align 8
870//
871// We won't be able to form a single SCEVAddRecExpr from this since the
872// address for each loop iteration depends on %cmp. We could potentially
873// produce multiple valid SCEVAddRecExprs, though, and check all of them for
874// memory safety/aliasing if needed.
875//
876// If we encounter some IR we don't yet handle, or something obviously fine
877// like a constant, then we just add the SCEV for that term to the list passed
878// in by the caller. If we have a node that may potentially yield a valid
879// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
880// ourselves before adding to the list.
881static void findForkedSCEVs(
882 ScalarEvolution *SE, const Loop *L, Value *Ptr,
883 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
884 unsigned Depth) {
885 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
886 // we've exceeded our limit on recursion, just return whatever we have
887 // regardless of whether it can be used for a forked pointer or not, along
888 // with an indication of whether it might be a poison or undef value.
889 const SCEV *Scev = SE->getSCEV(Ptr);
890 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
891 !isa<Instruction>(Ptr) || Depth == 0) {
892 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
893 return;
894 }
895
896 Depth--;
897
898 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
899 return get<1>(S);
900 };
901
902 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
903 switch (Opcode) {
904 case Instruction::Add:
905 return SE->getAddExpr(L, R);
906 case Instruction::Sub:
907 return SE->getMinusSCEV(L, R);
908 default:
909 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
910 }
911 };
912
913 Instruction *I = cast<Instruction>(Ptr);
914 unsigned Opcode = I->getOpcode();
915 switch (Opcode) {
916 case Instruction::GetElementPtr: {
917 auto *GEP = cast<GetElementPtrInst>(I);
918 Type *SourceTy = GEP->getSourceElementType();
919 // We only handle base + single offset GEPs here for now.
920 // Not dealing with preexisting gathers yet, so no vectors.
921 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
922 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
923 break;
924 }
925 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
926 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
927 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
928 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
929
930 // See if we need to freeze our fork...
931 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
932 any_of(OffsetScevs, UndefPoisonCheck);
933
934 // Check that we only have a single fork, on either the base or the offset.
935 // Copy the SCEV across for the one without a fork in order to generate
936 // the full SCEV for both sides of the GEP.
937 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
938 BaseScevs.push_back(BaseScevs[0]);
939 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
940 OffsetScevs.push_back(OffsetScevs[0]);
941 else {
942 ScevList.emplace_back(Scev, NeedsFreeze);
943 break;
944 }
945
946 // Find the pointer type we need to extend to.
947 Type *IntPtrTy = SE->getEffectiveSCEVType(
948 SE->getSCEV(GEP->getPointerOperand())->getType());
949
950 // Find the size of the type being pointed to. We only have a single
951 // index term (guarded above) so we don't need to index into arrays or
952 // structures, just get the size of the scalar value.
953 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
954
955 // Scale up the offsets by the size of the type, then add to the bases.
956 const SCEV *Scaled1 = SE->getMulExpr(
957 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
958 const SCEV *Scaled2 = SE->getMulExpr(
959 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
960 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
961 NeedsFreeze);
962 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
963 NeedsFreeze);
964 break;
965 }
966 case Instruction::Select: {
967 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
968 // A select means we've found a forked pointer, but we currently only
969 // support a single select per pointer so if there's another behind this
970 // then we just bail out and return the generic SCEV.
971 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
972 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
973 if (ChildScevs.size() == 2) {
974 ScevList.push_back(ChildScevs[0]);
975 ScevList.push_back(ChildScevs[1]);
976 } else
977 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
978 break;
979 }
980 case Instruction::PHI: {
981 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
982 // A phi means we've found a forked pointer, but we currently only
983 // support a single phi per pointer so if there's another behind this
984 // then we just bail out and return the generic SCEV.
985 if (I->getNumOperands() == 2) {
986 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
987 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
988 }
989 if (ChildScevs.size() == 2) {
990 ScevList.push_back(ChildScevs[0]);
991 ScevList.push_back(ChildScevs[1]);
992 } else
993 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
994 break;
995 }
996 case Instruction::Add:
997 case Instruction::Sub: {
998 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
999 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
1000 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1001 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1002
1003 // See if we need to freeze our fork...
1004 bool NeedsFreeze =
1005 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1006
1007 // Check that we only have a single fork, on either the left or right side.
1008 // Copy the SCEV across for the one without a fork in order to generate
1009 // the full SCEV for both sides of the BinOp.
1010 if (LScevs.size() == 2 && RScevs.size() == 1)
1011 RScevs.push_back(RScevs[0]);
1012 else if (RScevs.size() == 2 && LScevs.size() == 1)
1013 LScevs.push_back(LScevs[0]);
1014 else {
1015 ScevList.emplace_back(Scev, NeedsFreeze);
1016 break;
1017 }
1018
1019 ScevList.emplace_back(
1020 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1021 NeedsFreeze);
1022 ScevList.emplace_back(
1023 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1024 NeedsFreeze);
1025 break;
1026 }
1027 default:
1028 // Just return the current SCEV if we haven't handled the instruction yet.
1029 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1030 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1031 break;
1032 }
1033}
1034
1035static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
1036findForkedPointer(PredicatedScalarEvolution &PSE,
1037 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1038 const Loop *L) {
1039 ScalarEvolution *SE = PSE.getSE();
1040 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1041 SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
1042 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1043
1044 // For now, we will only accept a forked pointer with two possible SCEVs
1045 // that are either SCEVAddRecExprs or loop invariant.
1046 if (Scevs.size() == 2 &&
1047 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1048 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1049 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1050 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1051 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1052 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1053 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1054 return Scevs;
1055 }
1056
1057 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1058}
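// Continuing the select example from findForkedSCEVs above, this returns two
// candidate SCEVs for %addr, roughly (%base + 8 * %a) and (%base + 8 * %b)
// for double elements, and the caller then emits runtime-check bounds for
// both possible address streams.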
1059
1060bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1061 MemAccessInfo Access, Type *AccessTy,
1062 const DenseMap<Value *, const SCEV *> &StridesMap,
1063 DenseMap<MemAccessInfo, unsigned> &DepSetId,
1064 Loop *TheLoop, unsigned &RunningDepId,
1065 unsigned ASId, bool ShouldCheckWrap,
1066 bool Assume) {
1067 Value *Ptr = Access.getPointer();
1068
1069 SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
1070 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1071
1072 for (const auto &P : TranslatedPtrs) {
1073 const SCEV *PtrExpr = get<0>(P);
1074 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1075 return false;
1076
1077 // When we run after a failing dependency check we have to make sure
1078 // we don't have wrapping pointers.
1079 if (ShouldCheckWrap) {
1080 // Skip wrap checking when translating pointers.
1081 if (TranslatedPtrs.size() > 1)
1082 return false;
1083
1084 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1085 const SCEV *Expr = PSE.getSCEV(Ptr);
1086 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1087 return false;
1088 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1089 }
1090 }
1091 // If there's only one option for Ptr, look it up after bounds and wrap
1092 // checking, because assumptions might have been added to PSE.
1093 if (TranslatedPtrs.size() == 1)
1094 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1095 false};
1096 }
1097
1098 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1099 // The id of the dependence set.
1100 unsigned DepId;
1101
1102 if (isDependencyCheckNeeded()) {
1103 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1104 unsigned &LeaderId = DepSetId[Leader];
1105 if (!LeaderId)
1106 LeaderId = RunningDepId++;
1107 DepId = LeaderId;
1108 } else
1109 // Each access has its own dependence set.
1110 DepId = RunningDepId++;
1111
1112 bool IsWrite = Access.getInt();
1113 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1114 NeedsFreeze);
1115 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1116 }
1117
1118 return true;
1119}
1120
1121bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1122 ScalarEvolution *SE, Loop *TheLoop,
1123 const DenseMap<Value *, const SCEV *> &StridesMap,
1124 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1125 // Find pointers with computable bounds. We are going to use this information
1126 // to place a runtime bound check.
1127 bool CanDoRT = true;
1128
1129 bool MayNeedRTCheck = false;
1130 if (!IsRTCheckAnalysisNeeded) return true;
1131
1132 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1133
1134 // We assign a consecutive id to accesses from different alias sets.
1135 // Accesses between different groups don't need to be checked.
1136 unsigned ASId = 0;
1137 for (const auto &AS : AST) {
1138 int NumReadPtrChecks = 0;
1139 int NumWritePtrChecks = 0;
1140 bool CanDoAliasSetRT = true;
1141 ++ASId;
1142 auto ASPointers = AS.getPointers();
1143
1144 // We assign consecutive ids to accesses from different dependence sets.
1145 // Accesses within the same set don't need a runtime check.
1146 unsigned RunningDepId = 1;
1147 DenseMap<Value *, unsigned> DepSetId;
1148
1149 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1150
1151 // First, count how many write and read accesses are in the alias set. Also
1152 // collect MemAccessInfos for later.
1153 SmallVector<MemAccessInfo, 4> AccessInfos;
1154 for (const Value *ConstPtr : ASPointers) {
1155 Value *Ptr = const_cast<Value *>(ConstPtr);
1156 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1157 if (IsWrite)
1158 ++NumWritePtrChecks;
1159 else
1160 ++NumReadPtrChecks;
1161 AccessInfos.emplace_back(Ptr, IsWrite);
1162 }
1163
1164 // We do not need runtime checks for this alias set, if there are no writes
1165 // or a single write and no reads.
1166 if (NumWritePtrChecks == 0 ||
1167 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1168 assert((ASPointers.size() <= 1 ||
1169 all_of(ASPointers,
1170 [this](const Value *Ptr) {
1171 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1172 true);
1173 return DepCands.findValue(AccessWrite) == DepCands.end();
1174 })) &&
1175 "Can only skip updating CanDoRT below, if all entries in AS "
1176 "are reads or there is at most 1 entry");
1177 continue;
1178 }
1179
1180 for (auto &Access : AccessInfos) {
1181 for (const auto &AccessTy : Accesses[Access]) {
1182 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1183 DepSetId, TheLoop, RunningDepId, ASId,
1184 ShouldCheckWrap, false)) {
1185 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1186 << *Access.getPointer() << '\n');
1187 Retries.emplace_back(Access, AccessTy);
1188 CanDoAliasSetRT = false;
1189 }
1190 }
1191 }
1192
1193 // Note that this function computes CanDoRT and MayNeedRTCheck
1194 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1195 // we have a pointer for which we couldn't find the bounds but we don't
1196 // actually need to emit any checks so it does not matter.
1197 //
1198 // We need runtime checks for this alias set, if there are at least 2
1199 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1200 // any bound checks (because in that case the number of dependence sets is
1201 // incomplete).
1202 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1203
1204 // We need to perform run-time alias checks, but some pointers had bounds
1205 // that couldn't be checked.
1206 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1207 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1208 // We know that we need these checks, so we can now be more aggressive
1209 // and add further checks if required (overflow checks).
1210 CanDoAliasSetRT = true;
1211 for (const auto &[Access, AccessTy] : Retries) {
1212 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1213 DepSetId, TheLoop, RunningDepId, ASId,
1214 ShouldCheckWrap, /*Assume=*/true)) {
1215 CanDoAliasSetRT = false;
1216 UncomputablePtr = Access.getPointer();
1217 break;
1218 }
1219 }
1220 }
1221
1222 CanDoRT &= CanDoAliasSetRT;
1223 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1224 ++ASId;
1225 }
1226
1227 // If the pointers that we would use for the bounds comparison have different
1228 // address spaces, assume the values aren't directly comparable, so we can't
1229 // use them for the runtime check. We also have to assume they could
1230 // overlap. In the future there should be metadata for whether address spaces
1231 // are disjoint.
1232 unsigned NumPointers = RtCheck.Pointers.size();
1233 for (unsigned i = 0; i < NumPointers; ++i) {
1234 for (unsigned j = i + 1; j < NumPointers; ++j) {
1235 // Only need to check pointers between two different dependency sets.
1236 if (RtCheck.Pointers[i].DependencySetId ==
1237 RtCheck.Pointers[j].DependencySetId)
1238 continue;
1239 // Only need to check pointers in the same alias set.
1240 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1241 continue;
1242
1243 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1244 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1245
1246 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1247 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1248 if (ASi != ASj) {
1249 LLVM_DEBUG(
1250 dbgs() << "LAA: Runtime check would require comparison between"
1251 " different address spaces\n");
1252 return false;
1253 }
1254 }
1255 }
1256
1257 if (MayNeedRTCheck && CanDoRT)
1258 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1259
1260 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1261 << " pointer comparisons.\n");
1262
1263 // If we can do run-time checks, but there are no checks, no runtime checks
1264 // are needed. This can happen when all pointers point to the same underlying
1265 // object for example.
1266 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1267
1268 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1269 if (!CanDoRTIfNeeded)
1270 RtCheck.reset();
1271 return CanDoRTIfNeeded;
1272}
1273
1274void AccessAnalysis::processMemAccesses() {
1275 // We process the set twice: first we process read-write pointers, last we
1276 // process read-only pointers. This allows us to skip dependence tests for
1277 // read-only pointers.
1278
1279 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1280 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1281 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1282 LLVM_DEBUG({
1283 for (const auto &[A, _] : Accesses)
1284 dbgs() << "\t" << *A.getPointer() << " ("
1285 << (A.getInt() ? "write"
1286 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1287 : "read"))
1288 << ")\n";
1289 });
1290
1291 // The AliasSetTracker has nicely partitioned our pointers by metadata
1292 // compatibility and potential for underlying-object overlap. As a result, we
1293 // only need to check for potential pointer dependencies within each alias
1294 // set.
1295 for (const auto &AS : AST) {
1296 // Note that both the alias-set tracker and the alias sets themselves use
1297 // ordered collections internally and so the iteration order here is
1298 // deterministic.
1299 auto ASPointers = AS.getPointers();
1300
1301 bool SetHasWrite = false;
1302
1303 // Map of pointers to last access encountered.
1304 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1305 UnderlyingObjToAccessMap ObjToLastAccess;
1306
1307 // Set of access to check after all writes have been processed.
1308 PtrAccessMap DeferredAccesses;
1309
1310 // Iterate over each alias set twice, once to process read/write pointers,
1311 // and then to process read-only pointers.
1312 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1313 bool UseDeferred = SetIteration > 0;
1314 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1315
1316 for (const Value *ConstPtr : ASPointers) {
1317 Value *Ptr = const_cast<Value *>(ConstPtr);
1318
1319 // For a single memory access in AliasSetTracker, Accesses may contain
1320 // both read and write, and they both need to be handled for CheckDeps.
1321 for (const auto &[AC, _] : S) {
1322 if (AC.getPointer() != Ptr)
1323 continue;
1324
1325 bool IsWrite = AC.getInt();
1326
1327 // If we're using the deferred access set, then it contains only
1328 // reads.
1329 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1330 if (UseDeferred && !IsReadOnlyPtr)
1331 continue;
1332 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1333 // read or a write.
1334 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1335 S.count(MemAccessInfo(Ptr, false))) &&
1336 "Alias-set pointer not in the access set?");
1337
1338 MemAccessInfo Access(Ptr, IsWrite);
1339 DepCands.insert(Access);
1340
1341 // Memorize read-only pointers for later processing and skip them in
1342 // the first round (they need to be checked after we have seen all
1343 // write pointers). Note: we also mark pointers that are not
1344 // consecutive as "read-only" pointers (so that we check
1345 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1346 if (!UseDeferred && IsReadOnlyPtr) {
1347 // We only use the pointer keys, the types vector values don't
1348 // matter.
1349 DeferredAccesses.insert({Access, {}});
1350 continue;
1351 }
1352
1353 // If this is a write - check other reads and writes for conflicts. If
1354 // this is a read only check other writes for conflicts (but only if
1355 // there is no other write to the ptr - this is an optimization to
1356 // catch "a[i] = a[i] + " without having to do a dependence check).
1357 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1358 CheckDeps.push_back(Access);
1359 IsRTCheckAnalysisNeeded = true;
1360 }
1361
1362 if (IsWrite)
1363 SetHasWrite = true;
1364
1365 // Create sets of pointers connected by a shared alias set and
1366 // underlying object.
1367 typedef SmallVector<const Value *, 16> ValueVector;
1368 ValueVector TempObjects;
1369
1370 UnderlyingObjects[Ptr] = {};
1371 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1372 ::getUnderlyingObjects(Ptr, UOs, LI);
1374 << "Underlying objects for pointer " << *Ptr << "\n");
1375 for (const Value *UnderlyingObj : UOs) {
1376 // nullptr never alias, don't join sets for pointer that have "null"
1377 // in their UnderlyingObjects list.
1378 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1379 !NullPointerIsDefined(
1380 TheLoop->getHeader()->getParent(),
1381 UnderlyingObj->getType()->getPointerAddressSpace()))
1382 continue;
1383
1384 UnderlyingObjToAccessMap::iterator Prev =
1385 ObjToLastAccess.find(UnderlyingObj);
1386 if (Prev != ObjToLastAccess.end())
1387 DepCands.unionSets(Access, Prev->second);
1388
1389 ObjToLastAccess[UnderlyingObj] = Access;
1390 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1391 }
1392 }
1393 }
1394 }
1395 }
1396}
1397
1398/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1399/// i.e. monotonically increasing/decreasing.
1400static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1401 PredicatedScalarEvolution &PSE, const Loop *L) {
1402
1403 // FIXME: This should probably only return true for NUW.
1404 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1405 return true;
1406
1407 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1408 return true;
1409
1410 // Scalar evolution does not propagate the non-wrapping flags to values that
1411 // are derived from a non-wrapping induction variable because non-wrapping
1412 // could be flow-sensitive.
1413 //
1414 // Look through the potentially overflowing instruction to try to prove
1415 // non-wrapping for the *specific* value of Ptr.
1416
1417 // The arithmetic implied by an inbounds GEP can't overflow.
1418 const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1419 if (!GEP || !GEP->isInBounds())
1420 return false;
1421
1422 // Make sure there is only one non-const index and analyze that.
1423 Value *NonConstIndex = nullptr;
1424 for (Value *Index : GEP->indices())
1425 if (!isa<ConstantInt>(Index)) {
1426 if (NonConstIndex)
1427 return false;
1428 NonConstIndex = Index;
1429 }
1430 if (!NonConstIndex)
1431 // The recurrence is on the pointer, ignore for now.
1432 return false;
1433
1434 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1435 // AddRec using a NSW operation.
1436 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1437 if (OBO->hasNoSignedWrap() &&
1438 // Assume the other operand is constant so that the AddRec can be
1439 // easily found.
1440 isa<ConstantInt>(OBO->getOperand(1))) {
1441 const SCEV *OpScev = PSE.getSCEV(OBO->getOperand(0));
1442
1443 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1444 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1445 }
1446
1447 return false;
1448}
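// For instance (sketch), with %iv an NSW AddRec for this loop:
//   %mul = mul nsw i64 %iv, 4
//   %gep = getelementptr inbounds i32, ptr %A, i64 %mul
// the inbounds GEP plus the NSW multiply by a constant lets the code above
// conclude that %gep's address stream does not wrap, even though SCEV did not
// propagate NUW/NSW to the pointer AddRec itself.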
1449
1450/// Check whether the access through \p Ptr has a constant stride.
1451std::optional<int64_t>
1452llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
1453 const Loop *Lp,
1454 const DenseMap<Value *, const SCEV *> &StridesMap,
1455 bool Assume, bool ShouldCheckWrap) {
1456 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1457 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1458 return {0};
1459
1460 Type *Ty = Ptr->getType();
1461 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1462 if (isa<ScalableVectorType>(AccessTy)) {
1463 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1464 << "\n");
1465 return std::nullopt;
1466 }
1467
1468 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1469 if (Assume && !AR)
1470 AR = PSE.getAsAddRec(Ptr);
1471
1472 if (!AR) {
1473 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1474 << " SCEV: " << *PtrScev << "\n");
1475 return std::nullopt;
1476 }
1477
1478 // The access function must stride over the innermost loop.
1479 if (Lp != AR->getLoop()) {
1480 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1481 << *Ptr << " SCEV: " << *AR << "\n");
1482 return std::nullopt;
1483 }
1484
1485 // Check the step is constant.
1486 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1487
1488 // Calculate the pointer stride and check if it is constant.
1489 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1490 if (!C) {
1491 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1492 << " SCEV: " << *AR << "\n");
1493 return std::nullopt;
1494 }
1495
1496 const auto &DL = Lp->getHeader()->getDataLayout();
1497 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1498 int64_t Size = AllocSize.getFixedValue();
1499 const APInt &APStepVal = C->getAPInt();
1500
1501 // Huge step value - give up.
1502 if (APStepVal.getBitWidth() > 64)
1503 return std::nullopt;
1504
1505 int64_t StepVal = APStepVal.getSExtValue();
1506
1507 // Strided access.
1508 int64_t Stride = StepVal / Size;
1509 int64_t Rem = StepVal % Size;
1510 if (Rem)
1511 return std::nullopt;
1512
1513 if (!ShouldCheckWrap)
1514 return Stride;
1515
1516 // The address calculation must not wrap. Otherwise, a dependence could be
1517 // inverted.
1518 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1519 return Stride;
1520
1521 // An inbounds getelementptr that is an AddRec with a unit stride
1522 // cannot wrap per definition. If it did, the result would be poison
1523 // and any memory access dependent on it would be immediate UB
1524 // when executed.
1525 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1526 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1527 return Stride;
1528
1529 // If the null pointer is undefined, then an access sequence which would
1530 // otherwise access it can be assumed not to unsigned wrap. Note that this
1531 // assumes the object in memory is aligned to the natural alignment.
1532 unsigned AddrSpace = Ty->getPointerAddressSpace();
1533 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1534 (Stride == 1 || Stride == -1))
1535 return Stride;
1536
1537 if (Assume) {
1538 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1539 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1540 << "LAA: Pointer: " << *Ptr << "\n"
1541 << "LAA: SCEV: " << *AR << "\n"
1542 << "LAA: Added an overflow assumption\n");
1543 return Stride;
1544 }
1545 LLVM_DEBUG(
1546 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1547 << *Ptr << " SCEV: " << *AR << "\n");
1548 return std::nullopt;
1549}
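// Worked example: for an i32 access whose pointer SCEV is {%A,+,8}<%loop>,
// StepVal = 8 and Size = 4, so the returned stride is 2 elements per
// iteration; a step of 6 would leave a non-zero remainder and return
// std::nullopt.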
1550
1551std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1552 Type *ElemTyB, Value *PtrB,
1553 const DataLayout &DL,
1554 ScalarEvolution &SE, bool StrictCheck,
1555 bool CheckType) {
1556 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1557
1558 // Make sure that A and B are different pointers.
1559 if (PtrA == PtrB)
1560 return 0;
1561
1562 // Make sure that the element types are the same if required.
1563 if (CheckType && ElemTyA != ElemTyB)
1564 return std::nullopt;
1565
1566 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1567 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1568
1569 // Check that the address spaces match.
1570 if (ASA != ASB)
1571 return std::nullopt;
1572 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1573
1574 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1575 const Value *PtrA1 =
1576 PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1577 const Value *PtrB1 =
1578 PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1579
1580 int Val;
1581 if (PtrA1 == PtrB1) {
1582 // Retrieve the address space again as pointer stripping now tracks through
1583 // `addrspacecast`.
1584 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1585 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1586 // Check that the address spaces match and that the pointers are valid.
1587 if (ASA != ASB)
1588 return std::nullopt;
1589
1590 IdxWidth = DL.getIndexSizeInBits(ASA);
1591 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1592 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1593
1594 OffsetB -= OffsetA;
1595 Val = OffsetB.getSExtValue();
1596 } else {
1597 // Otherwise compute the distance with SCEV between the base pointers.
1598 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1599 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1600 std::optional<APInt> Diff =
1601 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1602 if (!Diff)
1603 return std::nullopt;
1604 Val = Diff->getSExtValue();
1605 }
1606 int Size = DL.getTypeStoreSize(ElemTyA);
1607 int Dist = Val / Size;
1608
1609 // Ensure that the calculated distance matches the type-based one after all
1610 // bitcasts have been removed from the provided pointers.
1611 if (!StrictCheck || Dist * Size == Val)
1612 return Dist;
1613 return std::nullopt;
1614}
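// Illustrative example for getPointersDiff (hypothetical values): with i32
// elements and PtrB exactly 12 bytes past PtrA, Val is 12 and Size is 4, so
// the returned distance is 3 elements; with StrictCheck, a 5-byte offset
// would yield std::nullopt because 1 * 4 != 5.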
1615
1616bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1617 const DataLayout &DL, ScalarEvolution &SE,
1618 SmallVectorImpl<unsigned> &SortedIndices) {
1619 assert(llvm::all_of(
1620 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1621 "Expected list of pointer operands.");
1622 // Walk over the pointers, and map each of them to an offset relative to
1623 // the first pointer in the array.
1624 Value *Ptr0 = VL[0];
1625
1626 using DistOrdPair = std::pair<int64_t, int>;
1627 auto Compare = llvm::less_first();
1628 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1629 Offsets.emplace(0, 0);
1630 bool IsConsecutive = true;
1631 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1632 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1633 /*StrictCheck=*/true);
1634 if (!Diff)
1635 return false;
1636
1637 // Check if the pointer with the same offset is found.
1638 int64_t Offset = *Diff;
1639 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1640 if (!IsInserted)
1641 return false;
1642 // Consecutive order if the inserted element is the last one.
1643 IsConsecutive &= std::next(It) == Offsets.end();
1644 }
1645 SortedIndices.clear();
1646 if (!IsConsecutive) {
1647 // Fill SortedIndices array only if it is non-consecutive.
1648 SortedIndices.resize(VL.size());
1649 for (auto [Idx, Off] : enumerate(Offsets))
1650 SortedIndices[Idx] = Off.second;
1651 }
1652 return true;
1653}
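// Illustrative example for sortPtrAccesses (hypothetical input): i32 pointers
// at byte offsets {0, 8, 4} relative to VL[0] have element offsets {0, 2, 1},
// are not consecutive as given, and produce SortedIndices = {0, 2, 1} so that
// VL[SortedIndices[I]] visits the pointers in increasing address order.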
1654
1655/// Returns true if the memory operations \p A and \p B are consecutive.
1656bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1657 ScalarEvolution &SE, bool CheckType) {
1658 Value *PtrA = getLoadStorePointerOperand(A);
1659 Value *PtrB = getLoadStorePointerOperand(B);
1660 if (!PtrA || !PtrB)
1661 return false;
1662 Type *ElemTyA = getLoadStoreType(A);
1663 Type *ElemTyB = getLoadStoreType(B);
1664 std::optional<int> Diff =
1665 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1666 /*StrictCheck=*/true, CheckType);
1667 return Diff && *Diff == 1;
1668}
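// Illustrative example for isConsecutiveAccess (hypothetical IR): two i32
// loads of A[i] and A[i+1] have a pointer difference of exactly one element
// and return true; a two-element gap, or mismatched element types when
// CheckType is set, returns false.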
1669
1670void MemoryDepChecker::addAccess(StoreInst *SI) {
1671 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1672 [this, SI](Value *Ptr) {
1673 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1674 InstMap.push_back(SI);
1675 ++AccessIdx;
1676 });
1677}
1678
1679void MemoryDepChecker::addAccess(LoadInst *LI) {
1680 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1681 [this, LI](Value *Ptr) {
1682 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1683 InstMap.push_back(LI);
1684 ++AccessIdx;
1685 });
1686}
1687
1688MemoryDepChecker::VectorizationSafetyStatus
1689MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1690 switch (Type) {
1691 case NoDep:
1692 case Forward:
1693 case BackwardVectorizable:
1694 return VectorizationSafetyStatus::Safe;
1695
1696 case Unknown:
1697 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1698 case ForwardButPreventsForwarding:
1699 case Backward:
1700 case BackwardVectorizableButPreventsForwarding:
1701 case IndirectUnsafe:
1702 return VectorizationSafetyStatus::Unsafe;
1703 }
1704 llvm_unreachable("unexpected DepType!");
1705}
1706
1707bool MemoryDepChecker::Dependence::isBackward() const {
1708 switch (Type) {
1709 case NoDep:
1710 case Forward:
1711 case ForwardButPreventsForwarding:
1712 case Unknown:
1713 case IndirectUnsafe:
1714 return false;
1715
1716 case BackwardVectorizable:
1717 case Backward:
1718 case BackwardVectorizableButPreventsForwarding:
1719 return true;
1720 }
1721 llvm_unreachable("unexpected DepType!");
1722}
1723
1724bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1725 return isBackward() || Type == Unknown || Type == IndirectUnsafe;
1726}
1727
1728bool MemoryDepChecker::Dependence::isForward() const {
1729 switch (Type) {
1730 case Forward:
1731 case ForwardButPreventsForwarding:
1732 return true;
1733
1734 case NoDep:
1735 case Unknown:
1736 case BackwardVectorizable:
1737 case Backward:
1738 case BackwardVectorizableButPreventsForwarding:
1739 case IndirectUnsafe:
1740 return false;
1741 }
1742 llvm_unreachable("unexpected DepType!");
1743}
1744
1745bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1746 uint64_t TypeByteSize) {
1747 // If loads occur at a distance that is not a multiple of a feasible vector
1748 // factor, store-load forwarding does not take place.
1749 // Positive dependences might cause trouble because vectorizing them might
1750 // prevent store-load forwarding making vectorized code run a lot slower.
1751 // a[i] = a[i-3] ^ a[i-8];
1752 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1753 // hence on your typical architecture store-load forwarding does not take
1754 // place. Vectorizing in such cases does not make sense.
1755 // Store-load forwarding distance.
1756
1757 // After this many iterations store-to-load forwarding conflicts should not
1758 // cause any slowdowns.
1759 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1760 // Maximum vector factor.
1761 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1762 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1763
1764 // Compute the smallest VF at which the store and load would be misaligned.
1765 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1766 VF *= 2) {
1767 // If the number of vector iterations between the store and the load is
1768 // small, we could incur conflicts.
1769 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1770 MaxVFWithoutSLForwardIssues = (VF >> 1);
1771 break;
1772 }
1773 }
1774
1775 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1776 LLVM_DEBUG(
1777 dbgs() << "LAA: Distance " << Distance
1778 << " that could cause a store-load forwarding conflict\n");
1779 return true;
1780 }
1781
1782 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1783 MaxVFWithoutSLForwardIssues !=
1784 VectorizerParams::MaxVectorWidth * TypeByteSize)
1785 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1786 return false;
1787}
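// Illustrative example for couldPreventStoreLoadForward (hypothetical values):
// with TypeByteSize = 4, a distance of 12 bytes is misaligned already at
// VF = 8 (12 % 8 != 0 and 12 / 8 is far below
// NumItersForStoreLoadThroughMemory), leaving no feasible vector factor, so a
// conflict is reported; a distance of 16 bytes only becomes misaligned at
// VF = 32, so forwarding remains safe for vector widths up to 16 bytes.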
1788
1789void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1790 if (Status < S)
1791 Status = S;
1792}
1793
1794/// Given a dependence-distance \p Dist between two
1795/// memory accesses that have strides in the same direction, whose maximum
1796/// absolute stride is given in \p MaxStride, and that have the same
1797/// type size \p TypeByteSize, in a loop whose maximum backedge taken count is
1798/// \p MaxBTC, check if it is possible to prove statically that the dependence
1799/// distance is larger than the range that the accesses will travel through the
1800/// execution of the loop. If so, return true; false otherwise. This is useful
1801/// for example in loops such as the following (PR31098):
1802/// for (i = 0; i < D; ++i) {
1803/// = out[i];
1804/// out[i+D] =
1805/// }
1806static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1807 const SCEV &MaxBTC, const SCEV &Dist,
1808 uint64_t MaxStride,
1809 uint64_t TypeByteSize) {
1810
1811 // If we can prove that
1812 // (**) |Dist| > MaxBTC * Step
1813 // where Step is the absolute stride of the memory accesses in bytes,
1814 // then there is no dependence.
1815 //
1816 // Rationale:
1817 // We basically want to check if the absolute distance (|Dist/Step|)
1818 // is >= the loop iteration count (or > MaxBTC).
1819 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1820 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1821 // that the dependence distance is >= VF; This is checked elsewhere.
1822 // But in some cases we can prune dependence distances early, and
1823 // even before selecting the VF, and without a runtime test, by comparing
1824 // the distance against the loop iteration count. Since the vectorized code
1825 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1826 // also guarantees that distance >= VF.
1827 //
1828 const uint64_t ByteStride = MaxStride * TypeByteSize;
1829 const SCEV *Step = SE.getConstant(MaxBTC.getType(), ByteStride);
1830 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1831
1832 const SCEV *CastedDist = &Dist;
1833 const SCEV *CastedProduct = Product;
1834 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1835 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1836
1837 // The dependence distance can be positive/negative, so we sign extend Dist;
1838 // The multiplication of the absolute stride in bytes and the
1839 // backedgeTakenCount is non-negative, so we zero extend Product.
1840 if (DistTypeSizeBits > ProductTypeSizeBits)
1841 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1842 else
1843 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1844
1845 // Is Dist - (MaxBTC * Step) > 0 ?
1846 // (If so, then we have proven (**) because |Dist| >= Dist)
1847 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1848 if (SE.isKnownPositive(Minus))
1849 return true;
1850
1851 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1852 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1853 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1854 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1855 return SE.isKnownPositive(Minus);
1856}
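// Illustrative example for isSafeDependenceDistance (hypothetical loop): in
// the PR31098 pattern from the comment above with D == 100 and i32 elements,
// Dist is 400 bytes while MaxBTC * Step is 99 * 4 == 396 bytes, so
// Dist - MaxBTC * Step is known positive and the accesses are proven
// independent without a runtime check.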
1857
1858/// Check the dependence for two accesses with the same stride \p Stride.
1859/// \p Distance is the positive distance and \p TypeByteSize is type size in
1860/// bytes.
1861///
1862/// \returns true if they are independent.
1863static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1864 uint64_t TypeByteSize) {
1865 assert(Stride > 1 && "The stride must be greater than 1");
1866 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1867 assert(Distance > 0 && "The distance must be non-zero");
1868
1869 // Skip if the distance is not multiple of type byte size.
1870 if (Distance % TypeByteSize)
1871 return false;
1872
1873 uint64_t ScaledDist = Distance / TypeByteSize;
1874
1875 // No dependence if the scaled distance is not multiple of the stride.
1876 // E.g.
1877 // for (i = 0; i < 1024 ; i += 4)
1878 // A[i+2] = A[i] + 1;
1879 //
1880 // Two accesses in memory (scaled distance is 2, stride is 4):
1881 // | A[0] | | | | A[4] | | | |
1882 // | | | A[2] | | | | A[6] | |
1883 //
1884 // E.g.
1885 // for (i = 0; i < 1024 ; i += 3)
1886 // A[i+4] = A[i] + 1;
1887 //
1888 // Two accesses in memory (scaled distance is 4, stride is 3):
1889 // | A[0] | | | A[3] | | | A[6] | | |
1890 // | | | | | A[4] | | | A[7] | |
1891 return ScaledDist % Stride;
1892}
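// Illustrative reading of the return value above (hypothetical numbers): a
// scaled distance of 2 with stride 4 gives 2 % 4 == 2, i.e. independent, while
// a scaled distance of 8 with stride 4 gives 0, i.e. possibly dependent.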
1893
1894std::variant<MemoryDepChecker::Dependence::DepType,
1895 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
1896MemoryDepChecker::getDependenceDistanceStrideAndSize(
1897 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
1898 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
1899 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
1900 auto &SE = *PSE.getSE();
1901 const auto &[APtr, AIsWrite] = A;
1902 const auto &[BPtr, BIsWrite] = B;
1903
1904 // Two reads are independent.
1905 if (!AIsWrite && !BIsWrite)
1906 return MemoryDepChecker::Dependence::NoDep;
1907
1908 Type *ATy = getLoadStoreType(AInst);
1909 Type *BTy = getLoadStoreType(BInst);
1910
1911 // We cannot check pointers in different address spaces.
1912 if (APtr->getType()->getPointerAddressSpace() !=
1913 BPtr->getType()->getPointerAddressSpace())
1914 return MemoryDepChecker::Dependence::Unknown;
1915
1916 std::optional<int64_t> StrideAPtr =
1917 getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true, true);
1918 std::optional<int64_t> StrideBPtr =
1919 getPtrStride(PSE, BTy, BPtr, InnermostLoop, SymbolicStrides, true, true);
1920
1921 const SCEV *Src = PSE.getSCEV(APtr);
1922 const SCEV *Sink = PSE.getSCEV(BPtr);
1923
1924 // If the induction step is negative we have to invert source and sink of the
1925 // dependence when measuring the distance between them. We should not swap
1926 // AIsWrite with BIsWrite, as their uses expect them in program order.
1927 if (StrideAPtr && *StrideAPtr < 0) {
1928 std::swap(Src, Sink);
1929 std::swap(AInst, BInst);
1930 std::swap(StrideAPtr, StrideBPtr);
1931 }
1932
1933 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1934
1935 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1936 << "\n");
1937 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1938 << ": " << *Dist << "\n");
1939
1940 // Check if we can prove that Sink only accesses memory after Src's end or
1941 // vice versa. At the moment this is limited to cases where either source or
1942 // sink are loop invariant to avoid compile-time increases. This is not
1943 // required for correctness.
1944 if (SE.isLoopInvariant(Src, InnermostLoop) ||
1945 SE.isLoopInvariant(Sink, InnermostLoop)) {
1946 const auto &[SrcStart, SrcEnd] =
1947 getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE, PointerBounds);
1948 const auto &[SinkStart, SinkEnd] =
1949 getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE, PointerBounds);
1950 if (!isa<SCEVCouldNotCompute>(SrcStart) &&
1951 !isa<SCEVCouldNotCompute>(SrcEnd) &&
1952 !isa<SCEVCouldNotCompute>(SinkStart) &&
1953 !isa<SCEVCouldNotCompute>(SinkEnd)) {
1954 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1955 return MemoryDepChecker::Dependence::NoDep;
1956 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
1957 return MemoryDepChecker::Dependence::NoDep;
1958 }
1959 }
1960
1961 // Need accesses with constant strides and the same direction for further
1962 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
1963 // similar code or pointer arithmetic that could wrap in the address space.
1964
1965 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
1966 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
1967 // dependence further and also cannot generate runtime checks.
1968 if (!StrideAPtr || !StrideBPtr) {
1969 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1970 return MemoryDepChecker::Dependence::IndirectUnsafe;
1971 }
1972
1973 int64_t StrideAPtrInt = *StrideAPtr;
1974 int64_t StrideBPtrInt = *StrideBPtr;
1975 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
1976 << " Sink induction step: " << StrideBPtrInt << "\n");
1977 // At least one of Src and Sink is loop invariant and the other is strided or
1978 // invariant. We can generate a runtime check to disambiguate the accesses.
1979 if (StrideAPtrInt == 0 || StrideBPtrInt == 0)
1980 return MemoryDepChecker::Dependence::Unknown;
1981
1982 // Both Src and Sink have a constant stride, check if they are in the same
1983 // direction.
1984 if ((StrideAPtrInt > 0 && StrideBPtrInt < 0) ||
1985 (StrideAPtrInt < 0 && StrideBPtrInt > 0)) {
1986 LLVM_DEBUG(
1987 dbgs() << "Pointer access with strides in different directions\n");
1988 return MemoryDepChecker::Dependence::Unknown;
1989 }
1990
1991 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1992 bool HasSameSize =
1993 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1994 if (!HasSameSize)
1995 TypeByteSize = 0;
1996 return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtrInt),
1997 std::abs(StrideBPtrInt), TypeByteSize,
1998 AIsWrite, BIsWrite);
1999}
2000
2002MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2003 const MemAccessInfo &B, unsigned BIdx) {
2004 assert(AIdx < BIdx && "Must pass arguments in program order");
2005
2006 // Get the dependence distance, stride, type size and what access writes for
2007 // the dependence between A and B.
2008 auto Res =
2009 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2010 if (std::holds_alternative<Dependence::DepType>(Res))
2011 return std::get<Dependence::DepType>(Res);
2012
2013 auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2014 std::get<DepDistanceStrideAndSizeInfo>(Res);
2015 bool HasSameSize = TypeByteSize > 0;
2016
2017 std::optional<uint64_t> CommonStride =
2018 StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2019 if (isa<SCEVCouldNotCompute>(Dist)) {
2020 // TODO: Relax requirement that there is a common stride to retry with
2021 // non-constant distance dependencies.
2022 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2023 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2024 return Dependence::Unknown;
2025 }
2026
2027 ScalarEvolution &SE = *PSE.getSE();
2028 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2029 uint64_t MaxStride = std::max(StrideA, StrideB);
2030
2031 // If the distance between the accesses is larger than their maximum absolute
2032 // stride multiplied by the symbolic maximum backedge taken count (which is an
2033 // upper bound of the number of iterations), the accesses are independent, i.e.
2034 // they are far enough apart that accesses won't access the same location
2035 // across all loop iterations.
2036 if (HasSameSize && isSafeDependenceDistance(
2037 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()),
2038 *Dist, MaxStride, TypeByteSize))
2039 return Dependence::NoDep;
2040
2041 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2042
2043 // Attempt to prove strided accesses independent.
2044 if (C) {
2045 const APInt &Val = C->getAPInt();
2046 int64_t Distance = Val.getSExtValue();
2047
2048 // If the distance between accesses and their strides are known constants,
2049 // check whether the accesses interlace each other.
2050 if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2051 HasSameSize &&
2052 areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2053 TypeByteSize)) {
2054 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2055 return Dependence::NoDep;
2056 }
2057 } else {
2058 if (!LoopGuards)
2059 LoopGuards.emplace(
2060 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2061 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2062 }
2063
2064 // Negative distances are not plausible dependencies.
2065 if (SE.isKnownNonPositive(Dist)) {
2066 if (SE.isKnownNonNegative(Dist)) {
2067 if (HasSameSize) {
2068 // Write to the same location with the same size.
2069 return Dependence::Forward;
2070 }
2071 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2072 "different type sizes\n");
2073 return Dependence::Unknown;
2074 }
2075
2076 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2077 // Check if the first access writes to a location that is read in a later
2078 // iteration, where the distance between them is not a multiple of a vector
2079 // factor and relatively small.
2080 //
2081 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2082 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2083 // forward dependency will allow vectorization using any width.
2084
2085 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2086 if (!C) {
2087 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2088 // condition to consider retrying with runtime checks. Historically, we
2089 // did not set it when strides were different but there is no inherent
2090 // reason to.
2091 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2092 return Dependence::Unknown;
2093 }
2094 if (!HasSameSize ||
2095 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2096 TypeByteSize)) {
2097 LLVM_DEBUG(
2098 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2099 return Dependence::ForwardButPreventsForwarding;
2100 }
2101 }
2102
2103 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2104 return Dependence::Forward;
2105 }
2106
2107 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2108 // Below we only handle strictly positive distances.
2109 if (MinDistance <= 0) {
2110 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2111 return Dependence::Unknown;
2112 }
2113
2114 if (!isa<SCEVConstant>(Dist)) {
2115 // Previously this case would be treated as Unknown, possibly setting
2116 // FoundNonConstantDistanceDependence to force re-trying with runtime
2117 // checks. Until the TODO below is addressed, set it here to preserve
2118 // original behavior w.r.t. re-trying with runtime checks.
2119 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2120 // condition to consider retrying with runtime checks. Historically, we
2121 // did not set it when strides were different but there is no inherent
2122 // reason to.
2123 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2124 }
2125
2126 if (!HasSameSize) {
2127 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2128 "different type sizes\n");
2129 return Dependence::Unknown;
2130 }
2131
2132 if (!CommonStride)
2133 return Dependence::Unknown;
2134
2135 // Bail out early if passed-in parameters make vectorization not feasible.
2136 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2137 VectorizerParams::VectorizationFactor : 1);
2138 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2139 VectorizerParams::VectorizationInterleave : 1);
2140 // The minimum number of iterations for a vectorized/unrolled version.
2141 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2142
2143 // It's not vectorizable if the distance is smaller than the minimum distance
2144 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2145 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2146 // TypeByteSize (no need to add the last gap distance).
2147 //
2148 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2149 // foo(int *A) {
2150 // int *B = (int *)((char *)A + 14);
2151 // for (i = 0 ; i < 1024 ; i += 2)
2152 // B[i] = A[i] + 1;
2153 // }
2154 //
2155 // Two accesses in memory (stride is 2):
2156 // | A[0] | | A[2] | | A[4] | | A[6] | |
2157 // | B[0] | | B[2] | | B[4] |
2158 //
2159 // The minimum distance needed for the iterations other than the last one is
2160 // 4 * 2 * (MinNumIter - 1); the last iteration needs 4 more bytes.
2161 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2162 //
2163 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2164 // 12, which is less than distance.
2165 //
2166 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2167 // the minimum distance needed is 28, which is greater than distance. It is
2168 // not safe to do vectorization.
2169
2170 // We know that Dist is positive, but it may not be constant. Use the signed
2171 // minimum for computations below, as this ensures we compute the closest
2172 // possible dependence distance.
2173 uint64_t MinDistanceNeeded =
2174 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2175 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2176 if (!isa<SCEVConstant>(Dist)) {
2177 // For non-constant distances, we checked the lower bound of the
2178 // dependence distance and the distance may be larger at runtime (and safe
2179 // for vectorization). Classify it as Unknown, so we re-try with runtime
2180 // checks.
2181 return Dependence::Unknown;
2182 }
2183 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2184 << MinDistance << '\n');
2185 return Dependence::Backward;
2186 }
2187
2188 // Unsafe if the minimum distance needed is greater than the smallest
2189 // dependence distance.
2190 if (MinDistanceNeeded > MinDepDistBytes) {
2191 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2192 << MinDistanceNeeded << " size in bytes\n");
2193 return Dependence::Backward;
2194 }
2195
2196 // Positive distance bigger than max vectorization factor.
2197 // FIXME: Should use max factor instead of max distance in bytes, which could
2198 // not handle different types.
2199 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2200 // void foo (int *A, char *B) {
2201 // for (unsigned i = 0; i < 1024; i++) {
2202 // A[i+2] = A[i] + 1;
2203 // B[i+2] = B[i] + 1;
2204 // }
2205 // }
2206 //
2207 // This case is currently unsafe according to the max safe distance. If we
2208 // analyze the two accesses on array B, the max safe dependence distance
2209 // is 2. Then we analyze the accesses on array A, the minimum distance needed
2210 // is 8, which is greater than 2, so vectorization is forbidden. But actually
2211 // both A and B could be vectorized with a factor of 2.
2212 MinDepDistBytes =
2213 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2214
2215 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2216 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2217 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2218 isa<SCEVConstant>(Dist) &&
2219 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2220 // Sanity check that we didn't update MinDepDistBytes when calling
2221 // couldPreventStoreLoadForward
2222 assert(MinDepDistBytes == MinDepDistBytesOld &&
2223 "An update to MinDepDistBytes requires an update to "
2224 "MaxSafeVectorWidthInBits");
2225 (void)MinDepDistBytesOld;
2226 return Dependence::BackwardVectorizableButPreventsForwarding;
2227 }
2228
2229 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2230 // since there is a backwards dependency.
2231 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2232 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2233 << " with max VF = " << MaxVF << '\n');
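 // Illustrative example (hypothetical values): MinDepDistBytes = 16,
 // TypeByteSize = 4 and a common stride of 1 give MaxVF = 4 elements, i.e.
 // MaxVFInBits = 128 below.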
2234
2235 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2236 if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2237 // For non-constant distances, we checked the lower bound of the dependence
2238 // distance and the distance may be larger at runtime (and safe for
2239 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2240 return Dependence::Unknown;
2241 }
2242
2243 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2244 return Dependence::BackwardVectorizable;
2245}
2246
2247bool MemoryDepChecker::areDepsSafe(const DepCandidates &AccessSets,
2248 const MemAccessInfoList &CheckDeps) {
2249
2250 MinDepDistBytes = -1;
2251 SmallPtrSet<MemAccessInfo, 8> Visited;
2252 for (MemAccessInfo CurAccess : CheckDeps) {
2253 if (Visited.count(CurAccess))
2254 continue;
2255
2256 // Get the relevant memory access set.
2257 EquivalenceClasses<MemAccessInfo>::iterator I =
2258 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2259
2260 // Check accesses within this set.
2261 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2262 AccessSets.member_begin(I);
2263 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2264 AccessSets.member_end();
2265
2266 // Check every access pair.
2267 while (AI != AE) {
2268 Visited.insert(*AI);
2269 bool AIIsWrite = AI->getInt();
2270 // Check loads only against the next equivalence class, but stores also
2271 // against other stores in the same equivalence class - to the same address.
2272 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2273 (AIIsWrite ? AI : std::next(AI));
2274 while (OI != AE) {
2275 // Check every accessing instruction pair in program order.
2276 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2277 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2278 // Scan all accesses of another equivalence class, but only the next
2279 // accesses of the same equivalence class.
2280 for (std::vector<unsigned>::iterator
2281 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2282 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2283 I2 != I2E; ++I2) {
2284 auto A = std::make_pair(&*AI, *I1);
2285 auto B = std::make_pair(&*OI, *I2);
2286
2287 assert(*I1 != *I2);
2288 if (*I1 > *I2)
2289 std::swap(A, B);
2290
2291 Dependence::DepType Type =
2292 isDependent(*A.first, A.second, *B.first, B.second);
2293 mergeInStatus(Dependence::isSafeForVectorization(Type));
2294
2295 // Gather dependences unless we accumulated MaxDependences
2296 // dependences. In that case return as soon as we find the first
2297 // unsafe dependence. This puts a limit on this quadratic
2298 // algorithm.
2299 if (RecordDependences) {
2300 if (Type != Dependence::NoDep)
2301 Dependences.emplace_back(A.second, B.second, Type);
2302
2303 if (Dependences.size() >= MaxDependences) {
2304 RecordDependences = false;
2305 Dependences.clear();
2306 LLVM_DEBUG(dbgs()
2307 << "Too many dependences, stopped recording\n");
2308 }
2309 }
2310 if (!RecordDependences && !isSafeForVectorization())
2311 return false;
2312 }
2313 ++OI;
2314 }
2315 ++AI;
2316 }
2317 }
2318
2319 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2320 return isSafeForVectorization();
2321}
2322
2323SmallVector<Instruction *, 4>
2324MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2325 MemAccessInfo Access(Ptr, IsWrite);
2326 auto &IndexVector = Accesses.find(Access)->second;
2327
2328 SmallVector<Instruction *, 4> Insts;
2329 transform(IndexVector,
2330 std::back_inserter(Insts),
2331 [&](unsigned Idx) { return this->InstMap[Idx]; });
2332 return Insts;
2333}
2334
2335const char *MemoryDepChecker::Dependence::DepName[] = {
2336 "NoDep",
2337 "Unknown",
2338 "IndirectUnsafe",
2339 "Forward",
2340 "ForwardButPreventsForwarding",
2341 "Backward",
2342 "BackwardVectorizable",
2343 "BackwardVectorizableButPreventsForwarding"};
2344
2345void MemoryDepChecker::Dependence::print(
2346 raw_ostream &OS, unsigned Depth,
2347 const SmallVectorImpl<Instruction *> &Instrs) const {
2348 OS.indent(Depth) << DepName[Type] << ":\n";
2349 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2350 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2351}
2352
2353bool LoopAccessInfo::canAnalyzeLoop() {
2354 // We need to have a loop header.
2355 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2356 << TheLoop->getHeader()->getParent()->getName() << "' from "
2357 << TheLoop->getLocStr() << "\n");
2358
2359 // We can only analyze innermost loops.
2360 if (!TheLoop->isInnermost()) {
2361 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2362 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2363 return false;
2364 }
2365
2366 // We must have a single backedge.
2367 if (TheLoop->getNumBackEdges() != 1) {
2368 LLVM_DEBUG(
2369 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2370 recordAnalysis("CFGNotUnderstood")
2371 << "loop control flow is not understood by analyzer";
2372 return false;
2373 }
2374
2375 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2376 // count, which is an upper bound on the number of loop iterations. The loop
2377 // may execute fewer iterations, if it exits via an uncountable exit.
2378 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2379 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2380 recordAnalysis("CantComputeNumberOfIterations")
2381 << "could not determine number of loop iterations";
2382 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2383 return false;
2384 }
2385
2386 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2387 << TheLoop->getHeader()->getName() << "\n");
2388 return true;
2389}
2390
2391bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2392 const TargetLibraryInfo *TLI,
2393 DominatorTree *DT) {
2394 // Holds the Load and Store instructions.
2395 SmallVector<LoadInst *, 16> Loads;
2396 SmallVector<StoreInst *, 16> Stores;
2397 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2398
2399 // Holds all the different accesses in the loop.
2400 unsigned NumReads = 0;
2401 unsigned NumReadWrites = 0;
2402
2403 bool HasComplexMemInst = false;
2404
2405 // A runtime check is only legal to insert if there are no convergent calls.
2406 HasConvergentOp = false;
2407
2408 PtrRtChecking->Pointers.clear();
2409 PtrRtChecking->Need = false;
2410
2411 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2412
2413 const bool EnableMemAccessVersioningOfLoop =
2414 EnableMemAccessVersioning &&
2415 !TheLoop->getHeader()->getParent()->hasOptSize();
2416
2417 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2418 // loop info, as it may be arbitrary.
2419 LoopBlocksRPO RPOT(TheLoop);
2420 RPOT.perform(LI);
2421 for (BasicBlock *BB : RPOT) {
2422 // Scan the BB and collect legal loads and stores. Also detect any
2423 // convergent instructions.
2424 for (Instruction &I : *BB) {
2425 if (auto *Call = dyn_cast<CallBase>(&I)) {
2426 if (Call->isConvergent())
2427 HasConvergentOp = true;
2428 }
2429
2430 // With both a non-vectorizable memory instruction and a convergent
2431 // operation found in this loop, there is no reason to continue the search.
2432 if (HasComplexMemInst && HasConvergentOp)
2433 return false;
2434
2435 // Avoid hitting recordAnalysis multiple times.
2436 if (HasComplexMemInst)
2437 continue;
2438
2439 // Record alias scopes defined inside the loop.
2440 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2441 for (Metadata *Op : Decl->getScopeList()->operands())
2442 LoopAliasScopes.insert(cast<MDNode>(Op));
2443
2444 // Many math library functions read the rounding mode. We will only
2445 // vectorize a loop if it contains known function calls that don't set
2446 // the flag. Therefore, it is safe to ignore this read from memory.
2447 auto *Call = dyn_cast<CallInst>(&I);
2448 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2449 continue;
2450
2451 // If this is a load, save it. If this instruction can read from memory
2452 // but is not a load, then we quit. Notice that we don't handle function
2453 // calls that read or write.
2454 if (I.mayReadFromMemory()) {
2455 // If the function has an explicit vectorized counterpart, we can safely
2456 // assume that it can be vectorized.
2457 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2458 !VFDatabase::getMappings(*Call).empty())
2459 continue;
2460
2461 auto *Ld = dyn_cast<LoadInst>(&I);
2462 if (!Ld) {
2463 recordAnalysis("CantVectorizeInstruction", Ld)
2464 << "instruction cannot be vectorized";
2465 HasComplexMemInst = true;
2466 continue;
2467 }
2468 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2469 recordAnalysis("NonSimpleLoad", Ld)
2470 << "read with atomic ordering or volatile read";
2471 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2472 HasComplexMemInst = true;
2473 continue;
2474 }
2475 NumLoads++;
2476 Loads.push_back(Ld);
2477 DepChecker->addAccess(Ld);
2478 if (EnableMemAccessVersioningOfLoop)
2479 collectStridedAccess(Ld);
2480 continue;
2481 }
2482
2483 // Save 'store' instructions. Abort if other instructions write to memory.
2484 if (I.mayWriteToMemory()) {
2485 auto *St = dyn_cast<StoreInst>(&I);
2486 if (!St) {
2487 recordAnalysis("CantVectorizeInstruction", St)
2488 << "instruction cannot be vectorized";
2489 HasComplexMemInst = true;
2490 continue;
2491 }
2492 if (!St->isSimple() && !IsAnnotatedParallel) {
2493 recordAnalysis("NonSimpleStore", St)
2494 << "write with atomic ordering or volatile write";
2495 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2496 HasComplexMemInst = true;
2497 continue;
2498 }
2499 NumStores++;
2500 Stores.push_back(St);
2501 DepChecker->addAccess(St);
2502 if (EnableMemAccessVersioningOfLoop)
2503 collectStridedAccess(St);
2504 }
2505 } // Next instr.
2506 } // Next block.
2507
2508 if (HasComplexMemInst)
2509 return false;
2510
2511 // Now we have two lists that hold the loads and the stores.
2512 // Next, we find the pointers that they use.
2513
2514 // Check if we see any stores. If there are no stores, then we don't
2515 // care if the pointers are *restrict*.
2516 if (!Stores.size()) {
2517 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2518 return true;
2519 }
2520
2521 MemoryDepChecker::DepCandidates DependentAccesses;
2522 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2523 LoopAliasScopes);
2524
2525 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2526 // multiple times on the same object. If the ptr is accessed twice, once
2527 // for read and once for write, it will only appear once (on the write
2528 // list). This is okay, since we are going to check for conflicts between
2529 // writes and between reads and writes, but not between reads and reads.
2530 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2531
2532 // Record uniform store addresses to identify if we have multiple stores
2533 // to the same address.
2534 SmallPtrSet<Value *, 16> UniformStores;
2535
2536 for (StoreInst *ST : Stores) {
2537 Value *Ptr = ST->getPointerOperand();
2538
2539 if (isInvariant(Ptr)) {
2540 // Record store instructions to loop invariant addresses
2541 StoresToInvariantAddresses.push_back(ST);
2542 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2543 !UniformStores.insert(Ptr).second;
2544 }
2545
2546 // If we did *not* see this pointer before, insert it to the read-write
2547 // list. At this phase it is only a 'write' list.
2548 Type *AccessTy = getLoadStoreType(ST);
2549 if (Seen.insert({Ptr, AccessTy}).second) {
2550 ++NumReadWrites;
2551
2552 MemoryLocation Loc = MemoryLocation::get(ST);
2553 // The TBAA metadata could have a control dependency on the predication
2554 // condition, so we cannot rely on it when determining whether or not we
2555 // need runtime pointer checks.
2556 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2557 Loc.AATags.TBAA = nullptr;
2558
2559 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2560 [&Accesses, AccessTy, Loc](Value *Ptr) {
2561 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2562 Accesses.addStore(NewLoc, AccessTy);
2563 });
2564 }
2565 }
2566
2567 if (IsAnnotatedParallel) {
2568 LLVM_DEBUG(
2569 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2570 << "checks.\n");
2571 return true;
2572 }
2573
2574 for (LoadInst *LD : Loads) {
2575 Value *Ptr = LD->getPointerOperand();
2576 // If we did *not* see this pointer before, insert it to the
2577 // read list. If we *did* see it before, then it is already in
2578 // the read-write list. This allows us to vectorize expressions
2579 // such as A[i] += x; Because the address of A[i] is a read-write
2580 // pointer. This only works if the index of A[i] is consecutive.
2581 // If the address of i is unknown (for example A[B[i]]) then we may
2582 // read a few words, modify, and write a few words, and some of the
2583 // words may be written to the same address.
2584 bool IsReadOnlyPtr = false;
2585 Type *AccessTy = getLoadStoreType(LD);
2586 if (Seen.insert({Ptr, AccessTy}).second ||
2587 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2588 ++NumReads;
2589 IsReadOnlyPtr = true;
2590 }
2591
2592 // See if there is an unsafe dependency between a load from a uniform address
2593 // and a store to the same uniform address.
2594 if (UniformStores.count(Ptr)) {
2595 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2596 "load and uniform store to the same address!\n");
2597 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2598 }
2599
2600 MemoryLocation Loc = MemoryLocation::get(LD);
2601 // The TBAA metadata could have a control dependency on the predication
2602 // condition, so we cannot rely on it when determining whether or not we
2603 // need runtime pointer checks.
2604 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2605 Loc.AATags.TBAA = nullptr;
2606
2607 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2608 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2609 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2610 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2611 });
2612 }
2613
2614 // If we write (or read-write) to a single destination and there are no
2615 // other reads in this loop then it is safe to vectorize.
2616 if (NumReadWrites == 1 && NumReads == 0) {
2617 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2618 return true;
2619 }
2620
2621 // Build dependence sets and check whether we need a runtime pointer bounds
2622 // check.
2623 Accesses.buildDependenceSets();
2624
2625 // Find pointers with computable bounds. We are going to use this information
2626 // to place a runtime bound check.
2627 Value *UncomputablePtr = nullptr;
2628 bool CanDoRTIfNeeded =
2629 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2630 SymbolicStrides, UncomputablePtr, false);
2631 if (!CanDoRTIfNeeded) {
2632 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2633 recordAnalysis("CantIdentifyArrayBounds", I)
2634 << "cannot identify array bounds";
2635 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2636 << "the array bounds.\n");
2637 return false;
2638 }
2639
2640 LLVM_DEBUG(
2641 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2642
2643 bool DepsAreSafe = true;
2644 if (Accesses.isDependencyCheckNeeded()) {
2645 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2646 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
2647 Accesses.getDependenciesToCheck());
2648
2649 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
2650 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2651
2652 // Clear the dependency checks. We assume they are not needed.
2653 Accesses.resetDepChecks(*DepChecker);
2654
2655 PtrRtChecking->reset();
2656 PtrRtChecking->Need = true;
2657
2658 auto *SE = PSE->getSE();
2659 UncomputablePtr = nullptr;
2660 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2661 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2662
2663 // Check that we found the bounds for the pointer.
2664 if (!CanDoRTIfNeeded) {
2665 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2666 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2667 << "cannot check memory dependencies at runtime";
2668 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2669 return false;
2670 }
2671 DepsAreSafe = true;
2672 }
2673 }
2674
2675 if (HasConvergentOp) {
2676 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2677 << "cannot add control dependency to convergent operation";
2678 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2679 "would be needed with a convergent operation\n");
2680 return false;
2681 }
2682
2683 if (DepsAreSafe) {
2684 LLVM_DEBUG(
2685 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2686 << (PtrRtChecking->Need ? "" : " don't")
2687 << " need runtime memory checks.\n");
2688 return true;
2689 }
2690
2691 emitUnsafeDependenceRemark();
2692 return false;
2693}
2694
2695void LoopAccessInfo::emitUnsafeDependenceRemark() {
2696 const auto *Deps = getDepChecker().getDependences();
2697 if (!Deps)
2698 return;
2699 const auto *Found =
2700 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2701 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2702 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2703 });
2704 if (Found == Deps->end())
2705 return;
2706 MemoryDepChecker::Dependence Dep = *Found;
2707
2708 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2709
2710 // Emit remark for first unsafe dependence
2711 bool HasForcedDistribution = false;
2712 std::optional<const MDOperand *> Value =
2713 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2714 if (Value) {
2715 const MDOperand *Op = *Value;
2716 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2717 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2718 }
2719
2720 const std::string Info =
2721 HasForcedDistribution
2722 ? "unsafe dependent memory operations in loop."
2723 : "unsafe dependent memory operations in loop. Use "
2724 "#pragma clang loop distribute(enable) to allow loop distribution "
2725 "to attempt to isolate the offending operations into a separate "
2726 "loop";
2727 OptimizationRemarkAnalysis &R =
2728 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2729
2730 switch (Dep.Type) {
2731 case MemoryDepChecker::Dependence::NoDep:
2732 case MemoryDepChecker::Dependence::Forward:
2733 case MemoryDepChecker::Dependence::BackwardVectorizable:
2734 llvm_unreachable("Unexpected dependence");
2735 case MemoryDepChecker::Dependence::Backward:
2736 R << "\nBackward loop carried data dependence.";
2737 break;
2738 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2739 R << "\nForward loop carried data dependence that prevents "
2740 "store-to-load forwarding.";
2741 break;
2742 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2743 R << "\nBackward loop carried data dependence that prevents "
2744 "store-to-load forwarding.";
2745 break;
2746 case MemoryDepChecker::Dependence::IndirectUnsafe:
2747 R << "\nUnsafe indirect dependence.";
2748 break;
2749 case MemoryDepChecker::Dependence::Unknown:
2750 R << "\nUnknown data dependence.";
2751 break;
2752 }
2753
2754 if (Instruction *I = Dep.getSource(getDepChecker())) {
2755 DebugLoc SourceLoc = I->getDebugLoc();
2756 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2757 SourceLoc = DD->getDebugLoc();
2758 if (SourceLoc)
2759 R << " Memory location is the same as accessed at "
2760 << ore::NV("Location", SourceLoc);
2761 }
2762}
2763
2764bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2765 DominatorTree *DT) {
2766 assert(TheLoop->contains(BB) && "Unknown block used");
2767
2768 // Blocks that do not dominate the latch need predication.
2769 const BasicBlock *Latch = TheLoop->getLoopLatch();
2770 return !DT->dominates(BB, Latch);
2771}
2772
2773OptimizationRemarkAnalysis &
2774LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2775 assert(!Report && "Multiple reports generated");
2776
2777 const Value *CodeRegion = TheLoop->getHeader();
2778 DebugLoc DL = TheLoop->getStartLoc();
2779
2780 if (I) {
2781 CodeRegion = I->getParent();
2782 // If there is no debug location attached to the instruction, fall back to
2783 // using the loop's start location.
2784 if (I->getDebugLoc())
2785 DL = I->getDebugLoc();
2786 }
2787
2788 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2789 CodeRegion);
2790 return *Report;
2791}
2792
2793bool LoopAccessInfo::isInvariant(Value *V) const {
2794 auto *SE = PSE->getSE();
2795 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2796 // trivially loop-invariant FP values to be considered invariant.
2797 if (!SE->isSCEVable(V->getType()))
2798 return false;
2799 const SCEV *S = SE->getSCEV(V);
2800 return SE->isLoopInvariant(S, TheLoop);
2801}
2802
2803/// Find the operand of the GEP that should be checked for consecutive
2804/// stores. This ignores trailing indices that have no effect on the final
2805/// pointer.
2806static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2807 const DataLayout &DL = Gep->getDataLayout();
2808 unsigned LastOperand = Gep->getNumOperands() - 1;
2809 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2810
2811 // Walk backwards and try to peel off zeros.
2812 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2813 // Find the type we're currently indexing into.
2814 gep_type_iterator GEPTI = gep_type_begin(Gep);
2815 std::advance(GEPTI, LastOperand - 2);
2816
2817 // If it's a type with the same allocation size as the result of the GEP we
2818 // can peel off the zero index.
2819 TypeSize ElemSize = GEPTI.isStruct()
2820 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2821 : GEPTI.getSequentialElementStride(DL);
2822 if (ElemSize != GEPAllocSize)
2823 break;
2824 --LastOperand;
2825 }
2826
2827 return LastOperand;
2828}
2829
2830/// If the argument is a GEP, then returns the operand identified by
2831/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2832/// operand, it returns that instead.
2833static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2834 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2835 if (!GEP)
2836 return Ptr;
2837
2838 unsigned InductionOperand = getGEPInductionOperand(GEP);
2839
2840 // Check that all of the gep indices are uniform except for our induction
2841 // operand.
2842 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2843 if (I != InductionOperand &&
2844 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2845 return Ptr;
2846 return GEP->getOperand(InductionOperand);
2847}
2848
2849/// Get the stride of a pointer access in a loop. Looks for symbolic
2850/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2851static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2852 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2853 if (!PtrTy || PtrTy->isAggregateType())
2854 return nullptr;
2855
2856 // Try to remove a gep instruction to make the pointer (actually the index at
2857 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2858 // the pointer, otherwise we are analyzing the index.
2859 Value *OrigPtr = Ptr;
2860
2861 // The size of the pointer access.
2862 int64_t PtrAccessSize = 1;
2863
2864 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2865 const SCEV *V = SE->getSCEV(Ptr);
2866
2867 if (Ptr != OrigPtr)
2868 // Strip off casts.
2869 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2870 V = C->getOperand();
2871
2872 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2873 if (!S)
2874 return nullptr;
2875
2876 // If the pointer is invariant then there is no stride and it makes no
2877 // sense to add it here.
2878 if (Lp != S->getLoop())
2879 return nullptr;
2880
2881 V = S->getStepRecurrence(*SE);
2882 if (!V)
2883 return nullptr;
2884
2885 // Strip off the size of access multiplication if we are still analyzing the
2886 // pointer.
2887 if (OrigPtr == Ptr) {
2888 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2889 if (M->getOperand(0)->getSCEVType() != scConstant)
2890 return nullptr;
2891
2892 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2893
2894 // Huge step value - give up.
2895 if (APStepVal.getBitWidth() > 64)
2896 return nullptr;
2897
2898 int64_t StepVal = APStepVal.getSExtValue();
2899 if (PtrAccessSize != StepVal)
2900 return nullptr;
2901 V = M->getOperand(1);
2902 }
2903 }
2904
2905 // Note that the restrictions after this loop-invariant check are only
2906 // profitability restrictions.
2907 if (!SE->isLoopInvariant(V, Lp))
2908 return nullptr;
2909
2910 // Look for the loop invariant symbolic value.
2911 if (isa<SCEVUnknown>(V))
2912 return V;
2913
2914 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2915 if (isa<SCEVUnknown>(C->getOperand()))
2916 return V;
2917
2918 return nullptr;
2919}
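// Illustrative example for getStrideFromPointer (hypothetical IR): for
//   %mul = mul i64 %i, %Stride
//   %gep = getelementptr i32, ptr %A, i64 %mul
// with %i a canonical {0,+,1} induction variable and %Stride loop invariant,
// the index SCEV is {0,+,%Stride} and this returns the SCEVUnknown %Stride,
// which collectStridedAccess can then version on "%Stride == 1".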
2920
2921void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2922 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2923 if (!Ptr)
2924 return;
2925
2926 // Note: getStrideFromPointer is a *profitability* heuristic. We
2927 // could broaden the scope of values returned here - to anything
2928 // which happens to be loop invariant and contributes to the
2929 // computation of an interesting IV - but we chose not to as we
2930 // don't have a cost model here, and broadening the scope exposes
2931 // far too many unprofitable cases.
2932 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2933 if (!StrideExpr)
2934 return;
2935
2936 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2937 "versioning:");
2938 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2939
2940 if (!SpeculateUnitStride) {
2941 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2942 return;
2943 }
2944
2945 // Avoid adding the "Stride == 1" predicate when we know that
2946 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2947 // or zero iteration loop, as Trip-Count <= Stride == 1.
2948 //
2949 // TODO: We are currently not making a very informed decision on when it is
2950 // beneficial to apply stride versioning. It might make more sense that the
2951 // users of this analysis (such as the vectorizer) will trigger it, based on
2952 // their specific cost considerations; For example, in cases where stride
2953 // versioning does not help resolving memory accesses/dependences, the
2954 // vectorizer should evaluate the cost of the runtime test, and the benefit
2955 // of various possible stride specializations, considering the alternatives
2956 // of using gather/scatters (if available).
2957
2958 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
2959
2960 // Match the types so we can compare the stride and the MaxBTC.
2961 // The Stride can be positive/negative, so we sign extend Stride;
2962 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
2963 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
2964 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2965 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
2966 const SCEV *CastedStride = StrideExpr;
2967 const SCEV *CastedBECount = MaxBTC;
2968 ScalarEvolution *SE = PSE->getSE();
2969 if (BETypeSizeBits >= StrideTypeSizeBits)
2970 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
2971 else
2972 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
2973 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2974 // Since TripCount == BackEdgeTakenCount + 1, checking
2975 // "Stride >= TripCount" is equivalent to checking
2976 // "Stride - MaxBTC > 0".
2977 if (SE->isKnownPositive(StrideMinusBETaken)) {
2978 LLVM_DEBUG(
2979 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2980 "Stride==1 predicate will imply that the loop executes "
2981 "at most once.\n");
2982 return;
2983 }
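 // Illustrative example for the check above (hypothetical values): if MaxBTC
 // is 7 (at most 8 iterations) and the stride is known to be at least 8,
 // StrideMinusBETaken is positive and versioning is skipped, because under a
 // "Stride == 1" predicate such a loop could execute at most one iteration.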
2984 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2985
2986 // Strip back off the integer cast, and check that our result is a
2987 // SCEVUnknown as we expect.
2988 const SCEV *StrideBase = StrideExpr;
2989 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
2990 StrideBase = C->getOperand();
2991 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
2992}
2993
2994LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2995 const TargetTransformInfo *TTI,
2996 const TargetLibraryInfo *TLI, AAResults *AA,
2997 DominatorTree *DT, LoopInfo *LI)
2998 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2999 PtrRtChecking(nullptr), TheLoop(L) {
3000 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3001 if (TTI) {
3002 TypeSize FixedWidth =
3003 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
3004 if (FixedWidth.isNonZero()) {
3005 // Scale the vector width by 2 as rough estimate to also consider
3006 // interleaving.
3007 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3008 }
3009
3010 TypeSize ScalableWidth =
3011 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
3012 if (ScalableWidth.isNonZero())
3013 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3014 }
3015 DepChecker = std::make_unique<MemoryDepChecker>(*PSE, L, SymbolicStrides,
3016 MaxTargetVectorWidthInBits);
3017 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3018 if (canAnalyzeLoop())
3019 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3020}
3021
3022void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3023 if (CanVecMem) {
3024 OS.indent(Depth) << "Memory dependences are safe";
3025 const MemoryDepChecker &DC = getDepChecker();
3026 if (!DC.isSafeForAnyVectorWidth())
3027 OS << " with a maximum safe vector width of "
3028 << DC.getMaxSafeVectorWidthInBits() << " bits";
3029 if (PtrRtChecking->Need)
3030 OS << " with run-time checks";
3031 OS << "\n";
3032 }
3033
3034 if (HasConvergentOp)
3035 OS.indent(Depth) << "Has convergent operation in loop\n";
3036
3037 if (Report)
3038 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3039
3040 if (auto *Dependences = DepChecker->getDependences()) {
3041 OS.indent(Depth) << "Dependences:\n";
3042 for (const auto &Dep : *Dependences) {
3043 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3044 OS << "\n";
3045 }
3046 } else
3047 OS.indent(Depth) << "Too many dependences, not recorded\n";
3048
3049 // List the pairs of accesses that need run-time checks to prove independence.
3050 PtrRtChecking->print(OS, Depth);
3051 OS << "\n";
3052
3053 OS.indent(Depth)
3054 << "Non vectorizable stores to invariant address were "
3055 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3056 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3057 ? ""
3058 : "not ")
3059 << "found in loop.\n";
3060
3061 OS.indent(Depth) << "SCEV assumptions:\n";
3062 PSE->getPredicate().print(OS, Depth);
3063
3064 OS << "\n";
3065
3066 OS.indent(Depth) << "Expressions re-written:\n";
3067 PSE->print(OS, Depth);
3068}
3069
3070const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
3071 const auto &[It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});
3072
3073 if (Inserted)
3074 It->second =
3075 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3076
3077 return *It->second;
3078}
3079
3080void LoopAccessInfoManager::clear() {
 SmallVector<Loop *> ToRemove;
3081 // Collect LoopAccessInfo entries that may keep references to IR outside the
3082 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3083 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3084 // SCEVs, e.g. for pointer expressions.
3085 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3086 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3087 LAI->getPSE().getPredicate().isAlwaysTrue())
3088 continue;
3089 ToRemove.push_back(L);
3090 }
3091
3092 for (Loop *L : ToRemove)
3093 LoopAccessInfoMap.erase(L);
3094}
3095
3096bool LoopAccessInfoManager::invalidate(
3097 Function &F, const PreservedAnalyses &PA,
3098 FunctionAnalysisManager::Invalidator &Inv) {
3099 // Check whether our analysis is preserved.
3100 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3101 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3102 // If not, give up now.
3103 return true;
3104
3105 // Check whether the analyses we depend on became invalid for any reason.
3106 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3107 // invalid.
3108 return Inv.invalidate<AAManager>(F, PA) ||
3109 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3110 Inv.invalidate<LoopAnalysis>(F, PA) ||
3111 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3112}
3113
3114LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3115 FunctionAnalysisManager &FAM) {
3116 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3117 auto &AA = FAM.getResult<AAManager>(F);
3118 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3119 auto &LI = FAM.getResult<LoopAnalysis>(F);
3120 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3121 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3122 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3123}
3124
3125AnalysisKey LoopAccessAnalysis::Key;
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This pass exposes codegen information to IR-level passes.
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:77
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1445
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1519
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:49
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:292
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:310
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
Definition: BasicBlock.cpp:296
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:211
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:705
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
Type * getResultElementType() const
Definition: Instructions.h:976
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
An instruction for reading from memory.
Definition: Instructions.h:174
Value * getPointerOperand()
Definition: Instructions.h:253
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
std::string getLocStr() const
Return a string containing the debug location of the loop (file name + line number if present,...
Definition: LoopInfo.cpp:667
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:565
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:632
Metadata node.
Definition: Metadata.h:1069
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1428
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:891
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(const DepCandidates &AccessSets, const MemAccessInfoList &CheckDeps)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
DenseMap< std::pair< const SCEV *, Type * >, std::pair< const SCEV *, const SCEV * > > & getPointerBounds()
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: Analysis.h:264
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
static LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getCouldNotCompute()
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:346
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:435
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TypeSize getRegisterBitWidth(RegisterKind K) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:71
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isNonZero() const
Definition: TypeSize.h:158
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition: STLExtras.h:2431
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:126
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1065
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1935
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2132
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
gep_type_iterator gep_type_begin(const User *GEP)
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Type * getLoadStoreType(const Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
IR Values for the lower and upper bounds of a pointer evolution.
Definition: LoopUtils.cpp:1798
MDNode * Scope
The tag for alias scope specification (used with noalias).
Definition: Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:777
MDNode * NoAlias
The tag specifying the noalias scope.
Definition: Metadata.h:786
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:28
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, const RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
RuntimeCheckingPtrGroup(unsigned Index, const RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
\When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450