1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9 // The implementation of the loop memory dependence analysis that was
10 // originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
14 #include "llvm/Analysis/LoopAccessAnalysis.h"
15 #include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
51 #include "llvm/IR/PatternMatch.h"
52 #include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/raw_ostream.h"
60 #include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
73 static cl::opt<unsigned, true>
74 VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
76 cl::location(VectorizerParams::VectorizationFactor));
77 unsigned VectorizerParams::VectorizationFactor;
78
79 static cl::opt<unsigned, true>
80 VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
83 cl::location(
84 VectorizerParams::VectorizationInterleave));
85 unsigned VectorizerParams::VectorizationInterleave;
86
88 "runtime-memory-check-threshold", cl::Hidden,
89 cl::desc("When performing memory disambiguation checks at runtime do not "
90 "generate more than this number of comparisons (default = 8)."),
93
94 /// The maximum number of iterations used to merge memory checks.
95 static cl::opt<unsigned> MemoryCheckMergeThreshold(
96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
105 static cl::opt<unsigned>
106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
144 "hoist-runtime-checks", cl::Hidden,
145 cl::desc(
146 "Hoist inner loop runtime memory checks to outer loop if possible"),
147 cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
148 bool VectorizerParams::HoistRuntimeChecks;
149
150 bool VectorizerParams::isInterleaveForced() {
151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
153
154 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
161 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const auto *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 auto *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
182
183 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
184 unsigned Index, RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191 }
192
193/// Calculate Start and End points of memory access.
194 /// Let's assume A is the first access and B is a memory access on the N-th
195 /// loop iteration. Then B is calculated as:
196 /// B = A + Step*N .
197 /// The Step value may be positive or negative.
198 /// N is the calculated backedge-taken count:
199 /// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
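/// For example, for an i32 access with A = %base and Step = 4, a
/// backedge-taken count of 15 gives B = %base + 60, so Start = %base and
/// End = %base + 60 + 4 = %base + 64.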
206 static std::pair<const SCEV *, const SCEV *>
207 getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
208 PredicatedScalarEvolution &PSE) {
209 ScalarEvolution *SE = PSE.getSE();
210
211 const SCEV *ScStart;
212 const SCEV *ScEnd;
213
214 if (SE->isLoopInvariant(PtrExpr, Lp)) {
215 ScStart = ScEnd = PtrExpr;
216 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
217 const SCEV *Ex = PSE.getBackedgeTakenCount();
218
219 ScStart = AR->getStart();
220 ScEnd = AR->evaluateAtIteration(Ex, *SE);
221 const SCEV *Step = AR->getStepRecurrence(*SE);
222
223 // For expressions with negative step, the upper bound is ScStart and the
224 // lower bound is ScEnd.
225 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
226 if (CStep->getValue()->isNegative())
227 std::swap(ScStart, ScEnd);
228 } else {
229 // Fallback case: the step is not constant, but we can still
230 // get the upper and lower bounds of the interval by using min/max
231 // expressions.
232 ScStart = SE->getUMinExpr(ScStart, ScEnd);
233 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
234 }
235 } else
236 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
237
238 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
239 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
240
241 // Add the size of the pointed element to ScEnd.
242 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
243 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
244 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
245 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
246
247 return {ScStart, ScEnd};
248}
249
250/// Calculate Start and End points of memory access using
251/// getStartAndEndForAccess.
252 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
253 Type *AccessTy, bool WritePtr,
254 unsigned DepSetId, unsigned ASId,
255 PredicatedScalarEvolution &PSE,
256 bool NeedsFreeze) {
257 const auto &[ScStart, ScEnd] =
258 getStartAndEndForAccess(Lp, PtrExpr, AccessTy, PSE);
259 assert(!isa<SCEVCouldNotCompute>(ScStart) &&
260 !isa<SCEVCouldNotCompute>(ScEnd) &&
261 "must be able to compute both start and end expressions");
262 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
263 NeedsFreeze);
264}
265
266bool RuntimePointerChecking::tryToCreateDiffCheck(
267 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
268 // If either group contains multiple different pointers, bail out.
269 // TODO: Support multiple pointers by using the minimum or maximum pointer,
270 // depending on src & sink.
271 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
272 return false;
273
274 PointerInfo *Src = &Pointers[CGI.Members[0]];
275 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
276
277 // If either pointer is read and written, multiple checks may be needed. Bail
278 // out.
279 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
280 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
281 return false;
282
283 ArrayRef<unsigned> AccSrc =
284 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
285 ArrayRef<unsigned> AccSink =
286 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
287 // If either pointer is accessed multiple times, there may not be a clear
288 // src/sink relation. Bail out for now.
289 if (AccSrc.size() != 1 || AccSink.size() != 1)
290 return false;
291
292 // If the sink is accessed before src, swap src/sink.
293 if (AccSink[0] < AccSrc[0])
294 std::swap(Src, Sink);
295
296 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
297 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
298 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
299 SinkAR->getLoop() != DC.getInnermostLoop())
300 return false;
301
302 SmallVector<Instruction *, 4> SrcInsts =
303 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
304 SmallVector<Instruction *, 4> SinkInsts =
305 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
306 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
307 Type *DstTy = getLoadStoreType(SinkInsts[0]);
308 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
309 return false;
310
311 const DataLayout &DL =
312 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
313 unsigned AllocSize =
314 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
315
316 // Only matching constant steps matching the AllocSize are supported at the
317 // moment. This simplifies the difference computation. Can be extended in the
318 // future.
319 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
320 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
321 Step->getAPInt().abs() != AllocSize)
322 return false;
323
324 IntegerType *IntTy =
325 IntegerType::get(Src->PointerValue->getContext(),
326 DL.getPointerSizeInBits(CGI.AddressSpace));
327
328 // When counting down, the dependence distance needs to be swapped.
329 if (Step->getValue()->isNegative())
330 std::swap(SinkAR, SrcAR);
331
332 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
333 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
334 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
335 isa<SCEVCouldNotCompute>(SrcStartInt))
336 return false;
337
338 const Loop *InnerLoop = SrcAR->getLoop();
339 // If the start values for both Src and Sink also vary according to an outer
340 // loop, then it's probably better to avoid creating diff checks because
341 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
342 // do the expanded full range overlap checks, which can be hoisted.
343 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
344 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
345 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
346 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
347 const Loop *StartARLoop = SrcStartAR->getLoop();
348 if (StartARLoop == SinkStartAR->getLoop() &&
349 StartARLoop == InnerLoop->getParentLoop() &&
350 // If the diff check would already be loop invariant (due to the
351 // recurrences being the same), then we prefer to keep the diff checks
352 // because they are cheaper.
353 SrcStartAR->getStepRecurrence(*SE) !=
354 SinkStartAR->getStepRecurrence(*SE)) {
355 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
356 "cannot be hoisted out of the outer loop\n");
357 return false;
358 }
359 }
360
361 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
362 << "SrcStart: " << *SrcStartInt << '\n'
363 << "SinkStartInt: " << *SinkStartInt << '\n');
364 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
365 Src->NeedsFreeze || Sink->NeedsFreeze);
366 return true;
367}
368
369 SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
370 SmallVector<RuntimePointerCheck, 4> Checks;
371
372 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
373 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
374 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
375 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
376
377 if (needsChecking(CGI, CGJ)) {
378 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
379 Checks.push_back(std::make_pair(&CGI, &CGJ));
380 }
381 }
382 }
383 return Checks;
384}
385
386void RuntimePointerChecking::generateChecks(
387 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
388 assert(Checks.empty() && "Checks is not empty");
389 groupChecks(DepCands, UseDependencies);
390 Checks = generateChecks();
391}
392
393 bool RuntimePointerChecking::needsChecking(
394 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
395 for (const auto &I : M.Members)
396 for (const auto &J : N.Members)
397 if (needsChecking(I, J))
398 return true;
399 return false;
400}
401
402/// Compare \p I and \p J and return the minimum.
403/// Return nullptr in case we couldn't find an answer.
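/// For example, if I is %a and J is (%a + 16), the difference J - I is the
/// constant 16, which is not negative, so I is returned as the minimum.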
404static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
405 ScalarEvolution *SE) {
406 const SCEV *Diff = SE->getMinusSCEV(J, I);
407 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
408
409 if (!C)
410 return nullptr;
411 return C->getValue()->isNegative() ? J : I;
412}
413
414 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
415 RuntimePointerChecking &RtCheck) {
416 return addPointer(
417 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
418 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
419 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
420}
421
422 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
423 const SCEV *End, unsigned AS,
424 bool NeedsFreeze,
425 ScalarEvolution &SE) {
426 assert(AddressSpace == AS &&
427 "all pointers in a checking group must be in the same address space");
428
429 // Compare the starts and ends with the known minimum and maximum
430 // of this set. We need to know how we compare against the min/max
431 // of the set in order to be able to emit memchecks.
432 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
433 if (!Min0)
434 return false;
435
436 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
437 if (!Min1)
438 return false;
439
440 // Update the low bound expression if we've found a new min value.
441 if (Min0 == Start)
442 Low = Start;
443
444 // Update the high bound expression if we've found a new max value.
445 if (Min1 != End)
446 High = End;
447
448 Members.push_back(Index);
449 this->NeedsFreeze |= NeedsFreeze;
450 return true;
451}
452
453void RuntimePointerChecking::groupChecks(
454 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
455 // We build the groups from dependency candidates equivalence classes
456 // because:
457 // - We know that pointers in the same equivalence class share
458 // the same underlying object and therefore there is a chance
459 // that we can compare pointers
460 // - We wouldn't be able to merge two pointers for which we need
461 // to emit a memcheck. The classes in DepCands are already
462 // conveniently built such that no two pointers in the same
463 // class need checking against each other.
464
465 // We use the following (greedy) algorithm to construct the groups
466 // For every pointer in the equivalence class:
467 // For each existing group:
468 // - if the difference between this pointer and the min/max bounds
469 // of the group is a constant, then make the pointer part of the
470 // group and update the min/max bounds of that group as required.
471
472 CheckingGroups.clear();
473
474 // If we need to check two pointers to the same underlying object
475 // with a non-constant difference, we shouldn't perform any pointer
476 // grouping with those pointers. This is because we can easily get
477 // into cases where the resulting check would return false, even when
478 // the accesses are safe.
479 //
480 // The following example shows this:
481 // for (i = 0; i < 1000; ++i)
482 // a[5000 + i * m] = a[i] + a[i + 9000]
483 //
484 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
485 // (0, 10000) which is always false. However, if m is 1, there is no
486 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
487 // us to perform an accurate check in this case.
488 //
489 // The above case requires that we have an UnknownDependence between
490 // accesses to the same underlying object. This cannot happen unless
491 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
492 // is also false. In this case we will use the fallback path and create
493 // separate checking groups for all pointers.
494
495 // If we don't have the dependency partitions, construct a new
496 // checking pointer group for each pointer. This is also required
497 // for correctness, because in this case we can have checking between
498 // pointers to the same underlying object.
499 if (!UseDependencies) {
500 for (unsigned I = 0; I < Pointers.size(); ++I)
501 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
502 return;
503 }
504
505 unsigned TotalComparisons = 0;
506
507 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
508 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
509 auto [It, _] = PositionMap.insert({Pointers[Index].PointerValue, {}});
510 It->second.push_back(Index);
511 }
512
513 // We need to keep track of what pointers we've already seen so we
514 // don't process them twice.
515 SmallSet<unsigned, 2> Seen;
516
517 // Go through all equivalence classes, get the "pointer check groups"
518 // and add them to the overall solution. We use the order in which accesses
519 // appear in 'Pointers' to enforce determinism.
520 for (unsigned I = 0; I < Pointers.size(); ++I) {
521 // We've seen this pointer before, and therefore already processed
522 // its equivalence class.
523 if (Seen.count(I))
524 continue;
525
526 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
527 Pointers[I].IsWritePtr);
528
529 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
530 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
531
532 // Because DepCands is constructed by visiting accesses in the order in
533 // which they appear in alias sets (which is deterministic) and the
534 // iteration order within an equivalence class member is only dependent on
535 // the order in which unions and insertions are performed on the
536 // equivalence class, the iteration order is deterministic.
537 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
538 MI != ME; ++MI) {
539 auto PointerI = PositionMap.find(MI->getPointer());
540 assert(PointerI != PositionMap.end() &&
541 "pointer in equivalence class not found in PositionMap");
542 for (unsigned Pointer : PointerI->second) {
543 bool Merged = false;
544 // Mark this pointer as seen.
545 Seen.insert(Pointer);
546
547 // Go through all the existing sets and see if we can find one
548 // which can include this pointer.
549 for (RuntimeCheckingPtrGroup &Group : Groups) {
550 // Don't perform more than a certain amount of comparisons.
551 // This should limit the cost of grouping the pointers to something
552 // reasonable. If we do end up hitting this threshold, the algorithm
553 // will create separate groups for all remaining pointers.
554 if (TotalComparisons > MemoryCheckMergeThreshold)
555 break;
556
557 TotalComparisons++;
558
559 if (Group.addPointer(Pointer, *this)) {
560 Merged = true;
561 break;
562 }
563 }
564
565 if (!Merged)
566 // We couldn't add this pointer to any existing set or the threshold
567 // for the number of comparisons has been reached. Create a new group
568 // to hold the current pointer.
569 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
570 }
571 }
572
573 // We've computed the grouped checks for this partition.
574 // Save the results and continue with the next one.
575 llvm::copy(Groups, std::back_inserter(CheckingGroups));
576 }
577}
578
580 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
581 unsigned PtrIdx2) {
582 return (PtrToPartition[PtrIdx1] != -1 &&
583 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
584}
585
586bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
587 const PointerInfo &PointerI = Pointers[I];
588 const PointerInfo &PointerJ = Pointers[J];
589
590 // No need to check if two readonly pointers intersect.
591 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
592 return false;
593
594 // Only need to check pointers between two different dependency sets.
595 if (PointerI.DependencySetId == PointerJ.DependencySetId)
596 return false;
597
598 // Only need to check pointers in the same alias set.
599 if (PointerI.AliasSetId != PointerJ.AliasSetId)
600 return false;
601
602 return true;
603}
604
605 void RuntimePointerChecking::printChecks(
606 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
607 unsigned Depth) const {
608 unsigned N = 0;
609 for (const auto &[Check1, Check2] : Checks) {
610 const auto &First = Check1->Members, &Second = Check2->Members;
611
612 OS.indent(Depth) << "Check " << N++ << ":\n";
613
614 OS.indent(Depth + 2) << "Comparing group (" << Check1 << "):\n";
615 for (unsigned K = 0; K < First.size(); ++K)
616 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
617
618 OS.indent(Depth + 2) << "Against group (" << Check2 << "):\n";
619 for (unsigned K = 0; K < Second.size(); ++K)
620 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
621 }
622}
623
624 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
625
626 OS.indent(Depth) << "Run-time memory checks:\n";
627 printChecks(OS, Checks, Depth);
628
629 OS.indent(Depth) << "Grouped accesses:\n";
630 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
631 const auto &CG = CheckingGroups[I];
632
633 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
634 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
635 << ")\n";
636 for (unsigned J = 0; J < CG.Members.size(); ++J) {
637 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
638 << "\n";
639 }
640 }
641}
642
643namespace {
644
645/// Analyses memory accesses in a loop.
646///
647/// Checks whether run time pointer checks are needed and builds sets for data
648/// dependence checking.
649class AccessAnalysis {
650public:
651 /// Read or write access location.
652 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
653 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
654
655 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
656 MemoryDepChecker::DepCandidates &DA,
657 PredicatedScalarEvolution &PSE,
658 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
659 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
660 LoopAliasScopes(LoopAliasScopes) {
661 // We're analyzing dependences across loop iterations.
662 BAA.enableCrossIterationMode();
663 }
664
665 /// Register a load and whether it is only read from.
666 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
667 Value *Ptr = const_cast<Value *>(Loc.Ptr);
668 AST.add(adjustLoc(Loc));
669 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
670 if (IsReadOnly)
671 ReadOnlyPtr.insert(Ptr);
672 }
673
674 /// Register a store.
675 void addStore(MemoryLocation &Loc, Type *AccessTy) {
676 Value *Ptr = const_cast<Value *>(Loc.Ptr);
677 AST.add(adjustLoc(Loc));
678 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
679 }
680
681 /// Check if we can emit a run-time no-alias check for \p Access.
682 ///
683 /// Returns true if we can emit a run-time no alias check for \p Access.
684 /// If we can check this access, this also adds it to a dependence set and
685 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
686 /// we will attempt to use additional run-time checks in order to get
687 /// the bounds of the pointer.
688 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
689 MemAccessInfo Access, Type *AccessTy,
690 const DenseMap<Value *, const SCEV *> &Strides,
691 DenseMap<Value *, unsigned> &DepSetId,
692 Loop *TheLoop, unsigned &RunningDepId,
693 unsigned ASId, bool ShouldCheckStride, bool Assume);
694
695 /// Check whether we can check the pointers at runtime for
696 /// non-intersection.
697 ///
698 /// Returns true if we need no check or if we do and we can generate them
699 /// (i.e. the pointers have computable bounds).
700 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
701 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
702 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
703
704 /// Goes over all memory accesses, checks whether a RT check is needed
705 /// and builds sets of dependent accesses.
706 void buildDependenceSets() {
707 processMemAccesses();
708 }
709
710 /// Initial processing of memory accesses determined that we need to
711 /// perform dependency checking.
712 ///
713 /// Note that this can later be cleared if we retry memcheck analysis without
714 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
715 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
716
717 /// We decided that no dependence analysis would be used. Reset the state.
718 void resetDepChecks(MemoryDepChecker &DepChecker) {
719 CheckDeps.clear();
720 DepChecker.clearDependences();
721 }
722
723 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
724
725
726 const DenseMap<Value *, SmallVector<const Value *, 16>> &getUnderlyingObjects() {
727 return UnderlyingObjects;
728 }
729
730 private:
731 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
732
733 /// Adjust the MemoryLocation so that it represents accesses to this
734 /// location across all iterations, rather than a single one.
735 MemoryLocation adjustLoc(MemoryLocation Loc) const {
736 // The accessed location varies within the loop, but remains within the
737 // underlying object.
738 Loc.Size = LocationSize::beforeOrAfterPointer();
739 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
740 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
741 return Loc;
742 }
743
744 /// Drop alias scopes that are only valid within a single loop iteration.
745 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
746 if (!ScopeList)
747 return nullptr;
748
749 // For the sake of simplicity, drop the whole scope list if any scope is
750 // iteration-local.
751 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
752 return LoopAliasScopes.contains(cast<MDNode>(Scope));
753 }))
754 return nullptr;
755
756 return ScopeList;
757 }
758
759 /// Go over all memory access and check whether runtime pointer checks
760 /// are needed and build sets of dependency check candidates.
761 void processMemAccesses();
762
763 /// Map of all accesses. Values are the types used to access memory pointed to
764 /// by the pointer.
765 PtrAccessMap Accesses;
766
767 /// The loop being checked.
768 const Loop *TheLoop;
769
770 /// List of accesses that need a further dependence check.
771 MemAccessInfoList CheckDeps;
772
773 /// Set of pointers that are read only.
774 SmallPtrSet<Value*, 16> ReadOnlyPtr;
775
776 /// Batched alias analysis results.
777 BatchAAResults BAA;
778
779 /// An alias set tracker to partition the access set by underlying object and
780 /// intrinsic property (such as TBAA metadata).
781 AliasSetTracker AST;
782
783 LoopInfo *LI;
784
785 /// Sets of potentially dependent accesses - members of one set share an
786 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
787 /// dependence check.
788 MemoryDepChecker::DepCandidates &DepCands;
789
790 /// Initial processing of memory accesses determined that we may need
791 /// to add memchecks. Perform the analysis to determine the necessary checks.
792 ///
793 /// Note that, this is different from isDependencyCheckNeeded. When we retry
794 /// memcheck analysis without dependency checking
795 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
796 /// cleared while this remains set if we have potentially dependent accesses.
797 bool IsRTCheckAnalysisNeeded = false;
798
799 /// The SCEV predicate containing all the SCEV-related assumptions.
800 PredicatedScalarEvolution &PSE;
801
802 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
803
804 /// Alias scopes that are declared inside the loop, and as such not valid
805 /// across iterations.
806 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
807};
808
809} // end anonymous namespace
810
811/// Check whether a pointer can participate in a runtime bounds check.
812/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
813/// by adding run-time checks (overflow checks) if necessary.
814 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
815 const SCEV *PtrScev, Loop *L, bool Assume) {
816 // The bounds for loop-invariant pointer is trivial.
817 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
818 return true;
819
820 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
821
822 if (!AR && Assume)
823 AR = PSE.getAsAddRec(Ptr);
824
825 if (!AR)
826 return false;
827
828 return AR->isAffine();
829}
830
831/// Check whether a pointer address cannot wrap.
832 static bool isNoWrap(PredicatedScalarEvolution &PSE,
833 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
834 Loop *L) {
835 const SCEV *PtrScev = PSE.getSCEV(Ptr);
836 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
837 return true;
838
839 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
840 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
841 return true;
842
843 return false;
844}
845
846static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
847 function_ref<void(Value *)> AddPointer) {
848 SmallPtrSet<Value *, 8> Visited;
849 SmallVector<Value *> WorkList;
850 WorkList.push_back(StartPtr);
851
852 while (!WorkList.empty()) {
853 Value *Ptr = WorkList.pop_back_val();
854 if (!Visited.insert(Ptr).second)
855 continue;
856 auto *PN = dyn_cast<PHINode>(Ptr);
857 // SCEV does not look through non-header PHIs inside the loop. Such phis
858 // can be analyzed by adding separate accesses for each incoming pointer
859 // value.
860 if (PN && InnermostLoop.contains(PN->getParent()) &&
861 PN->getParent() != InnermostLoop.getHeader()) {
862 for (const Use &Inc : PN->incoming_values())
863 WorkList.push_back(Inc);
864 } else
865 AddPointer(Ptr);
866 }
867}
868
869// Walk back through the IR for a pointer, looking for a select like the
870// following:
871//
872// %offset = select i1 %cmp, i64 %a, i64 %b
873// %addr = getelementptr double, double* %base, i64 %offset
874// %ld = load double, double* %addr, align 8
875//
876// We won't be able to form a single SCEVAddRecExpr from this since the
877// address for each loop iteration depends on %cmp. We could potentially
878// produce multiple valid SCEVAddRecExprs, though, and check all of them for
879// memory safety/aliasing if needed.
880//
881// If we encounter some IR we don't yet handle, or something obviously fine
882// like a constant, then we just add the SCEV for that term to the list passed
883// in by the caller. If we have a node that may potentially yield a valid
884// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
885// ourselves before adding to the list.
886static void findForkedSCEVs(
887 ScalarEvolution *SE, const Loop *L, Value *Ptr,
888 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
889 unsigned Depth) {
890 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
891 // we've exceeded our limit on recursion, just return whatever we have
892 // regardless of whether it can be used for a forked pointer or not, along
893 // with an indication of whether it might be a poison or undef value.
894 const SCEV *Scev = SE->getSCEV(Ptr);
895 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
896 !isa<Instruction>(Ptr) || Depth == 0) {
897 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
898 return;
899 }
900
901 Depth--;
902
903 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
904 return get<1>(S);
905 };
906
907 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
908 switch (Opcode) {
909 case Instruction::Add:
910 return SE->getAddExpr(L, R);
911 case Instruction::Sub:
912 return SE->getMinusSCEV(L, R);
913 default:
914 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
915 }
916 };
917
918 Instruction *I = cast<Instruction>(Ptr);
919 unsigned Opcode = I->getOpcode();
920 switch (Opcode) {
921 case Instruction::GetElementPtr: {
922 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
923 Type *SourceTy = GEP->getSourceElementType();
924 // We only handle base + single offset GEPs here for now.
925 // Not dealing with preexisting gathers yet, so no vectors.
926 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
927 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
928 break;
929 }
930 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
931 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
932 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
933 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
934
935 // See if we need to freeze our fork...
936 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
937 any_of(OffsetScevs, UndefPoisonCheck);
938
939 // Check that we only have a single fork, on either the base or the offset.
940 // Copy the SCEV across for the one without a fork in order to generate
941 // the full SCEV for both sides of the GEP.
942 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
943 BaseScevs.push_back(BaseScevs[0]);
944 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
945 OffsetScevs.push_back(OffsetScevs[0]);
946 else {
947 ScevList.emplace_back(Scev, NeedsFreeze);
948 break;
949 }
950
951 // Find the pointer type we need to extend to.
952 Type *IntPtrTy = SE->getEffectiveSCEVType(
953 SE->getSCEV(GEP->getPointerOperand())->getType());
954
955 // Find the size of the type being pointed to. We only have a single
956 // index term (guarded above) so we don't need to index into arrays or
957 // structures, just get the size of the scalar value.
958 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
959
960 // Scale up the offsets by the size of the type, then add to the bases.
961 const SCEV *Scaled1 = SE->getMulExpr(
962 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
963 const SCEV *Scaled2 = SE->getMulExpr(
964 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
965 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
966 NeedsFreeze);
967 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
968 NeedsFreeze);
969 break;
970 }
971 case Instruction::Select: {
972 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
973 // A select means we've found a forked pointer, but we currently only
974 // support a single select per pointer so if there's another behind this
975 // then we just bail out and return the generic SCEV.
976 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
977 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
978 if (ChildScevs.size() == 2) {
979 ScevList.push_back(ChildScevs[0]);
980 ScevList.push_back(ChildScevs[1]);
981 } else
982 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
983 break;
984 }
985 case Instruction::PHI: {
986 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
987 // A phi means we've found a forked pointer, but we currently only
988 // support a single phi per pointer so if there's another behind this
989 // then we just bail out and return the generic SCEV.
990 if (I->getNumOperands() == 2) {
991 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
992 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
993 }
994 if (ChildScevs.size() == 2) {
995 ScevList.push_back(ChildScevs[0]);
996 ScevList.push_back(ChildScevs[1]);
997 } else
998 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
999 break;
1000 }
1001 case Instruction::Add:
1002 case Instruction::Sub: {
1003 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
1004 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
1005 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1006 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1007
1008 // See if we need to freeze our fork...
1009 bool NeedsFreeze =
1010 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1011
1012 // Check that we only have a single fork, on either the left or right side.
1013 // Copy the SCEV across for the one without a fork in order to generate
1014 // the full SCEV for both sides of the BinOp.
1015 if (LScevs.size() == 2 && RScevs.size() == 1)
1016 RScevs.push_back(RScevs[0]);
1017 else if (RScevs.size() == 2 && LScevs.size() == 1)
1018 LScevs.push_back(LScevs[0]);
1019 else {
1020 ScevList.emplace_back(Scev, NeedsFreeze);
1021 break;
1022 }
1023
1024 ScevList.emplace_back(
1025 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1026 NeedsFreeze);
1027 ScevList.emplace_back(
1028 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1029 NeedsFreeze);
1030 break;
1031 }
1032 default:
1033 // Just return the current SCEV if we haven't handled the instruction yet.
1034 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1035 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1036 break;
1037 }
1038}
1039
1040 static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
1041 findForkedPointer(PredicatedScalarEvolution &PSE,
1042 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1043 const Loop *L) {
1044 ScalarEvolution *SE = PSE.getSE();
1045 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1046 SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
1047 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1048
1049 // For now, we will only accept a forked pointer with two possible SCEVs
1050 // that are either SCEVAddRecExprs or loop invariant.
1051 if (Scevs.size() == 2 &&
1052 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1053 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1054 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1055 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1056 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1057 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1058 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1059 return Scevs;
1060 }
1061
1062 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1063}
1064
1065bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1066 MemAccessInfo Access, Type *AccessTy,
1067 const DenseMap<Value *, const SCEV *> &StridesMap,
1068 DenseMap<Value *, unsigned> &DepSetId,
1069 Loop *TheLoop, unsigned &RunningDepId,
1070 unsigned ASId, bool ShouldCheckWrap,
1071 bool Assume) {
1072 Value *Ptr = Access.getPointer();
1073
1073
1074 SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
1075 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1076
1077 for (auto &P : TranslatedPtrs) {
1078 const SCEV *PtrExpr = get<0>(P);
1079 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1080 return false;
1081
1082 // When we run after a failing dependency check we have to make sure
1083 // we don't have wrapping pointers.
1084 if (ShouldCheckWrap) {
1085 // Skip wrap checking when translating pointers.
1086 if (TranslatedPtrs.size() > 1)
1087 return false;
1088
1089 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1090 auto *Expr = PSE.getSCEV(Ptr);
1091 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1092 return false;
1093 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1094 }
1095 }
1096 // If there's only one option for Ptr, look it up after bounds and wrap
1097 // checking, because assumptions might have been added to PSE.
1098 if (TranslatedPtrs.size() == 1)
1099 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1100 false};
1101 }
1102
1103 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1104 // The id of the dependence set.
1105 unsigned DepId;
1106
1107 if (isDependencyCheckNeeded()) {
1108 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1109 unsigned &LeaderId = DepSetId[Leader];
1110 if (!LeaderId)
1111 LeaderId = RunningDepId++;
1112 DepId = LeaderId;
1113 } else
1114 // Each access has its own dependence set.
1115 DepId = RunningDepId++;
1116
1117 bool IsWrite = Access.getInt();
1118 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1119 NeedsFreeze);
1120 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1121 }
1122
1123 return true;
1124}
1125
1126bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1127 ScalarEvolution *SE, Loop *TheLoop,
1128 const DenseMap<Value *, const SCEV *> &StridesMap,
1129 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1130 // Find pointers with computable bounds. We are going to use this information
1131 // to place a runtime bound check.
1132 bool CanDoRT = true;
1133
1134 bool MayNeedRTCheck = false;
1135 if (!IsRTCheckAnalysisNeeded) return true;
1136
1137 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1138
1139 // We assign a consecutive id to accesses from different alias sets.
1140 // Accesses between different groups don't need to be checked.
1141 unsigned ASId = 0;
1142 for (auto &AS : AST) {
1143 int NumReadPtrChecks = 0;
1144 int NumWritePtrChecks = 0;
1145 bool CanDoAliasSetRT = true;
1146 ++ASId;
1147 auto ASPointers = AS.getPointers();
1148
1149 // We assign a consecutive id to accesses from different dependence sets.
1150 // Accesses within the same set don't need a runtime check.
1151 unsigned RunningDepId = 1;
1152 DenseMap<Value *, unsigned> DepSetId;
1153
1154 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1155
1156 // First, count how many write and read accesses are in the alias set. Also
1157 // collect MemAccessInfos for later.
1158 SmallVector<MemAccessInfo, 2> AccessInfos;
1159 for (const Value *ConstPtr : ASPointers) {
1160 Value *Ptr = const_cast<Value *>(ConstPtr);
1161 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1162 if (IsWrite)
1163 ++NumWritePtrChecks;
1164 else
1165 ++NumReadPtrChecks;
1166 AccessInfos.emplace_back(Ptr, IsWrite);
1167 }
1168
1169 // We do not need runtime checks for this alias set, if there are no writes
1170 // or a single write and no reads.
1171 if (NumWritePtrChecks == 0 ||
1172 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1173 assert((ASPointers.size() <= 1 ||
1174 all_of(ASPointers,
1175 [this](const Value *Ptr) {
1176 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1177 true);
1178 return DepCands.findValue(AccessWrite) == DepCands.end();
1179 })) &&
1180 "Can only skip updating CanDoRT below, if all entries in AS "
1181 "are reads or there is at most 1 entry");
1182 continue;
1183 }
1184
1185 for (auto &Access : AccessInfos) {
1186 for (const auto &AccessTy : Accesses[Access]) {
1187 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1188 DepSetId, TheLoop, RunningDepId, ASId,
1189 ShouldCheckWrap, false)) {
1190 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1191 << *Access.getPointer() << '\n');
1192 Retries.push_back({Access, AccessTy});
1193 CanDoAliasSetRT = false;
1194 }
1195 }
1196 }
1197
1198 // Note that this function computes CanDoRT and MayNeedRTCheck
1199 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1200 // we have a pointer for which we couldn't find the bounds but we don't
1201 // actually need to emit any checks so it does not matter.
1202 //
1203 // We need runtime checks for this alias set, if there are at least 2
1204 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1205 // any bound checks (because in that case the number of dependence sets is
1206 // incomplete).
1207 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1208
1209 // We need to perform run-time alias checks, but some pointers had bounds
1210 // that couldn't be checked.
1211 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1212 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1213 // We know that we need these checks, so we can now be more aggressive
1214 // and add further checks if required (overflow checks).
1215 CanDoAliasSetRT = true;
1216 for (const auto &[Access, AccessTy] : Retries) {
1217 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1218 DepSetId, TheLoop, RunningDepId, ASId,
1219 ShouldCheckWrap, /*Assume=*/true)) {
1220 CanDoAliasSetRT = false;
1221 UncomputablePtr = Access.getPointer();
1222 break;
1223 }
1224 }
1225 }
1226
1227 CanDoRT &= CanDoAliasSetRT;
1228 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1229 ++ASId;
1230 }
1231
1232 // If the pointers that we would use for the bounds comparison have different
1233 // address spaces, assume the values aren't directly comparable, so we can't
1234 // use them for the runtime check. We also have to assume they could
1235 // overlap. In the future there should be metadata for whether address spaces
1236 // are disjoint.
1237 unsigned NumPointers = RtCheck.Pointers.size();
1238 for (unsigned i = 0; i < NumPointers; ++i) {
1239 for (unsigned j = i + 1; j < NumPointers; ++j) {
1240 // Only need to check pointers between two different dependency sets.
1241 if (RtCheck.Pointers[i].DependencySetId ==
1242 RtCheck.Pointers[j].DependencySetId)
1243 continue;
1244 // Only need to check pointers in the same alias set.
1245 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1246 continue;
1247
1248 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1249 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1250
1251 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1252 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1253 if (ASi != ASj) {
1254 LLVM_DEBUG(
1255 dbgs() << "LAA: Runtime check would require comparison between"
1256 " different address spaces\n");
1257 return false;
1258 }
1259 }
1260 }
1261
1262 if (MayNeedRTCheck && CanDoRT)
1263 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1264
1265 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1266 << " pointer comparisons.\n");
1267
1268 // If we can do run-time checks, but there are no checks, no runtime checks
1269 // are needed. This can happen when all pointers point to the same underlying
1270 // object for example.
1271 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1272
1273 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1274 if (!CanDoRTIfNeeded)
1275 RtCheck.reset();
1276 return CanDoRTIfNeeded;
1277}
1278
1279void AccessAnalysis::processMemAccesses() {
1280 // We process the set twice: first we process read-write pointers, last we
1281 // process read-only pointers. This allows us to skip dependence tests for
1282 // read-only pointers.
1283
1284 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1285 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1286 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1287 LLVM_DEBUG({
1288 for (const auto &[A, _] : Accesses)
1289 dbgs() << "\t" << *A.getPointer() << " ("
1290 << (A.getInt() ? "write"
1291 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1292 : "read"))
1293 << ")\n";
1294 });
1295
1296 // The AliasSetTracker has nicely partitioned our pointers by metadata
1297 // compatibility and potential for underlying-object overlap. As a result, we
1298 // only need to check for potential pointer dependencies within each alias
1299 // set.
1300 for (const auto &AS : AST) {
1301 // Note that both the alias-set tracker and the alias sets themselves use
1302 // ordered collections internally and so the iteration order here is
1303 // deterministic.
1304 auto ASPointers = AS.getPointers();
1305
1306 bool SetHasWrite = false;
1307
1308 // Map of pointers to last access encountered.
1309 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1310 UnderlyingObjToAccessMap ObjToLastAccess;
1311
1312 // Set of access to check after all writes have been processed.
1313 PtrAccessMap DeferredAccesses;
1314
1315 // Iterate over each alias set twice, once to process read/write pointers,
1316 // and then to process read-only pointers.
1317 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1318 bool UseDeferred = SetIteration > 0;
1319 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1320
1321 for (const Value *ConstPtr : ASPointers) {
1322 Value *Ptr = const_cast<Value *>(ConstPtr);
1323
1324 // For a single memory access in AliasSetTracker, Accesses may contain
1325 // both read and write, and they both need to be handled for CheckDeps.
1326 for (const auto &[AC, _] : S) {
1327 if (AC.getPointer() != Ptr)
1328 continue;
1329
1330 bool IsWrite = AC.getInt();
1331
1332 // If we're using the deferred access set, then it contains only
1333 // reads.
1334 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1335 if (UseDeferred && !IsReadOnlyPtr)
1336 continue;
1337 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1338 // read or a write.
1339 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1340 S.count(MemAccessInfo(Ptr, false))) &&
1341 "Alias-set pointer not in the access set?");
1342
1343 MemAccessInfo Access(Ptr, IsWrite);
1344 DepCands.insert(Access);
1345
1346 // Memorize read-only pointers for later processing and skip them in
1347 // the first round (they need to be checked after we have seen all
1348 // write pointers). Note: we also mark pointer that are not
1349 // consecutive as "read-only" pointers (so that we check
1350 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1351 if (!UseDeferred && IsReadOnlyPtr) {
1352 // We only use the pointer keys, the types vector values don't
1353 // matter.
1354 DeferredAccesses.insert({Access, {}});
1355 continue;
1356 }
1357
1358 // If this is a write - check other reads and writes for conflicts. If
1359 // this is a read only check other writes for conflicts (but only if
1360 // there is no other write to the ptr - this is an optimization to
1361 // catch "a[i] = a[i] + " without having to do a dependence check).
1362 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1363 CheckDeps.push_back(Access);
1364 IsRTCheckAnalysisNeeded = true;
1365 }
1366
1367 if (IsWrite)
1368 SetHasWrite = true;
1369
1370 // Create sets of pointers connected by a shared alias set and
1371 // underlying object.
1372 typedef SmallVector<const Value *, 16> ValueVector;
1373 ValueVector TempObjects;
1374
1375 UnderlyingObjects[Ptr] = {};
1376 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1377 ::getUnderlyingObjects(Ptr, UOs, LI);
1379 << "Underlying objects for pointer " << *Ptr << "\n");
1380 for (const Value *UnderlyingObj : UOs) {
1381 // nullptr never aliases; don't join sets for pointers that have "null"
1382 // in their UnderlyingObjects list.
1383 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1384 !NullPointerIsDefined(
1385 TheLoop->getHeader()->getParent(),
1386 UnderlyingObj->getType()->getPointerAddressSpace()))
1387 continue;
1388
1389 UnderlyingObjToAccessMap::iterator Prev =
1390 ObjToLastAccess.find(UnderlyingObj);
1391 if (Prev != ObjToLastAccess.end())
1392 DepCands.unionSets(Access, Prev->second);
1393
1394 ObjToLastAccess[UnderlyingObj] = Access;
1395 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1396 }
1397 }
1398 }
1399 }
1400 }
1401}
1402
1403/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1404/// i.e. monotonically increasing/decreasing.
1405static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1406 PredicatedScalarEvolution &PSE, const Loop *L) {
1407
1408 // FIXME: This should probably only return true for NUW.
1409 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1410 return true;
1411
1412 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1413 return true;
1414
1415 // Scalar evolution does not propagate the non-wrapping flags to values that
1416 // are derived from a non-wrapping induction variable because non-wrapping
1417 // could be flow-sensitive.
1418 //
1419 // Look through the potentially overflowing instruction to try to prove
1420 // non-wrapping for the *specific* value of Ptr.
1421
1422 // The arithmetic implied by an inbounds GEP can't overflow.
1423 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1424 if (!GEP || !GEP->isInBounds())
1425 return false;
1426
1427 // Make sure there is only one non-const index and analyze that.
1428 Value *NonConstIndex = nullptr;
1429 for (Value *Index : GEP->indices())
1430 if (!isa<ConstantInt>(Index)) {
1431 if (NonConstIndex)
1432 return false;
1433 NonConstIndex = Index;
1434 }
1435 if (!NonConstIndex)
1436 // The recurrence is on the pointer, ignore for now.
1437 return false;
1438
1439 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1440 // AddRec using a NSW operation.
1441 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1442 if (OBO->hasNoSignedWrap() &&
1443 // Assume the other operand is constant so that the AddRec can be
1444 // easily found.
1445 isa<ConstantInt>(OBO->getOperand(1))) {
1446 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1447
1448 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1449 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1450 }
1451
1452 return false;
1453}
1454
1455/// Check whether the access through \p Ptr has a constant stride.
1456 std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1457 Type *AccessTy, Value *Ptr,
1458 const Loop *Lp,
1459 const DenseMap<Value *, const SCEV *> &StridesMap,
1460 bool Assume, bool ShouldCheckWrap) {
1461 Type *Ty = Ptr->getType();
1462 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1463
1464 if (isa<ScalableVectorType>(AccessTy)) {
1465 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1466 << "\n");
1467 return std::nullopt;
1468 }
1469
1470 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1471
1472 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1473 if (Assume && !AR)
1474 AR = PSE.getAsAddRec(Ptr);
1475
1476 if (!AR) {
1477 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1478 << " SCEV: " << *PtrScev << "\n");
1479 return std::nullopt;
1480 }
1481
1482 // The access function must stride over the innermost loop.
1483 if (Lp != AR->getLoop()) {
1484 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1485 << *Ptr << " SCEV: " << *AR << "\n");
1486 return std::nullopt;
1487 }
1488
1489 // Check the step is constant.
1490 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1491
1492 // Calculate the pointer stride and check if it is constant.
1493 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1494 if (!C) {
1495 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1496 << " SCEV: " << *AR << "\n");
1497 return std::nullopt;
1498 }
1499
1500 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1501 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1502 int64_t Size = AllocSize.getFixedValue();
1503 const APInt &APStepVal = C->getAPInt();
1504
1505 // Huge step value - give up.
1506 if (APStepVal.getBitWidth() > 64)
1507 return std::nullopt;
1508
1509 int64_t StepVal = APStepVal.getSExtValue();
1510
1511 // Strided access.
1512 int64_t Stride = StepVal / Size;
1513 int64_t Rem = StepVal % Size;
1514 if (Rem)
1515 return std::nullopt;
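// For example, an i32 access (Size = 4) whose address advances by 8 bytes per
// iteration has Stride = 2; a 6-byte step leaves a remainder and is rejected
// above.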
1516
1517 if (!ShouldCheckWrap)
1518 return Stride;
1519
1520 // The address calculation must not wrap. Otherwise, a dependence could be
1521 // inverted.
1522 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1523 return Stride;
1524
1525 // An inbounds getelementptr that is a AddRec with a unit stride
1526 // cannot wrap per definition. If it did, the result would be poison
1527 // and any memory access dependent on it would be immediate UB
1528 // when executed.
1529 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1530 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1531 return Stride;
1532
1533 // If the null pointer is undefined, then an access sequence which would
1534 // otherwise access it can be assumed not to unsigned wrap. Note that this
1535 // assumes the object in memory is aligned to the natural alignment.
1536 unsigned AddrSpace = Ty->getPointerAddressSpace();
1537 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1538 (Stride == 1 || Stride == -1))
1539 return Stride;
1540
1541 if (Assume) {
1543 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1544 << "LAA: Pointer: " << *Ptr << "\n"
1545 << "LAA: SCEV: " << *AR << "\n"
1546 << "LAA: Added an overflow assumption\n");
1547 return Stride;
1548 }
1549 LLVM_DEBUG(
1550 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1551 << *Ptr << " SCEV: " << *AR << "\n");
1552 return std::nullopt;
1553}
1554
1555std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1556 Type *ElemTyB, Value *PtrB,
1557 const DataLayout &DL,
1558 ScalarEvolution &SE, bool StrictCheck,
1559 bool CheckType) {
1560 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1561
1562 // Make sure that A and B are different pointers.
1563 if (PtrA == PtrB)
1564 return 0;
1565
1566 // Make sure that the element types are the same if required.
1567 if (CheckType && ElemTyA != ElemTyB)
1568 return std::nullopt;
1569
1570 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1571 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1572
1573 // Check that the address spaces match.
1574 if (ASA != ASB)
1575 return std::nullopt;
1576 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1577
1578 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1579 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1580 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1581
1582 int Val;
1583 if (PtrA1 == PtrB1) {
1584 // Retrieve the address space again as pointer stripping now tracks through
1585 // `addrspacecast`.
1586 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1587 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1588 // Check that the address spaces match and that the pointers are valid.
1589 if (ASA != ASB)
1590 return std::nullopt;
1591
1592 IdxWidth = DL.getIndexSizeInBits(ASA);
1593 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1594 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1595
1596 OffsetB -= OffsetA;
1597 Val = OffsetB.getSExtValue();
1598 } else {
1599 // Otherwise compute the distance with SCEV between the base pointers.
1600 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1601 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1602 const auto *Diff =
1603 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1604 if (!Diff)
1605 return std::nullopt;
1606 Val = Diff->getAPInt().getSExtValue();
1607 }
1608 int Size = DL.getTypeStoreSize(ElemTyA);
1609 int Dist = Val / Size;
1610
1611 // Ensure that the calculated distance matches the type-based one after all
1612 // bitcasts have been stripped from the provided pointers.
1613 if (!StrictCheck || Dist * Size == Val)
1614 return Dist;
1615 return std::nullopt;
1616}
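As an aside, the tail of getPointersDiff reduces to a small piece of integer arithmetic. A minimal sketch, assuming the byte delta between the two pointers has already been computed (hypothetical helper name, not part of the LLVM sources):

#include <cstdint>
#include <optional>

// Convert a byte distance into an element distance; with StrictCheck the byte
// distance must be an exact multiple of the element size.
std::optional<int> byteDeltaToElemDistance(int64_t ByteDelta, int64_t ElemSize,
                                           bool StrictCheck) {
  int64_t Dist = ByteDelta / ElemSize;
  if (!StrictCheck || Dist * ElemSize == ByteDelta)
    return static_cast<int>(Dist);
  return std::nullopt;
}
// E.g. a 12-byte delta over 4-byte elements gives a distance of 3 elements;
// a 10-byte delta with StrictCheck enabled yields nullopt since 10 % 4 != 0.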
1617
1619 const DataLayout &DL, ScalarEvolution &SE,
1620 SmallVectorImpl<unsigned> &SortedIndices) {
1622 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1623 "Expected list of pointer operands.");
1624 // Walk over the pointers, and map each of them to an offset relative to
1625 // the first pointer in the array.
1626 Value *Ptr0 = VL[0];
1627
1628 using DistOrdPair = std::pair<int64_t, int>;
1629 auto Compare = llvm::less_first();
1630 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1631 Offsets.emplace(0, 0);
1632 bool IsConsecutive = true;
1633 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1634 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1635 /*StrictCheck=*/true);
1636 if (!Diff)
1637 return false;
1638
1639 // Check if the pointer with the same offset is found.
1640 int64_t Offset = *Diff;
1641 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1642 if (!IsInserted)
1643 return false;
1644 // Consecutive order if the inserted element is the last one.
1645 IsConsecutive &= std::next(It) == Offsets.end();
1646 }
1647 SortedIndices.clear();
1648 if (!IsConsecutive) {
1649 // Fill SortedIndices array only if it is non-consecutive.
1650 SortedIndices.resize(VL.size());
1651 for (auto [Idx, Off] : enumerate(Offsets))
1652 SortedIndices[Idx] = Off.second;
1653 }
1654 return true;
1655}
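The sorting logic above amounts to collecting each pointer's offset (relative to the first pointer) in an ordered container and checking whether every new offset lands at the end. A rough standalone sketch with hypothetical names, assuming the element offsets were already obtained via getPointersDiff:

#include <cstdint>
#include <iterator>
#include <map>
#include <vector>

// Offsets[I] is the element distance of pointer I from pointer 0, so
// Offsets[0] == 0. Returns false on duplicate offsets; SortedIndices is
// filled only when the pointers are not already in consecutive order.
bool sortByOffset(const std::vector<int64_t> &Offsets,
                  std::vector<unsigned> &SortedIndices) {
  std::map<int64_t, unsigned> ByOffset;
  bool IsConsecutive = true;
  for (unsigned Idx = 0; Idx < Offsets.size(); ++Idx) {
    auto [It, Inserted] = ByOffset.emplace(Offsets[Idx], Idx);
    if (!Inserted)
      return false; // Two pointers with the same offset.
    IsConsecutive &= std::next(It) == ByOffset.end();
  }
  SortedIndices.clear();
  if (!IsConsecutive)
    for (const auto &[Off, Idx] : ByOffset)
      SortedIndices.push_back(Idx);
  return true;
}
// E.g. offsets {0, 2, 1} are not consecutive and produce SortedIndices {0, 2, 1}.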
1656
1657/// Returns true if the memory operations \p A and \p B are consecutive.
1659 ScalarEvolution &SE, bool CheckType) {
1662 if (!PtrA || !PtrB)
1663 return false;
1664 Type *ElemTyA = getLoadStoreType(A);
1665 Type *ElemTyB = getLoadStoreType(B);
1666 std::optional<int> Diff =
1667 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1668 /*StrictCheck=*/true, CheckType);
1669 return Diff && *Diff == 1;
1670}
1671
1673 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1674 [this, SI](Value *Ptr) {
1675 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1676 InstMap.push_back(SI);
1677 ++AccessIdx;
1678 });
1679}
1680
1682 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1683 [this, LI](Value *Ptr) {
1684 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1685 InstMap.push_back(LI);
1686 ++AccessIdx;
1687 });
1688}
1689
1692 switch (Type) {
1693 case NoDep:
1694 case Forward:
1697
1698 case Unknown:
1701 case Backward:
1703 case IndirectUnsafe:
1705 }
1706 llvm_unreachable("unexpected DepType!");
1707}
1708
1710 switch (Type) {
1711 case NoDep:
1712 case Forward:
1713 case ForwardButPreventsForwarding:
1714 case Unknown:
1715 case IndirectUnsafe:
1716 return false;
1717
1718 case BackwardVectorizable:
1719 case Backward:
1720 case BackwardVectorizableButPreventsForwarding:
1721 return true;
1722 }
1723 llvm_unreachable("unexpected DepType!");
1724}
1725
1727 return isBackward() || Type == Unknown;
1728}
1729
1731 switch (Type) {
1732 case Forward:
1733 case ForwardButPreventsForwarding:
1734 return true;
1735
1736 case NoDep:
1737 case Unknown:
1738 case BackwardVectorizable:
1739 case Backward:
1740 case BackwardVectorizableButPreventsForwarding:
1741 case IndirectUnsafe:
1742 return false;
1743 }
1744 llvm_unreachable("unexpected DepType!");
1745}
1746
1747bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1748 uint64_t TypeByteSize) {
1749 // If loads occur at a distance that is not a multiple of a feasible vector
1750 // factor, store-load forwarding does not take place.
1751 // Positive dependences might cause trouble because vectorizing them might
1752 // prevent store-load forwarding, making vectorized code run a lot slower.
1753 // a[i] = a[i-3] ^ a[i-8];
1754 // The stores to a[i:i+1] don't line up with the loads from a[i-3:i-2], and
1755 // hence on a typical architecture store-load forwarding does not take
1756 // place. Vectorizing in such cases does not make sense.
1757 // Store-load forwarding distance.
1758
1759 // After this many iterations store-to-load forwarding conflicts should not
1760 // cause any slowdowns.
1761 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1762 // Maximum vector factor.
1763 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1764 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1765
1766 // Compute the smallest VF at which the store and load would be misaligned.
1767 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1768 VF *= 2) {
1769 // If the number of vector iterations between the store and the load is
1770 // small we could incur conflicts.
1771 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1772 MaxVFWithoutSLForwardIssues = (VF >> 1);
1773 break;
1774 }
1775 }
1776
1777 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1778 LLVM_DEBUG(
1779 dbgs() << "LAA: Distance " << Distance
1780 << " that could cause a store-load forwarding conflict\n");
1781 return true;
1782 }
1783
1784 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1785 MaxVFWithoutSLForwardIssues !=
1786 VectorizerParams::MaxVectorWidth * TypeByteSize)
1787 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1788 return false;
1789}
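A worked instance of the search above (illustrative numbers only): for a[i] = a[i-3] ^ ... with 4-byte elements, Distance is 12 bytes and TypeByteSize is 4, so NumItersForStoreLoadThroughMemory is 32 and the loop starts at VF = 8 bytes. Since 12 % 8 != 0 and 12 / 8 = 1 < 32, the very first candidate width already conflicts, MaxVFWithoutSLForwardIssues drops to 4, which is below 2 * TypeByteSize, and the function reports that the distance may prevent store-to-load forwarding.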
1790
1791void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1792 if (Status < S)
1793 Status = S;
1794}
1795
1796/// Given a dependence-distance \p Dist between two
1797/// memory accesses that have strides in the same direction, with the absolute
1798/// value of the maximum stride given in \p MaxStride, and that have the same
1799/// type size \p TypeByteSize, in a loop whose backedge-taken count is \p
1800/// BackedgeTakenCount, check whether it is possible to prove statically that
1801/// the dependence distance is larger than the range that the accesses will
1802/// travel through the execution of the loop. If so, return true; otherwise false. This
1803/// is useful for example in loops such as the following (PR31098):
1804/// for (i = 0; i < D; ++i) {
1805/// = out[i];
1806/// out[i+D] =
1807/// }
1809 const SCEV &BackedgeTakenCount,
1810 const SCEV &Dist, uint64_t MaxStride,
1811 uint64_t TypeByteSize) {
1812
1813 // If we can prove that
1814 // (**) |Dist| > BackedgeTakenCount * Step
1815 // where Step is the absolute stride of the memory accesses in bytes,
1816 // then there is no dependence.
1817 //
1818 // Rationale:
1819 // We basically want to check if the absolute distance (|Dist/Step|)
1820 // is >= the loop iteration count (or > BackedgeTakenCount).
1821 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1822 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1823 // that the dependence distance is >= VF; This is checked elsewhere.
1824 // But in some cases we can prune dependence distances early, and
1825 // even before selecting the VF, and without a runtime test, by comparing
1826 // the distance against the loop iteration count. Since the vectorized code
1827 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1828 // also guarantees that distance >= VF.
1829 //
1830 const uint64_t ByteStride = MaxStride * TypeByteSize;
1831 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1832 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1833
1834 const SCEV *CastedDist = &Dist;
1835 const SCEV *CastedProduct = Product;
1836 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1837 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1838
1839 // The dependence distance can be positive/negative, so we sign extend Dist;
1840 // The multiplication of the absolute stride in bytes and the
1841 // backedgeTakenCount is non-negative, so we zero extend Product.
1842 if (DistTypeSizeBits > ProductTypeSizeBits)
1843 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1844 else
1845 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1846
1847 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1848 // (If so, then we have proven (**) because |Dist| >= Dist)
1849 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1850 if (SE.isKnownPositive(Minus))
1851 return true;
1852
1853 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1854 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1855 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1856 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1857 return SE.isKnownPositive(Minus);
1858}
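To make the check concrete (an illustrative instance, assuming 4-byte elements and unit strides): in the PR31098 loop above, the distance between out[i] and out[i+D] is 4*D bytes, ByteStride is 4, and BackedgeTakenCount is D-1, so Dist - BackedgeTakenCount * Step = 4*D - 4*(D-1) = 4, which is known positive, proving (**) and hence independence without any runtime check.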
1859
1860/// Check the dependence for two accesses with the same stride \p Stride.
1861/// \p Distance is the positive distance and \p TypeByteSize is type size in
1862/// bytes.
1863///
1864/// \returns true if they are independent.
1866 uint64_t TypeByteSize) {
1867 assert(Stride > 1 && "The stride must be greater than 1");
1868 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1869 assert(Distance > 0 && "The distance must be non-zero");
1870
1871 // Skip if the distance is not a multiple of the type byte size.
1872 if (Distance % TypeByteSize)
1873 return false;
1874
1875 uint64_t ScaledDist = Distance / TypeByteSize;
1876
1877 // No dependence if the scaled distance is not a multiple of the stride.
1878 // E.g.
1879 // for (i = 0; i < 1024 ; i += 4)
1880 // A[i+2] = A[i] + 1;
1881 //
1882 // Two accesses in memory (scaled distance is 2, stride is 4):
1883 // | A[0] | | | | A[4] | | | |
1884 // | | | A[2] | | | | A[6] | |
1885 //
1886 // E.g.
1887 // for (i = 0; i < 1024 ; i += 3)
1888 // A[i+4] = A[i] + 1;
1889 //
1890 // Two accesses in memory (scaled distance is 4, stride is 3):
1891 // | A[0] | | | A[3] | | | A[6] | | |
1892 // | | | | | A[4] | | | A[7] | |
1893 return ScaledDist % Stride;
1894}
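The same interlacing test can be written as a tiny standalone predicate (an illustrative sketch, not the LLVM entry point itself):

#include <cstdint>

// Accesses with a common element stride are independent when the element
// distance is not a multiple of the stride, i.e. the two access sequences
// interleave without ever touching the same slot.
bool stridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                uint64_t TypeByteSize) {
  if (Distance % TypeByteSize) // not a whole number of elements apart
    return false;
  uint64_t ScaledDist = Distance / TypeByteSize;
  return ScaledDist % Stride != 0;
}
// E.g. Distance = 8 bytes, 4-byte elements, stride 4 (the first example
// above): ScaledDist = 2 and 2 % 4 != 0, so A[i] and A[i+2] never collide.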
1895
1896/// Returns true if any of the underlying objects has a loop varying address,
1897/// i.e. may change in \p L.
1898static bool
1900 ScalarEvolution &SE, const Loop *L) {
1901 return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1902 return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1903 });
1904}
1905
1906namespace {
1907struct DepDistanceStrideAndSizeInfo {
1908 const SCEV *Dist;
1909 uint64_t StrideA;
1910 uint64_t StrideB;
1911 uint64_t TypeByteSize;
1912 bool AIsWrite;
1913 bool BIsWrite;
1914
1915 DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t StrideA,
1916 uint64_t StrideB, uint64_t TypeByteSize,
1917 bool AIsWrite, bool BIsWrite)
1918 : Dist(Dist), StrideA(StrideA), StrideB(StrideB),
1919 TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {}
1920};
1921} // namespace
1922
1923// Get the dependence distance, strides, type size and whether it is a write for
1924// the dependence between A and B. Outlined to a helper function to limit the
1925// scope of various temporary variables, like A/BPtr, StrideA/BPtr and others.
1926// Returns either the dependence result, if it could already be determined (a
1927// DepType when we can prove there is no dependence or the analysis fails), or
1928// a struct containing (Distance, Stride, TypeSize, AIsWrite, BIsWrite).
1929static std::variant<MemoryDepChecker::Dependence::DepType,
1930 DepDistanceStrideAndSizeInfo>
1934 const DenseMap<Value *, const SCEV *> &Strides,
1935 const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
1936 PredicatedScalarEvolution &PSE, const Loop *InnermostLoop) {
1937 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1938 auto &SE = *PSE.getSE();
1939 auto [APtr, AIsWrite] = A;
1940 auto [BPtr, BIsWrite] = B;
1941
1942 // Two reads are independent.
1943 if (!AIsWrite && !BIsWrite)
1945
1946 Type *ATy = getLoadStoreType(AInst);
1947 Type *BTy = getLoadStoreType(BInst);
1948
1949 // We cannot check pointers in different address spaces.
1950 if (APtr->getType()->getPointerAddressSpace() !=
1951 BPtr->getType()->getPointerAddressSpace())
1953
1954 int64_t StrideAPtr =
1955 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1956 int64_t StrideBPtr =
1957 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1958
1959 const SCEV *Src = PSE.getSCEV(APtr);
1960 const SCEV *Sink = PSE.getSCEV(BPtr);
1961
1962 // If the induction step is negative we have to invert source and sink of the
1963 // dependence when measuring the distance between them. We should not swap
1964 // AIsWrite with BIsWrite, as their uses expect them in program order.
1965 if (StrideAPtr < 0) {
1966 std::swap(Src, Sink);
1967 std::swap(AInst, BInst);
1968 }
1969
1970 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1971
1972 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1973 << "(Induction step: " << StrideAPtr << ")\n");
1974 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1975 << ": " << *Dist << "\n");
1976
1977 // Needs accesses where the addresses of the accessed underlying objects do
1978 // not change within the loop.
1979 if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1980 InnermostLoop) ||
1981 isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1982 InnermostLoop))
1984
1985 // Check if we can prove that Sink only accesses memory after Src's end or
1986 // vice versa.
1987 const auto &[SrcStart, SrcEnd] =
1988 getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE);
1989 const auto &[SinkStart, SinkEnd] =
1990 getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE);
1991
1992 if (!isa<SCEVCouldNotCompute>(SrcStart) &&
1993 !isa<SCEVCouldNotCompute>(SrcEnd) &&
1994 !isa<SCEVCouldNotCompute>(SinkStart) &&
1995 !isa<SCEVCouldNotCompute>(SinkEnd)) {
1996 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
1998 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
2000 }
2001
2002 // Need accesses with constant strides and the same direction. We don't want
2003 // to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
2004 // could wrap in the address space.
2005 if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
2006 (StrideAPtr < 0 && StrideBPtr > 0)) {
2007 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2009 }
2010
2011 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
2012 bool HasSameSize =
2013 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
2014 if (!HasSameSize)
2015 TypeByteSize = 0;
2016 return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
2017 std::abs(StrideBPtr), TypeByteSize,
2018 AIsWrite, BIsWrite);
2019}
2020
2021MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
2022 const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
2023 unsigned BIdx, const DenseMap<Value *, const SCEV *> &Strides,
2025 &UnderlyingObjects) {
2026 assert(AIdx < BIdx && "Must pass arguments in program order");
2027
2028 // Get the dependence distance, stride, type size and which access writes for
2029 // the dependence between A and B.
2031 A, InstMap[AIdx], B, InstMap[BIdx], Strides, UnderlyingObjects, PSE,
2032 InnermostLoop);
2033 if (std::holds_alternative<Dependence::DepType>(Res))
2034 return std::get<Dependence::DepType>(Res);
2035
2036 auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2037 std::get<DepDistanceStrideAndSizeInfo>(Res);
2038 bool HasSameSize = TypeByteSize > 0;
2039
2040 std::optional<uint64_t> CommonStride =
2041 StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2042 if (isa<SCEVCouldNotCompute>(Dist)) {
2043 // TODO: Relax requirement that there is a common stride to retry with
2044 // non-constant distance dependencies.
2045 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2046 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2047 return Dependence::Unknown;
2048 }
2049
2050 ScalarEvolution &SE = *PSE.getSE();
2051 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
2052 uint64_t MaxStride = std::max(StrideA, StrideB);
2053
2054 // If the distance between the accesses is larger than their maximum absolute
2055 // stride multiplied by the backedge taken count, the accesses are independent,
2056 // i.e. they are far enough apart that accesses won't access the same
2057 // location across all loop iterations.
2058 if (HasSameSize &&
2060 MaxStride, TypeByteSize))
2061 return Dependence::NoDep;
2062
2063 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2064
2065 // Attempt to prove strided accesses independent.
2066 if (C) {
2067 const APInt &Val = C->getAPInt();
2068 int64_t Distance = Val.getSExtValue();
2069
2070 // If the distance between accesses and their strides are known constants,
2071 // check whether the accesses interlace each other.
2072 if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2073 HasSameSize &&
2074 areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2075 TypeByteSize)) {
2076 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2077 return Dependence::NoDep;
2078 }
2079 } else
2080 Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2081
2082 // Negative distances are not plausible dependencies.
2083 if (SE.isKnownNonPositive(Dist)) {
2084 if (SE.isKnownNonNegative(Dist)) {
2085 if (HasSameSize) {
2086 // Write to the same location with the same size.
2087 return Dependence::Forward;
2088 }
2089 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2090 "different type sizes\n");
2091 return Dependence::Unknown;
2092 }
2093
2094 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2095 // Check if the first access writes to a location that is read in a later
2096 // iteration, where the distance between them is not a multiple of a vector
2097 // factor and relatively small.
2098 //
2099 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2100 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2101 // forward dependency will allow vectorization using any width.
2102
2103 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2104 if (!C) {
2105 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2106 // condition to consider retrying with runtime checks. Historically, we
2107 // did not set it when strides were different but there is no inherent
2108 // reason to.
2109 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2110 return Dependence::Unknown;
2111 }
2112 if (!HasSameSize ||
2113 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2114 TypeByteSize)) {
2115 LLVM_DEBUG(
2116 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2118 }
2119 }
2120
2121 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2122 return Dependence::Forward;
2123 }
2124
2125 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2126 // Below we only handle strictly positive distances.
2127 if (MinDistance <= 0) {
2128 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2129 return Dependence::Unknown;
2130 }
2131
2132 if (!isa<SCEVConstant>(Dist)) {
2133 // Previously this case would be treated as Unknown, possibly setting
2134 // FoundNonConstantDistanceDependence to force re-trying with runtime
2135 // checks. Until the TODO below is addressed, set it here to preserve
2136 // original behavior w.r.t. re-trying with runtime checks.
2137 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2138 // condition to consider retrying with runtime checks. Historically, we
2139 // did not set it when strides were different but there is no inherent
2140 // reason to.
2141 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2142 }
2143
2144 if (!HasSameSize) {
2145 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2146 "different type sizes\n");
2147 return Dependence::Unknown;
2148 }
2149
2150 if (!CommonStride)
2151 return Dependence::Unknown;
2152
2153 // Bail out early if passed-in parameters make vectorization not feasible.
2154 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2156 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2158 // The minimum number of iterations for a vectorized/unrolled version.
2159 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2160
2161 // It's not vectorizable if the distance is smaller than the minimum distance
2162 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2163 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2164 // TypeByteSize (no need to add the last gap distance).
2165 //
2166 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2167 // foo(int *A) {
2168 // int *B = (int *)((char *)A + 14);
2169 // for (i = 0 ; i < 1024 ; i += 2)
2170 // B[i] = A[i] + 1;
2171 // }
2172 //
2173 // Two accesses in memory (stride is 2):
2174 // | A[0] | | A[2] | | A[4] | | A[6] | |
2175 // | B[0] | | B[2] | | B[4] |
2176 //
2177 // The minimum distance needed for vectorizing all iterations except the last:
2178 // 4 * 2 * (MinNumIter - 1). The minimum distance needed for the last iteration: 4.
2179 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2180 //
2181 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2182 // 12, which is less than the distance (14).
2183 //
2184 // If MinNumIter is 4 (say if a user forces the vectorization factor to be 4),
2185 // the minimum distance needed is 28, which is greater than the distance. It is
2186 // not safe to do vectorization.
2187
2188 // We know that Dist is positive, but it may not be constant. Use the signed
2189 // minimum for computations below, as this ensures we compute the closest
2190 // possible dependence distance.
2191 uint64_t MinDistanceNeeded =
2192 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2193 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2194 if (!isa<SCEVConstant>(Dist)) {
2195 // For non-constant distances, we checked the lower bound of the
2196 // dependence distance and the distance may be larger at runtime (and safe
2197 // for vectorization). Classify it as Unknown, so we re-try with runtime
2198 // checks.
2199 return Dependence::Unknown;
2200 }
2201 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2202 << MinDistance << '\n');
2203 return Dependence::Backward;
2204 }
2205
2206 // Unsafe if the minimum distance needed is greater than the smallest
2207 // dependence distance.
2208 if (MinDistanceNeeded > MinDepDistBytes) {
2209 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2210 << MinDistanceNeeded << " size in bytes\n");
2211 return Dependence::Backward;
2212 }
2213
2214 // Positive distance bigger than max vectorization factor.
2215 // FIXME: Should use max factor instead of max distance in bytes, which could
2216 // not handle different types.
2217 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2218 // void foo (int *A, char *B) {
2219 // for (unsigned i = 0; i < 1024; i++) {
2220 // A[i+2] = A[i] + 1;
2221 // B[i+2] = B[i] + 1;
2222 // }
2223 // }
2224 //
2225 // This case is currently unsafe according to the max safe distance. If we
2226 // analyze the two accesses on array B, the max safe dependence distance
2227 // is 2. Then when we analyze the accesses on array A, the minimum distance
2228 // needed is 8, which exceeds the max safe distance of 2 and so forbids
2229 // vectorization, even though both A and B could be vectorized by 2 iterations.
2230 MinDepDistBytes =
2231 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2232
2233 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2234 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2235 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2236 isa<SCEVConstant>(Dist) &&
2237 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2238 // Sanity check that we didn't update MinDepDistBytes when calling
2239 // couldPreventStoreLoadForward
2240 assert(MinDepDistBytes == MinDepDistBytesOld &&
2241 "An update to MinDepDistBytes requires an update to "
2242 "MaxSafeVectorWidthInBits");
2243 (void)MinDepDistBytesOld;
2245 }
2246
2247 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2248 // since there is a backwards dependency.
2249 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2250 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2251 << " with max VF = " << MaxVF << '\n');
2252
2253 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2254 if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2255 // For non-constant distances, we checked the lower bound of the dependence
2256 // distance and the distance may be larger at runtime (and safe for
2257 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2258 return Dependence::Unknown;
2259 }
2260
2261 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2263}
2264
2266 DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2267 const DenseMap<Value *, const SCEV *> &Strides,
2269 &UnderlyingObjects) {
2270
2271 MinDepDistBytes = -1;
2273 for (MemAccessInfo CurAccess : CheckDeps) {
2274 if (Visited.count(CurAccess))
2275 continue;
2276
2277 // Get the relevant memory access set.
2279 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2280
2281 // Check accesses within this set.
2283 AccessSets.member_begin(I);
2285 AccessSets.member_end();
2286
2287 // Check every access pair.
2288 while (AI != AE) {
2289 Visited.insert(*AI);
2290 bool AIIsWrite = AI->getInt();
2291 // Check loads only against the next equivalence class, but stores also
2292 // against other stores in the same equivalence class - to the same address.
2294 (AIIsWrite ? AI : std::next(AI));
2295 while (OI != AE) {
2296 // Check every accessing instruction pair in program order.
2297 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2298 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2299 // Scan all accesses of another equivalence class, but only the next
2300 // accesses of the same equivalence class.
2301 for (std::vector<unsigned>::iterator
2302 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2303 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2304 I2 != I2E; ++I2) {
2305 auto A = std::make_pair(&*AI, *I1);
2306 auto B = std::make_pair(&*OI, *I2);
2307
2308 assert(*I1 != *I2);
2309 if (*I1 > *I2)
2310 std::swap(A, B);
2311
2313 isDependent(*A.first, A.second, *B.first, B.second, Strides,
2314 UnderlyingObjects);
2316
2317 // Gather dependences unless we accumulated MaxDependences
2318 // dependences. In that case return as soon as we find the first
2319 // unsafe dependence. This puts a limit on this quadratic
2320 // algorithm.
2321 if (RecordDependences) {
2322 if (Type != Dependence::NoDep)
2323 Dependences.push_back(Dependence(A.second, B.second, Type));
2324
2325 if (Dependences.size() >= MaxDependences) {
2326 RecordDependences = false;
2327 Dependences.clear();
2329 << "Too many dependences, stopped recording\n");
2330 }
2331 }
2332 if (!RecordDependences && !isSafeForVectorization())
2333 return false;
2334 }
2335 ++OI;
2336 }
2337 ++AI;
2338 }
2339 }
2340
2341 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2342 return isSafeForVectorization();
2343}
2344
2347 MemAccessInfo Access(Ptr, IsWrite);
2348 auto &IndexVector = Accesses.find(Access)->second;
2349
2351 transform(IndexVector,
2352 std::back_inserter(Insts),
2353 [&](unsigned Idx) { return this->InstMap[Idx]; });
2354 return Insts;
2355}
2356
2358 "NoDep",
2359 "Unknown",
2360 "IndirectUnsafe",
2361 "Forward",
2362 "ForwardButPreventsForwarding",
2363 "Backward",
2364 "BackwardVectorizable",
2365 "BackwardVectorizableButPreventsForwarding"};
2366
2368 raw_ostream &OS, unsigned Depth,
2369 const SmallVectorImpl<Instruction *> &Instrs) const {
2370 OS.indent(Depth) << DepName[Type] << ":\n";
2371 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2372 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2373}
2374
2375bool LoopAccessInfo::canAnalyzeLoop() {
2376 // We need to have a loop header.
2377 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2378 << TheLoop->getHeader()->getParent()->getName() << ": "
2379 << TheLoop->getHeader()->getName() << '\n');
2380
2381 // We can only analyze innermost loops.
2382 if (!TheLoop->isInnermost()) {
2383 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2384 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2385 return false;
2386 }
2387
2388 // We must have a single backedge.
2389 if (TheLoop->getNumBackEdges() != 1) {
2390 LLVM_DEBUG(
2391 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2392 recordAnalysis("CFGNotUnderstood")
2393 << "loop control flow is not understood by analyzer";
2394 return false;
2395 }
2396
2397 // ScalarEvolution needs to be able to find the exit count.
2398 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2399 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2400 recordAnalysis("CantComputeNumberOfIterations")
2401 << "could not determine number of loop iterations";
2402 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2403 return false;
2404 }
2405
2406 return true;
2407}
2408
2409void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2410 const TargetLibraryInfo *TLI,
2411 DominatorTree *DT) {
2412 // Holds the Load and Store instructions.
2415 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2416
2417 // Holds all the different accesses in the loop.
2418 unsigned NumReads = 0;
2419 unsigned NumReadWrites = 0;
2420
2421 bool HasComplexMemInst = false;
2422
2423 // A runtime check is only legal to insert if there are no convergent calls.
2424 HasConvergentOp = false;
2425
2426 PtrRtChecking->Pointers.clear();
2427 PtrRtChecking->Need = false;
2428
2429 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2430
2431 const bool EnableMemAccessVersioningOfLoop =
2433 !TheLoop->getHeader()->getParent()->hasOptSize();
2434
2435 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2436 // loop info, as it may be arbitrary.
2437 LoopBlocksRPO RPOT(TheLoop);
2438 RPOT.perform(LI);
2439 for (BasicBlock *BB : RPOT) {
2440 // Scan the BB and collect legal loads and stores. Also detect any
2441 // convergent instructions.
2442 for (Instruction &I : *BB) {
2443 if (auto *Call = dyn_cast<CallBase>(&I)) {
2444 if (Call->isConvergent())
2445 HasConvergentOp = true;
2446 }
2447
2448 // If both a non-vectorizable memory instruction and a convergent
2449 // operation were found in this loop, there is no reason to continue the search.
2450 if (HasComplexMemInst && HasConvergentOp) {
2451 CanVecMem = false;
2452 return;
2453 }
2454
2455 // Avoid hitting recordAnalysis multiple times.
2456 if (HasComplexMemInst)
2457 continue;
2458
2459 // Record alias scopes defined inside the loop.
2460 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2461 for (Metadata *Op : Decl->getScopeList()->operands())
2462 LoopAliasScopes.insert(cast<MDNode>(Op));
2463
2464 // Many math library functions read the rounding mode. We will only
2465 // vectorize a loop if it contains known function calls that don't set
2466 // the flag. Therefore, it is safe to ignore this read from memory.
2467 auto *Call = dyn_cast<CallInst>(&I);
2468 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2469 continue;
2470
2471 // If this is a load, save it. If this instruction can read from memory
2472 // but is not a load, then we quit. Notice that we don't handle function
2473 // calls that read or write.
2474 if (I.mayReadFromMemory()) {
2475 // If the function has an explicit vectorized counterpart, we can safely
2476 // assume that it can be vectorized.
2477 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2478 !VFDatabase::getMappings(*Call).empty())
2479 continue;
2480
2481 auto *Ld = dyn_cast<LoadInst>(&I);
2482 if (!Ld) {
2483 recordAnalysis("CantVectorizeInstruction", Ld)
2484 << "instruction cannot be vectorized";
2485 HasComplexMemInst = true;
2486 continue;
2487 }
2488 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2489 recordAnalysis("NonSimpleLoad", Ld)
2490 << "read with atomic ordering or volatile read";
2491 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2492 HasComplexMemInst = true;
2493 continue;
2494 }
2495 NumLoads++;
2496 Loads.push_back(Ld);
2497 DepChecker->addAccess(Ld);
2498 if (EnableMemAccessVersioningOfLoop)
2499 collectStridedAccess(Ld);
2500 continue;
2501 }
2502
2503 // Save 'store' instructions. Abort if other instructions write to memory.
2504 if (I.mayWriteToMemory()) {
2505 auto *St = dyn_cast<StoreInst>(&I);
2506 if (!St) {
2507 recordAnalysis("CantVectorizeInstruction", St)
2508 << "instruction cannot be vectorized";
2509 HasComplexMemInst = true;
2510 continue;
2511 }
2512 if (!St->isSimple() && !IsAnnotatedParallel) {
2513 recordAnalysis("NonSimpleStore", St)
2514 << "write with atomic ordering or volatile write";
2515 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2516 HasComplexMemInst = true;
2517 continue;
2518 }
2519 NumStores++;
2520 Stores.push_back(St);
2521 DepChecker->addAccess(St);
2522 if (EnableMemAccessVersioningOfLoop)
2523 collectStridedAccess(St);
2524 }
2525 } // Next instr.
2526 } // Next block.
2527
2528 if (HasComplexMemInst) {
2529 CanVecMem = false;
2530 return;
2531 }
2532
2533 // Now we have two lists that hold the loads and the stores.
2534 // Next, we find the pointers that they use.
2535
2536 // Check if we see any stores. If there are no stores, then we don't
2537 // care if the pointers are *restrict*.
2538 if (!Stores.size()) {
2539 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2540 CanVecMem = true;
2541 return;
2542 }
2543
2544 MemoryDepChecker::DepCandidates DependentAccesses;
2545 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2546 LoopAliasScopes);
2547
2548 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2549 // multiple times on the same object. If the ptr is accessed twice, once
2550 // for read and once for write, it will only appear once (on the write
2551 // list). This is okay, since we are going to check for conflicts between
2552 // writes and between reads and writes, but not between reads and reads.
2554
2555 // Record uniform store addresses to identify if we have multiple stores
2556 // to the same address.
2557 SmallPtrSet<Value *, 16> UniformStores;
2558
2559 for (StoreInst *ST : Stores) {
2560 Value *Ptr = ST->getPointerOperand();
2561
2562 if (isInvariant(Ptr)) {
2563 // Record store instructions to loop invariant addresses
2564 StoresToInvariantAddresses.push_back(ST);
2565 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2566 !UniformStores.insert(Ptr).second;
2567 }
2568
2569 // If we did *not* see this pointer before, insert it to the read-write
2570 // list. At this phase it is only a 'write' list.
2571 Type *AccessTy = getLoadStoreType(ST);
2572 if (Seen.insert({Ptr, AccessTy}).second) {
2573 ++NumReadWrites;
2574
2576 // The TBAA metadata could have a control dependency on the predication
2577 // condition, so we cannot rely on it when determining whether or not we
2578 // need runtime pointer checks.
2579 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2580 Loc.AATags.TBAA = nullptr;
2581
2582 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2583 [&Accesses, AccessTy, Loc](Value *Ptr) {
2584 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2585 Accesses.addStore(NewLoc, AccessTy);
2586 });
2587 }
2588 }
2589
2590 if (IsAnnotatedParallel) {
2591 LLVM_DEBUG(
2592 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2593 << "checks.\n");
2594 CanVecMem = true;
2595 return;
2596 }
2597
2598 for (LoadInst *LD : Loads) {
2599 Value *Ptr = LD->getPointerOperand();
2600 // If we did *not* see this pointer before, insert it to the
2601 // read list. If we *did* see it before, then it is already in
2602 // the read-write list. This allows us to vectorize expressions
2603 // such as A[i] += x, because the address of A[i] is a read-write
2604 // pointer. This only works if the index of A[i] is consecutive.
2605 // If the address of i is unknown (for example A[B[i]]) then we may
2606 // read a few words, modify, and write a few words, and some of the
2607 // words may be written to the same address.
2608 bool IsReadOnlyPtr = false;
2609 Type *AccessTy = getLoadStoreType(LD);
2610 if (Seen.insert({Ptr, AccessTy}).second ||
2611 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2612 ++NumReads;
2613 IsReadOnlyPtr = true;
2614 }
2615
2616 // See if there is an unsafe dependency between a load from a uniform address
2617 // and a store to the same uniform address.
2618 if (UniformStores.count(Ptr)) {
2619 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2620 "load and uniform store to the same address!\n");
2621 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2622 }
2623
2625 // The TBAA metadata could have a control dependency on the predication
2626 // condition, so we cannot rely on it when determining whether or not we
2627 // need runtime pointer checks.
2628 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2629 Loc.AATags.TBAA = nullptr;
2630
2631 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2632 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2633 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2634 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2635 });
2636 }
2637
2638 // If we write (or read-write) to a single destination and there are no
2639 // other reads in this loop, then it is safe to vectorize.
2640 if (NumReadWrites == 1 && NumReads == 0) {
2641 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2642 CanVecMem = true;
2643 return;
2644 }
2645
2646 // Build dependence sets and check whether we need a runtime pointer bounds
2647 // check.
2648 Accesses.buildDependenceSets();
2649
2650 // Find pointers with computable bounds. We are going to use this information
2651 // to place a runtime bound check.
2652 Value *UncomputablePtr = nullptr;
2653 bool CanDoRTIfNeeded =
2654 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2655 SymbolicStrides, UncomputablePtr, false);
2656 if (!CanDoRTIfNeeded) {
2657 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2658 recordAnalysis("CantIdentifyArrayBounds", I)
2659 << "cannot identify array bounds";
2660 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2661 << "the array bounds.\n");
2662 CanVecMem = false;
2663 return;
2664 }
2665
2666 LLVM_DEBUG(
2667 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2668
2669 CanVecMem = true;
2670 if (Accesses.isDependencyCheckNeeded()) {
2671 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2672 CanVecMem = DepChecker->areDepsSafe(
2673 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides,
2674 Accesses.getUnderlyingObjects());
2675
2676 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2677 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2678
2679 // Clear the dependency checks. We assume they are not needed.
2680 Accesses.resetDepChecks(*DepChecker);
2681
2682 PtrRtChecking->reset();
2683 PtrRtChecking->Need = true;
2684
2685 auto *SE = PSE->getSE();
2686 UncomputablePtr = nullptr;
2687 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2688 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2689
2690 // Check that we found the bounds for the pointer.
2691 if (!CanDoRTIfNeeded) {
2692 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2693 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2694 << "cannot check memory dependencies at runtime";
2695 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2696 CanVecMem = false;
2697 return;
2698 }
2699
2700 CanVecMem = true;
2701 }
2702 }
2703
2704 if (HasConvergentOp) {
2705 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2706 << "cannot add control dependency to convergent operation";
2707 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2708 "would be needed with a convergent operation\n");
2709 CanVecMem = false;
2710 return;
2711 }
2712
2713 if (CanVecMem)
2714 LLVM_DEBUG(
2715 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2716 << (PtrRtChecking->Need ? "" : " don't")
2717 << " need runtime memory checks.\n");
2718 else
2719 emitUnsafeDependenceRemark();
2720}
2721
2722void LoopAccessInfo::emitUnsafeDependenceRemark() {
2723 const auto *Deps = getDepChecker().getDependences();
2724 if (!Deps)
2725 return;
2726 const auto *Found =
2727 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2730 });
2731 if (Found == Deps->end())
2732 return;
2733 MemoryDepChecker::Dependence Dep = *Found;
2734
2735 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2736
2737 // Emit remark for first unsafe dependence
2738 bool HasForcedDistribution = false;
2739 std::optional<const MDOperand *> Value =
2740 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2741 if (Value) {
2742 const MDOperand *Op = *Value;
2743 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2744 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2745 }
2746
2747 const std::string Info =
2748 HasForcedDistribution
2749 ? "unsafe dependent memory operations in loop."
2750 : "unsafe dependent memory operations in loop. Use "
2751 "#pragma clang loop distribute(enable) to allow loop distribution "
2752 "to attempt to isolate the offending operations into a separate "
2753 "loop";
2755 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2756
2757 switch (Dep.Type) {
2761 llvm_unreachable("Unexpected dependence");
2763 R << "\nBackward loop carried data dependence.";
2764 break;
2766 R << "\nForward loop carried data dependence that prevents "
2767 "store-to-load forwarding.";
2768 break;
2770 R << "\nBackward loop carried data dependence that prevents "
2771 "store-to-load forwarding.";
2772 break;
2774 R << "\nUnsafe indirect dependence.";
2775 break;
2777 R << "\nUnknown data dependence.";
2778 break;
2779 }
2780
2781 if (Instruction *I = Dep.getSource(getDepChecker())) {
2782 DebugLoc SourceLoc = I->getDebugLoc();
2783 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2784 SourceLoc = DD->getDebugLoc();
2785 if (SourceLoc)
2786 R << " Memory location is the same as accessed at "
2787 << ore::NV("Location", SourceLoc);
2788 }
2789}
2790
2792 DominatorTree *DT) {
2793 assert(TheLoop->contains(BB) && "Unknown block used");
2794
2795 // Blocks that do not dominate the latch need predication.
2796 BasicBlock* Latch = TheLoop->getLoopLatch();
2797 return !DT->dominates(BB, Latch);
2798}
2799
2800OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2801 Instruction *I) {
2802 assert(!Report && "Multiple reports generated");
2803
2804 Value *CodeRegion = TheLoop->getHeader();
2805 DebugLoc DL = TheLoop->getStartLoc();
2806
2807 if (I) {
2808 CodeRegion = I->getParent();
2809 // If there is no debug location attached to the instruction, fall back to
2810 // using the loop's.
2811 if (I->getDebugLoc())
2812 DL = I->getDebugLoc();
2813 }
2814
2815 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2816 CodeRegion);
2817 return *Report;
2818}
2819
2821 auto *SE = PSE->getSE();
2822 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2823 // trivially loop-invariant FP values to be considered invariant.
2824 if (!SE->isSCEVable(V->getType()))
2825 return false;
2826 const SCEV *S = SE->getSCEV(V);
2827 return SE->isLoopInvariant(S, TheLoop);
2828}
2829
2830/// Find the operand of the GEP that should be checked for consecutive
2831/// stores. This ignores trailing indices that have no effect on the final
2832/// pointer.
2833static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2834 const DataLayout &DL = Gep->getModule()->getDataLayout();
2835 unsigned LastOperand = Gep->getNumOperands() - 1;
2836 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2837
2838 // Walk backwards and try to peel off zeros.
2839 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2840 // Find the type we're currently indexing into.
2841 gep_type_iterator GEPTI = gep_type_begin(Gep);
2842 std::advance(GEPTI, LastOperand - 2);
2843
2844 // If it's a type with the same allocation size as the result of the GEP we
2845 // can peel off the zero index.
2846 TypeSize ElemSize = GEPTI.isStruct()
2847 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2849 if (ElemSize != GEPAllocSize)
2850 break;
2851 --LastOperand;
2852 }
2853
2854 return LastOperand;
2855}
2856
2857/// If the argument is a GEP, then returns the operand identified by
2858/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2859/// operand, it returns that instead.
2861 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2862 if (!GEP)
2863 return Ptr;
2864
2865 unsigned InductionOperand = getGEPInductionOperand(GEP);
2866
2867 // Check that all of the gep indices are uniform except for our induction
2868 // operand.
2869 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2870 if (I != InductionOperand &&
2871 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2872 return Ptr;
2873 return GEP->getOperand(InductionOperand);
2874}
2875
2876/// If a value has only one user that is a CastInst, return it.
2878 Value *UniqueCast = nullptr;
2879 for (User *U : Ptr->users()) {
2880 CastInst *CI = dyn_cast<CastInst>(U);
2881 if (CI && CI->getType() == Ty) {
2882 if (!UniqueCast)
2883 UniqueCast = CI;
2884 else
2885 return nullptr;
2886 }
2887 }
2888 return UniqueCast;
2889}
2890
2891/// Get the stride of a pointer access in a loop. Looks for symbolic
2892/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2894 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2895 if (!PtrTy || PtrTy->isAggregateType())
2896 return nullptr;
2897
2898 // Try to remove a gep instruction to make the pointer (actually the index at
2899 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2900 // the pointer; otherwise, we are analyzing the index.
2901 Value *OrigPtr = Ptr;
2902
2903 // The size of the pointer access.
2904 int64_t PtrAccessSize = 1;
2905
2906 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2907 const SCEV *V = SE->getSCEV(Ptr);
2908
2909 if (Ptr != OrigPtr)
2910 // Strip off casts.
2911 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2912 V = C->getOperand();
2913
2914 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2915 if (!S)
2916 return nullptr;
2917
2918 // If the pointer is invariant then there is no stride and it makes no
2919 // sense to add it here.
2920 if (Lp != S->getLoop())
2921 return nullptr;
2922
2923 V = S->getStepRecurrence(*SE);
2924 if (!V)
2925 return nullptr;
2926
2927 // Strip off the size of access multiplication if we are still analyzing the
2928 // pointer.
2929 if (OrigPtr == Ptr) {
2930 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2931 if (M->getOperand(0)->getSCEVType() != scConstant)
2932 return nullptr;
2933
2934 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2935
2936 // Huge step value - give up.
2937 if (APStepVal.getBitWidth() > 64)
2938 return nullptr;
2939
2940 int64_t StepVal = APStepVal.getSExtValue();
2941 if (PtrAccessSize != StepVal)
2942 return nullptr;
2943 V = M->getOperand(1);
2944 }
2945 }
2946
2947 // Note that the restrictions after this loop-invariant check are only
2948 // profitability restrictions.
2949 if (!SE->isLoopInvariant(V, Lp))
2950 return nullptr;
2951
2952 // Look for the loop invariant symbolic value.
2953 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2954 if (!U) {
2955 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2956 if (!C)
2957 return nullptr;
2958 U = dyn_cast<SCEVUnknown>(C->getOperand());
2959 if (!U)
2960 return nullptr;
2961
2962 // Match legacy behavior - this is not needed for correctness
2963 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2964 return nullptr;
2965 }
2966
2967 return V;
2968}
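As a concrete (illustrative) case: for an access A[i * Stride] where Stride is a loop-invariant function argument, stripping the GEP leaves the index i * Stride, whose SCEV is the add recurrence {0,+,Stride}; its step is the loop-invariant SCEVUnknown for Stride, which is what gets returned here and later recorded by collectStridedAccess as a symbolic stride that versioning may assume to be 1.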
2969
2970void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2971 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2972 if (!Ptr)
2973 return;
2974
2975 // Note: getStrideFromPointer is a *profitability* heuristic. We
2976 // could broaden the scope of values returned here - to anything
2977 // which happens to be loop invariant and contributes to the
2978 // computation of an interesting IV - but we chose not to as we
2979 // don't have a cost model here, and broadening the scope exposes
2980 // far too many unprofitable cases.
2981 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2982 if (!StrideExpr)
2983 return;
2984
2985 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2986 "versioning:");
2987 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2988
2989 if (!SpeculateUnitStride) {
2990 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2991 return;
2992 }
2993
2994 // Avoid adding the "Stride == 1" predicate when we know that
2995 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2996 // or zero iteration loop, as Trip-Count <= Stride == 1.
2997 //
2998 // TODO: We are currently not making a very informed decision on when it is
2999 // beneficial to apply stride versioning. It might make more sense that the
3000 // users of this analysis (such as the vectorizer) will trigger it, based on
3001 // their specific cost considerations; For example, in cases where stride
3002 // versioning does not help resolving memory accesses/dependences, the
3003 // vectorizer should evaluate the cost of the runtime test, and the benefit
3004 // of various possible stride specializations, considering the alternatives
3005 // of using gather/scatters (if available).
3006
3007 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
3008
3009 // Match the types so we can compare the stride and the BETakenCount.
3010 // The Stride can be positive/negative, so we sign extend Stride;
3011 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
3012 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
3013 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3014 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
3015 const SCEV *CastedStride = StrideExpr;
3016 const SCEV *CastedBECount = BETakenCount;
3017 ScalarEvolution *SE = PSE->getSE();
3018 if (BETypeSizeBits >= StrideTypeSizeBits)
3019 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
3020 else
3021 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
3022 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3023 // Since TripCount == BackEdgeTakenCount + 1, checking:
3024 // "Stride >= TripCount" is equivalent to checking:
3025 // Stride - BETakenCount > 0
3026 if (SE->isKnownPositive(StrideMinusBETaken)) {
3027 LLVM_DEBUG(
3028 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3029 "Stride==1 predicate will imply that the loop executes "
3030 "at most once.\n");
3031 return;
3032 }
3033 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3034
3035 // Strip back off the integer cast, and check that our result is a
3036 // SCEVUnknown as we expect.
3037 const SCEV *StrideBase = StrideExpr;
3038 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3039 StrideBase = C->getOperand();
3040 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3041}
3042
3044 const TargetTransformInfo *TTI,
3045 const TargetLibraryInfo *TLI, AAResults *AA,
3046 DominatorTree *DT, LoopInfo *LI)
3047 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3048 PtrRtChecking(nullptr), TheLoop(L) {
3049 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3050 if (TTI) {
3051 TypeSize FixedWidth =
3053 if (FixedWidth.isNonZero()) {
3054 // Scale the vector width by 2 as a rough estimate to also account for
3055 // interleaving.
3056 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3057 }
3058
3059 TypeSize ScalableWidth =
3061 if (ScalableWidth.isNonZero())
3062 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3063 }
3064 DepChecker =
3065 std::make_unique<MemoryDepChecker>(*PSE, L, MaxTargetVectorWidthInBits);
3066 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3067 if (canAnalyzeLoop())
3068 analyzeLoop(AA, LI, TLI, DT);
3069}
3070
3072 if (CanVecMem) {
3073 OS.indent(Depth) << "Memory dependences are safe";
3074 const MemoryDepChecker &DC = getDepChecker();
3075 if (!DC.isSafeForAnyVectorWidth())
3076 OS << " with a maximum safe vector width of "
3077 << DC.getMaxSafeVectorWidthInBits() << " bits";
3078 if (PtrRtChecking->Need)
3079 OS << " with run-time checks";
3080 OS << "\n";
3081 }
3082
3083 if (HasConvergentOp)
3084 OS.indent(Depth) << "Has convergent operation in loop\n";
3085
3086 if (Report)
3087 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3088
3089 if (auto *Dependences = DepChecker->getDependences()) {
3090 OS.indent(Depth) << "Dependences:\n";
3091 for (const auto &Dep : *Dependences) {
3092 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3093 OS << "\n";
3094 }
3095 } else
3096 OS.indent(Depth) << "Too many dependences, not recorded\n";
3097
3098 // List the pairs of accesses that need run-time checks to prove independence.
3099 PtrRtChecking->print(OS, Depth);
3100 OS << "\n";
3101
3102 OS.indent(Depth)
3103 << "Non vectorizable stores to invariant address were "
3104 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3105 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3106 ? ""
3107 : "not ")
3108 << "found in loop.\n";
3109
3110 OS.indent(Depth) << "SCEV assumptions:\n";
3111 PSE->getPredicate().print(OS, Depth);
3112
3113 OS << "\n";
3114
3115 OS.indent(Depth) << "Expressions re-written:\n";
3116 PSE->print(OS, Depth);
3117}
3118
3120 auto [It, Inserted] = LoopAccessInfoMap.insert({&L, nullptr});
3121
3122 if (Inserted)
3123 It->second =
3124 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3125
3126 return *It->second;
3127}
3128
3130 Function &F, const PreservedAnalyses &PA,
3132 // Check whether our analysis is preserved.
3133 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3134 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3135 // If not, give up now.
3136 return true;
3137
3138 // Check whether the analyses we depend on became invalid for any reason.
3139 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3140 // invalid.
3141 return Inv.invalidate<AAManager>(F, PA) ||
3143 Inv.invalidate<LoopAnalysis>(F, PA) ||
3145}
3146
3150 auto &AA = FAM.getResult<AAManager>(F);
3151 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3152 auto &LI = FAM.getResult<LoopAnalysis>(F);
3154 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3155 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3156}
3157
3158AnalysisKey LoopAccessAnalysis::Key;
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static std::variant< MemoryDepChecker::Dependence::DepType, DepDistanceStrideAndSizeInfo > getDependenceDistanceStrideAndSize(const AccessAnalysis::MemAccessInfo &A, Instruction *AInst, const AccessAnalysis::MemAccessInfo &B, Instruction *BInst, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects, PredicatedScalarEvolution &PSE, const Loop *InnermostLoop)
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)
Check whether a pointer can participate in a runtime bounds check.
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L)
Check whether a pointer address cannot wrap.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)
Find the operand of the GEP that should be checked for consecutive stores.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &BackedgeTakenCount, const SCEV &Dist, uint64_t MaxStride, uint64_t TypeByteSize)
Given a dependence-distance Dist between two memory accesses, that have strides in the same direction...
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static bool isLoopVariantIndirectAddress(ArrayRef< const Value * > UnderlyingObjects, ScalarEvolution &SE, const Loop *L)
Returns true if any of the underlying objects has a loop varying address, i.e.
static Value * getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty)
If a value has only one user that is a CastInst, return it.
static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true))
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static std::pair< const SCEV *, const SCEV * > getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, PredicatedScalarEvolution &PSE)
Calculate Start and End points of memory access.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This pass exposes codegen information to IR-level passes.
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:76
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1446
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1520
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:47
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:360
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:378
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:321
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:473
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:601
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:685
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:293
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:83
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
An instruction for reading from memory.
Definition: Instructions.h:184
Value * getPointerOperand()
Definition: Instructions.h:280
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:564
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:631
Metadata node.
Definition: Metadata.h:1067
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:109
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: Analysis.h:264
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getCouldNotCompute()
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TypeSize getRegisterBitWidth(RegisterKind K) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:70
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:199
constexpr bool isNonZero() const
Definition: TypeSize.h:158
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:470
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are are tuples (A,...
Definition: STLExtras.h:2406
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:126
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1053
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1928
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2058
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:120
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
MDNode * Scope
The tag for alias scope specification (used with noalias).
Definition: Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:777
MDNode * NoAlias
The tag specifying the noalias scope.
Definition: Metadata.h:786
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:26
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
\When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450