//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the loop memory dependence analysis that was originally
// developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
                                     "Zero is autoselect."),
                            cl::location(
                                VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(false));
bool VectorizerParams::HoistRuntimeChecks;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const DenseMap<Value *, const SCEV *> &PtrToStride,
                                            Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const auto *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 auto *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
182
RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}
192
/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of a single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
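///
/// For example (illustrative numbers, not from the original source): with
/// A = %base, Step = 4 (an i32 access), TripCount = 100 and VF = 4,
/// N = RoundDown(99, 4) = 96 and B = %base + 384, so the access is covered by
/// the interval [%base, %base + 388) once SizeOfElt = 4 is added to the upper
/// bound.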
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 const SCEV *ScStart;
214 const SCEV *ScEnd;
215
216 if (SE->isLoopInvariant(PtrExpr, Lp)) {
217 ScStart = ScEnd = PtrExpr;
218 } else {
219 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
220 assert(AR && "Invalid addrec expression");
221 const SCEV *Ex = PSE.getBackedgeTakenCount();
222
223 ScStart = AR->getStart();
224 ScEnd = AR->evaluateAtIteration(Ex, *SE);
225 const SCEV *Step = AR->getStepRecurrence(*SE);
226
227 // For expressions with negative step, the upper bound is ScStart and the
228 // lower bound is ScEnd.
229 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
230 if (CStep->getValue()->isNegative())
231 std::swap(ScStart, ScEnd);
232 } else {
233 // Fallback case: the step is not constant, but we can still
234 // get the upper and lower bounds of the interval by using min/max
235 // expressions.
236 ScStart = SE->getUMinExpr(ScStart, ScEnd);
237 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
238 }
239 }
240 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
  assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
242
243 // Add the size of the pointed element to ScEnd.
244 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
245 Type *IdxTy = DL.getIndexType(Ptr->getType());
246 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
247 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
248
249 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
250 NeedsFreeze);
251}
252
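// Try to generate a single pointer-difference check for the pair of checking
// groups (CGI, CGJ) instead of a full (Start, End) overlap check. Summary
// added for clarity (not comment text from the original source): when both
// groups are single accesses with the same constant step that matches the
// access size, comparing the distance between the two start addresses against
// the number of bytes accessed per vectorized iteration is sufficient, and is
// cheaper to emit than two range comparisons. If any of the preconditions
// below fails, CanUseDiffCheck is cleared and the full range checks are used.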
253void RuntimePointerChecking::tryToCreateDiffCheck(
254 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
255 if (!CanUseDiffCheck)
256 return;
257
258 // If either group contains multiple different pointers, bail out.
259 // TODO: Support multiple pointers by using the minimum or maximum pointer,
260 // depending on src & sink.
261 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
262 CanUseDiffCheck = false;
263 return;
264 }
265
266 PointerInfo *Src = &Pointers[CGI.Members[0]];
267 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
268
269 // If either pointer is read and written, multiple checks may be needed. Bail
270 // out.
271 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
272 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
273 CanUseDiffCheck = false;
274 return;
275 }
276
277 ArrayRef<unsigned> AccSrc =
278 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
279 ArrayRef<unsigned> AccSink =
280 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
281 // If either pointer is accessed multiple times, there may not be a clear
282 // src/sink relation. Bail out for now.
283 if (AccSrc.size() != 1 || AccSink.size() != 1) {
284 CanUseDiffCheck = false;
285 return;
286 }
287 // If the sink is accessed before src, swap src/sink.
288 if (AccSink[0] < AccSrc[0])
289 std::swap(Src, Sink);
290
291 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
292 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
293 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
294 SinkAR->getLoop() != DC.getInnermostLoop()) {
295 CanUseDiffCheck = false;
296 return;
297 }
298
  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
303 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
304 Type *DstTy = getLoadStoreType(SinkInsts[0]);
305 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
306 CanUseDiffCheck = false;
307 return;
308 }
309 const DataLayout &DL =
310 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
311 unsigned AllocSize =
312 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
313
  // Only constant steps matching the AllocSize are supported at the moment.
  // This simplifies the difference computation. Can be extended in the
  // future.
317 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
318 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
319 Step->getAPInt().abs() != AllocSize) {
320 CanUseDiffCheck = false;
321 return;
322 }
323
324 IntegerType *IntTy =
325 IntegerType::get(Src->PointerValue->getContext(),
326 DL.getPointerSizeInBits(CGI.AddressSpace));
327
328 // When counting down, the dependence distance needs to be swapped.
329 if (Step->getValue()->isNegative())
330 std::swap(SinkAR, SrcAR);
331
332 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
333 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
334 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
335 isa<SCEVCouldNotCompute>(SrcStartInt)) {
336 CanUseDiffCheck = false;
337 return;
338 }
339
340 const Loop *InnerLoop = SrcAR->getLoop();
341 // If the start values for both Src and Sink also vary according to an outer
342 // loop, then it's probably better to avoid creating diff checks because
343 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
344 // do the expanded full range overlap checks, which can be hoisted.
345 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
346 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
347 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
348 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
349 const Loop *StartARLoop = SrcStartAR->getLoop();
350 if (StartARLoop == SinkStartAR->getLoop() &&
351 StartARLoop == InnerLoop->getParentLoop()) {
352 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
353 "cannot be hoisted out of the outer loop\n");
354 CanUseDiffCheck = false;
355 return;
356 }
357 }
358
359 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
360 << "SrcStart: " << *SrcStartInt << '\n'
361 << "SinkStartInt: " << *SinkStartInt << '\n');
362 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
363 Src->NeedsFreeze || Sink->NeedsFreeze);
364}
365
SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
374 if (needsChecking(CGI, CGJ)) {
375 tryToCreateDiffCheck(CGI, CGJ);
376 Checks.push_back(std::make_pair(&CGI, &CGJ));
377 }
378 }
379 }
380 return Checks;
381}
382
383void RuntimePointerChecking::generateChecks(
384 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
385 assert(Checks.empty() && "Checks is not empty");
386 groupChecks(DepCands, UseDependencies);
387 Checks = generateChecks();
388}
389
bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
392 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
393 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
394 if (needsChecking(M.Members[I], N.Members[J]))
395 return true;
396 return false;
397}
398
399/// Compare \p I and \p J and return the minimum.
400/// Return nullptr in case we couldn't find an answer.
401static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
402 ScalarEvolution *SE) {
403 const SCEV *Diff = SE->getMinusSCEV(J, I);
404 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
405
406 if (!C)
407 return nullptr;
408 if (C->getValue()->isNegative())
409 return J;
410 return I;
411}
412
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
415 return addPointer(
416 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
417 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
418 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
419}
420
bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
423 bool NeedsFreeze,
424 ScalarEvolution &SE) {
425 assert(AddressSpace == AS &&
426 "all pointers in a checking group must be in the same address space");
427
428 // Compare the starts and ends with the known minimum and maximum
429 // of this set. We need to know how we compare against the min/max
430 // of the set in order to be able to emit memchecks.
431 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
432 if (!Min0)
433 return false;
434
435 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
436 if (!Min1)
437 return false;
438
439 // Update the low bound expression if we've found a new min value.
440 if (Min0 == Start)
441 Low = Start;
442
443 // Update the high bound expression if we've found a new max value.
444 if (Min1 != End)
445 High = End;
446
  Members.push_back(Index);
  this->NeedsFreeze |= NeedsFreeze;
449 return true;
450}
451
452void RuntimePointerChecking::groupChecks(
453 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
454 // We build the groups from dependency candidates equivalence classes
455 // because:
456 // - We know that pointers in the same equivalence class share
457 // the same underlying object and therefore there is a chance
458 // that we can compare pointers
459 // - We wouldn't be able to merge two pointers for which we need
460 // to emit a memcheck. The classes in DepCands are already
461 // conveniently built such that no two pointers in the same
462 // class need checking against each other.
463
464 // We use the following (greedy) algorithm to construct the groups
465 // For every pointer in the equivalence class:
466 // For each existing group:
467 // - if the difference between this pointer and the min/max bounds
468 // of the group is a constant, then make the pointer part of the
469 // group and update the min/max bounds of that group as required.
470
471 CheckingGroups.clear();
472
473 // If we need to check two pointers to the same underlying object
474 // with a non-constant difference, we shouldn't perform any pointer
475 // grouping with those pointers. This is because we can easily get
476 // into cases where the resulting check would return false, even when
477 // the accesses are safe.
478 //
479 // The following example shows this:
480 // for (i = 0; i < 1000; ++i)
481 // a[5000 + i * m] = a[i] + a[i + 9000]
482 //
483 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
484 // (0, 10000) which is always false. However, if m is 1, there is no
485 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
486 // us to perform an accurate check in this case.
487 //
488 // The above case requires that we have an UnknownDependence between
489 // accesses to the same underlying object. This cannot happen unless
490 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
491 // is also false. In this case we will use the fallback path and create
492 // separate checking groups for all pointers.
493
494 // If we don't have the dependency partitions, construct a new
495 // checking pointer group for each pointer. This is also required
496 // for correctness, because in this case we can have checking between
497 // pointers to the same underlying object.
498 if (!UseDependencies) {
499 for (unsigned I = 0; I < Pointers.size(); ++I)
500 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
501 return;
502 }
503
504 unsigned TotalComparisons = 0;
505
  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
508 auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
509 Iter.first->second.push_back(Index);
510 }
511
512 // We need to keep track of what pointers we've already seen so we
513 // don't process them twice.
  SmallSet<unsigned, 2> Seen;

516 // Go through all equivalence classes, get the "pointer check groups"
517 // and add them to the overall solution. We use the order in which accesses
518 // appear in 'Pointers' to enforce determinism.
519 for (unsigned I = 0; I < Pointers.size(); ++I) {
520 // We've seen this pointer before, and therefore already processed
521 // its equivalence class.
522 if (Seen.count(I))
523 continue;
524
525 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
526 Pointers[I].IsWritePtr);
527
    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
530
531 // Because DepCands is constructed by visiting accesses in the order in
532 // which they appear in alias sets (which is deterministic) and the
533 // iteration order within an equivalence class member is only dependent on
534 // the order in which unions and insertions are performed on the
535 // equivalence class, the iteration order is deterministic.
536 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
537 MI != ME; ++MI) {
538 auto PointerI = PositionMap.find(MI->getPointer());
539 assert(PointerI != PositionMap.end() &&
540 "pointer in equivalence class not found in PositionMap");
541 for (unsigned Pointer : PointerI->second) {
542 bool Merged = false;
543 // Mark this pointer as seen.
544 Seen.insert(Pointer);
545
546 // Go through all the existing sets and see if we can find one
547 // which can include this pointer.
548 for (RuntimeCheckingPtrGroup &Group : Groups) {
549 // Don't perform more than a certain amount of comparisons.
550 // This should limit the cost of grouping the pointers to something
551 // reasonable. If we do end up hitting this threshold, the algorithm
552 // will create separate groups for all remaining pointers.
553 if (TotalComparisons > MemoryCheckMergeThreshold)
554 break;
555
556 TotalComparisons++;
557
558 if (Group.addPointer(Pointer, *this)) {
559 Merged = true;
560 break;
561 }
562 }
563
564 if (!Merged)
565 // We couldn't add this pointer to any existing set or the threshold
566 // for the number of comparisons has been reached. Create a new group
567 // to hold the current pointer.
568 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
569 }
570 }
571
572 // We've computed the grouped checks for this partition.
573 // Save the results and continue with the next one.
574 llvm::copy(Groups, std::back_inserter(CheckingGroups));
575 }
576}
577
bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
580 unsigned PtrIdx2) {
581 return (PtrToPartition[PtrIdx1] != -1 &&
582 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
583}
584
585bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
586 const PointerInfo &PointerI = Pointers[I];
587 const PointerInfo &PointerJ = Pointers[J];
588
589 // No need to check if two readonly pointers intersect.
590 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
591 return false;
592
593 // Only need to check pointers between two different dependency sets.
594 if (PointerI.DependencySetId == PointerJ.DependencySetId)
595 return false;
596
597 // Only need to check pointers in the same alias set.
598 if (PointerI.AliasSetId != PointerJ.AliasSetId)
599 return false;
600
601 return true;
602}
603
void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
607 unsigned N = 0;
608 for (const auto &Check : Checks) {
609 const auto &First = Check.first->Members, &Second = Check.second->Members;
610
611 OS.indent(Depth) << "Check " << N++ << ":\n";
612
613 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
614 for (unsigned K = 0; K < First.size(); ++K)
615 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
616
617 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
618 for (unsigned K = 0; K < Second.size(); ++K)
619 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
620 }
621}
622
void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

625 OS.indent(Depth) << "Run-time memory checks:\n";
626 printChecks(OS, Checks, Depth);
627
628 OS.indent(Depth) << "Grouped accesses:\n";
629 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
630 const auto &CG = CheckingGroups[I];
631
632 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
633 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
634 << ")\n";
635 for (unsigned J = 0; J < CG.Members.size(); ++J) {
636 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
637 << "\n";
638 }
639 }
640}
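
// Example of the textual output produced by the two print routines above
// (illustrative only; group addresses and value names are made up):
//
//   Run-time memory checks:
//   Check 0:
//     Comparing group (0x55b2a1c3d4e0):
//     %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
//     Against group (0x55b2a1c3d5f0):
//     %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
//   Grouped accesses:
//     Group 0x55b2a1c3d4e0:
//       (Low: %a High: (400 + %a))
//         Member: {%a,+,4}<%loop>
//     Group 0x55b2a1c3d5f0:
//       (Low: %b High: (400 + %b))
//         Member: {%b,+,4}<%loop>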
641
642namespace {
643
644/// Analyses memory accesses in a loop.
645///
646/// Checks whether run time pointer checks are needed and builds sets for data
647/// dependence checking.
648class AccessAnalysis {
649public:
650 /// Read or write access location.
651 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
652 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
653
  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
658 // We're analyzing dependences across loop iterations.
659 BAA.enableCrossIterationMode();
660 }
661
662 /// Register a load and whether it is only read from.
663 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
664 Value *Ptr = const_cast<Value*>(Loc.Ptr);
666 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
667 if (IsReadOnly)
668 ReadOnlyPtr.insert(Ptr);
669 }
670
671 /// Register a store.
672 void addStore(MemoryLocation &Loc, Type *AccessTy) {
673 Value *Ptr = const_cast<Value*>(Loc.Ptr);
675 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
676 }
677
678 /// Check if we can emit a run-time no-alias check for \p Access.
679 ///
680 /// Returns true if we can emit a run-time no alias check for \p Access.
681 /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
683 /// we will attempt to use additional run-time checks in order to get
684 /// the bounds of the pointer.
685 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
686 MemAccessInfo Access, Type *AccessTy,
687 const DenseMap<Value *, const SCEV *> &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
690 unsigned ASId, bool ShouldCheckStride, bool Assume);
691
692 /// Check whether we can check the pointers at runtime for
693 /// non-intersection.
694 ///
695 /// Returns true if we need no check or if we do and we can generate them
696 /// (i.e. the pointers have computable bounds).
697 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
698 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
699 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
700
701 /// Goes over all memory accesses, checks whether a RT check is needed
702 /// and builds sets of dependent accesses.
703 void buildDependenceSets() {
704 processMemAccesses();
705 }
706
707 /// Initial processing of memory accesses determined that we need to
708 /// perform dependency checking.
709 ///
710 /// Note that this can later be cleared if we retry memcheck analysis without
711 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
712 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
713
714 /// We decided that no dependence analysis would be used. Reset the state.
715 void resetDepChecks(MemoryDepChecker &DepChecker) {
716 CheckDeps.clear();
717 DepChecker.clearDependences();
718 }
719
720 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
721
722private:
  typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;

725 /// Go over all memory access and check whether runtime pointer checks
726 /// are needed and build sets of dependency check candidates.
727 void processMemAccesses();
728
729 /// Map of all accesses. Values are the types used to access memory pointed to
730 /// by the pointer.
731 PtrAccessMap Accesses;
732
733 /// The loop being checked.
734 const Loop *TheLoop;
735
736 /// List of accesses that need a further dependence check.
737 MemAccessInfoList CheckDeps;
738
739 /// Set of pointers that are read only.
740 SmallPtrSet<Value*, 16> ReadOnlyPtr;
741
742 /// Batched alias analysis results.
743 BatchAAResults BAA;
744
745 /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
747 AliasSetTracker AST;
748
749 LoopInfo *LI;
750
751 /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
753 /// dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

756 /// Initial processing of memory accesses determined that we may need
757 /// to add memchecks. Perform the analysis to determine the necessary checks.
758 ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
760 /// memcheck analysis without dependency checking
761 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
762 /// cleared while this remains set if we have potentially dependent accesses.
763 bool IsRTCheckAnalysisNeeded = false;
764
765 /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};
768
769} // end anonymous namespace
770
771/// Check whether a pointer can participate in a runtime bounds check.
772/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
773/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
                                const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
777 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
778 return true;
779
780 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
781
782 if (!AR && Assume)
783 AR = PSE.getAsAddRec(Ptr);
784
785 if (!AR)
786 return false;
787
788 return AR->isAffine();
789}
790
791/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
    const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
794 Loop *L) {
795 const SCEV *PtrScev = PSE.getSCEV(Ptr);
796 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
797 return true;
798
799 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
800 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
801 return true;
802
803 return false;
804}
805
806static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
807 function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
810 WorkList.push_back(StartPtr);
811
812 while (!WorkList.empty()) {
813 Value *Ptr = WorkList.pop_back_val();
814 if (!Visited.insert(Ptr).second)
815 continue;
816 auto *PN = dyn_cast<PHINode>(Ptr);
817 // SCEV does not look through non-header PHIs inside the loop. Such phis
818 // can be analyzed by adding separate accesses for each incoming pointer
819 // value.
820 if (PN && InnermostLoop.contains(PN->getParent()) &&
821 PN->getParent() != InnermostLoop.getHeader()) {
822 for (const Use &Inc : PN->incoming_values())
823 WorkList.push_back(Inc);
824 } else
825 AddPointer(Ptr);
826 }
827}
828
829// Walk back through the IR for a pointer, looking for a select like the
830// following:
831//
832// %offset = select i1 %cmp, i64 %a, i64 %b
833// %addr = getelementptr double, double* %base, i64 %offset
834// %ld = load double, double* %addr, align 8
835//
836// We won't be able to form a single SCEVAddRecExpr from this since the
837// address for each loop iteration depends on %cmp. We could potentially
838// produce multiple valid SCEVAddRecExprs, though, and check all of them for
839// memory safety/aliasing if needed.
840//
841// If we encounter some IR we don't yet handle, or something obviously fine
842// like a constant, then we just add the SCEV for that term to the list passed
843// in by the caller. If we have a node that may potentially yield a valid
844// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
845// ourselves before adding to the list.
846static void findForkedSCEVs(
847 ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
850 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
851 // we've exceeded our limit on recursion, just return whatever we have
852 // regardless of whether it can be used for a forked pointer or not, along
853 // with an indication of whether it might be a poison or undef value.
854 const SCEV *Scev = SE->getSCEV(Ptr);
855 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
856 !isa<Instruction>(Ptr) || Depth == 0) {
857 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
858 return;
859 }
860
861 Depth--;
862
863 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
864 return get<1>(S);
865 };
866
867 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
868 switch (Opcode) {
869 case Instruction::Add:
870 return SE->getAddExpr(L, R);
871 case Instruction::Sub:
872 return SE->getMinusSCEV(L, R);
873 default:
874 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
875 }
876 };
877
878 Instruction *I = cast<Instruction>(Ptr);
879 unsigned Opcode = I->getOpcode();
880 switch (Opcode) {
881 case Instruction::GetElementPtr: {
882 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
883 Type *SourceTy = GEP->getSourceElementType();
884 // We only handle base + single offset GEPs here for now.
885 // Not dealing with preexisting gathers yet, so no vectors.
886 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
887 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
888 break;
889 }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
893 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
894
895 // See if we need to freeze our fork...
896 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
897 any_of(OffsetScevs, UndefPoisonCheck);
898
899 // Check that we only have a single fork, on either the base or the offset.
900 // Copy the SCEV across for the one without a fork in order to generate
901 // the full SCEV for both sides of the GEP.
902 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
903 BaseScevs.push_back(BaseScevs[0]);
904 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
905 OffsetScevs.push_back(OffsetScevs[0]);
906 else {
907 ScevList.emplace_back(Scev, NeedsFreeze);
908 break;
909 }
910
911 // Find the pointer type we need to extend to.
912 Type *IntPtrTy = SE->getEffectiveSCEVType(
913 SE->getSCEV(GEP->getPointerOperand())->getType());
914
915 // Find the size of the type being pointed to. We only have a single
916 // index term (guarded above) so we don't need to index into arrays or
917 // structures, just get the size of the scalar value.
918 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
919
920 // Scale up the offsets by the size of the type, then add to the bases.
921 const SCEV *Scaled1 = SE->getMulExpr(
922 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
923 const SCEV *Scaled2 = SE->getMulExpr(
924 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
925 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
926 NeedsFreeze);
927 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
928 NeedsFreeze);
929 break;
930 }
931 case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
934 // support a single select per pointer so if there's another behind this
935 // then we just bail out and return the generic SCEV.
936 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
937 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
938 if (ChildScevs.size() == 2) {
939 ScevList.push_back(ChildScevs[0]);
940 ScevList.push_back(ChildScevs[1]);
941 } else
942 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
943 break;
944 }
945 case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
948 // support a single phi per pointer so if there's another behind this
949 // then we just bail out and return the generic SCEV.
950 if (I->getNumOperands() == 2) {
951 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
952 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
953 }
954 if (ChildScevs.size() == 2) {
955 ScevList.push_back(ChildScevs[0]);
956 ScevList.push_back(ChildScevs[1]);
957 } else
958 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
959 break;
960 }
961 case Instruction::Add:
962 case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
966 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
967
968 // See if we need to freeze our fork...
969 bool NeedsFreeze =
970 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
971
972 // Check that we only have a single fork, on either the left or right side.
973 // Copy the SCEV across for the one without a fork in order to generate
974 // the full SCEV for both sides of the BinOp.
975 if (LScevs.size() == 2 && RScevs.size() == 1)
976 RScevs.push_back(RScevs[0]);
977 else if (RScevs.size() == 2 && LScevs.size() == 1)
978 LScevs.push_back(LScevs[0]);
979 else {
980 ScevList.emplace_back(Scev, NeedsFreeze);
981 break;
982 }
983
984 ScevList.emplace_back(
985 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
986 NeedsFreeze);
987 ScevList.emplace_back(
988 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
989 NeedsFreeze);
990 break;
991 }
992 default:
993 // Just return the current SCEV if we haven't handled the instruction yet.
994 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
995 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
996 break;
997 }
998}
999
static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
                  const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1003 const Loop *L) {
1004 ScalarEvolution *SE = PSE.getSE();
1005 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
  findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1008
1009 // For now, we will only accept a forked pointer with two possible SCEVs
1010 // that are either SCEVAddRecExprs or loop invariant.
1011 if (Scevs.size() == 2 &&
1012 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1013 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1014 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1015 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1016 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1017 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1018 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1019 return Scevs;
1020 }
1021
1022 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1023}
1024
1025bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1026 MemAccessInfo Access, Type *AccessTy,
1027 const DenseMap<Value *, const SCEV *> &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
1030 unsigned ASId, bool ShouldCheckWrap,
1031 bool Assume) {
1032 Value *Ptr = Access.getPointer();
1033
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
      findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1036
1037 for (auto &P : TranslatedPtrs) {
1038 const SCEV *PtrExpr = get<0>(P);
1039 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1040 return false;
1041
1042 // When we run after a failing dependency check we have to make sure
1043 // we don't have wrapping pointers.
1044 if (ShouldCheckWrap) {
1045 // Skip wrap checking when translating pointers.
1046 if (TranslatedPtrs.size() > 1)
1047 return false;
1048
1049 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1050 auto *Expr = PSE.getSCEV(Ptr);
1051 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1052 return false;
        PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      }
1055 }
1056 // If there's only one option for Ptr, look it up after bounds and wrap
1057 // checking, because assumptions might have been added to PSE.
1058 if (TranslatedPtrs.size() == 1)
1059 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1060 false};
1061 }
1062
1063 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1064 // The id of the dependence set.
1065 unsigned DepId;
1066
1067 if (isDependencyCheckNeeded()) {
1068 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1069 unsigned &LeaderId = DepSetId[Leader];
1070 if (!LeaderId)
1071 LeaderId = RunningDepId++;
1072 DepId = LeaderId;
1073 } else
1074 // Each access has its own dependence set.
1075 DepId = RunningDepId++;
1076
1077 bool IsWrite = Access.getInt();
1078 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1079 NeedsFreeze);
1080 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1081 }
1082
1083 return true;
1084}
1085
1086bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1087 ScalarEvolution *SE, Loop *TheLoop,
1088 const DenseMap<Value *, const SCEV *> &StridesMap,
1089 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1090 // Find pointers with computable bounds. We are going to use this information
1091 // to place a runtime bound check.
1092 bool CanDoRT = true;
1093
1094 bool MayNeedRTCheck = false;
1095 if (!IsRTCheckAnalysisNeeded) return true;
1096
1097 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1098
  // We assign a consecutive id to accesses from different alias sets.
  // Accesses in different alias sets don't need to be checked against each
  // other.
1101 unsigned ASId = 0;
1102 for (auto &AS : AST) {
1103 int NumReadPtrChecks = 0;
1104 int NumWritePtrChecks = 0;
1105 bool CanDoAliasSetRT = true;
1106 ++ASId;
1107
    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

1115 // First, count how many write and read accesses are in the alias set. Also
1116 // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
1119 Value *Ptr = A.getValue();
1120 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1121
1122 if (IsWrite)
1123 ++NumWritePtrChecks;
1124 else
1125 ++NumReadPtrChecks;
1126 AccessInfos.emplace_back(Ptr, IsWrite);
1127 }
1128
1129 // We do not need runtime checks for this alias set, if there are no writes
1130 // or a single write and no reads.
1131 if (NumWritePtrChecks == 0 ||
1132 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1133 assert((AS.size() <= 1 ||
1134 all_of(AS,
1135 [this](auto AC) {
1136 MemAccessInfo AccessWrite(AC.getValue(), true);
1137 return DepCands.findValue(AccessWrite) == DepCands.end();
1138 })) &&
1139 "Can only skip updating CanDoRT below, if all entries in AS "
1140 "are reads or there is at most 1 entry");
1141 continue;
1142 }
1143
1144 for (auto &Access : AccessInfos) {
1145 for (const auto &AccessTy : Accesses[Access]) {
1146 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1147 DepSetId, TheLoop, RunningDepId, ASId,
1148 ShouldCheckWrap, false)) {
1149 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1150 << *Access.getPointer() << '\n');
1151 Retries.push_back({Access, AccessTy});
1152 CanDoAliasSetRT = false;
1153 }
1154 }
1155 }
1156
1157 // Note that this function computes CanDoRT and MayNeedRTCheck
1158 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1159 // we have a pointer for which we couldn't find the bounds but we don't
1160 // actually need to emit any checks so it does not matter.
1161 //
1162 // We need runtime checks for this alias set, if there are at least 2
1163 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1164 // any bound checks (because in that case the number of dependence sets is
1165 // incomplete).
1166 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1167
1168 // We need to perform run-time alias checks, but some pointers had bounds
1169 // that couldn't be checked.
1170 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1171 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1172 // We know that we need these checks, so we can now be more aggressive
1173 // and add further checks if required (overflow checks).
1174 CanDoAliasSetRT = true;
1175 for (auto Retry : Retries) {
1176 MemAccessInfo Access = Retry.first;
1177 Type *AccessTy = Retry.second;
1178 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1179 DepSetId, TheLoop, RunningDepId, ASId,
1180 ShouldCheckWrap, /*Assume=*/true)) {
1181 CanDoAliasSetRT = false;
1182 UncomputablePtr = Access.getPointer();
1183 break;
1184 }
1185 }
1186 }
1187
1188 CanDoRT &= CanDoAliasSetRT;
1189 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1190 ++ASId;
1191 }
1192
1193 // If the pointers that we would use for the bounds comparison have different
1194 // address spaces, assume the values aren't directly comparable, so we can't
1195 // use them for the runtime check. We also have to assume they could
1196 // overlap. In the future there should be metadata for whether address spaces
1197 // are disjoint.
1198 unsigned NumPointers = RtCheck.Pointers.size();
1199 for (unsigned i = 0; i < NumPointers; ++i) {
1200 for (unsigned j = i + 1; j < NumPointers; ++j) {
1201 // Only need to check pointers between two different dependency sets.
1202 if (RtCheck.Pointers[i].DependencySetId ==
1203 RtCheck.Pointers[j].DependencySetId)
1204 continue;
1205 // Only need to check pointers in the same alias set.
1206 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1207 continue;
1208
1209 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1210 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1211
1212 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1213 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1214 if (ASi != ASj) {
1215 LLVM_DEBUG(
1216 dbgs() << "LAA: Runtime check would require comparison between"
1217 " different address spaces\n");
1218 return false;
1219 }
1220 }
1221 }
1222
1223 if (MayNeedRTCheck && CanDoRT)
1224 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1225
1226 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1227 << " pointer comparisons.\n");
1228
1229 // If we can do run-time checks, but there are no checks, no runtime checks
1230 // are needed. This can happen when all pointers point to the same underlying
1231 // object for example.
1232 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1233
1234 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1235 if (!CanDoRTIfNeeded)
1236 RtCheck.reset();
1237 return CanDoRTIfNeeded;
1238}
1239
1240void AccessAnalysis::processMemAccesses() {
1241 // We process the set twice: first we process read-write pointers, last we
1242 // process read-only pointers. This allows us to skip dependence tests for
1243 // read-only pointers.
1244
1245 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1246 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1247 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1248 LLVM_DEBUG({
1249 for (auto A : Accesses)
1250 dbgs() << "\t" << *A.first.getPointer() << " ("
1251 << (A.first.getInt()
1252 ? "write"
1253 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1254 : "read"))
1255 << ")\n";
1256 });
1257
1258 // The AliasSetTracker has nicely partitioned our pointers by metadata
1259 // compatibility and potential for underlying-object overlap. As a result, we
1260 // only need to check for potential pointer dependencies within each alias
1261 // set.
1262 for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
1264 // linked lists internally and so the iteration order here is deterministic
1265 // (matching the original instruction order within each set).
1266
1267 bool SetHasWrite = false;
1268
1269 // Map of pointers to last access encountered.
1270 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1271 UnderlyingObjToAccessMap ObjToLastAccess;
1272
1273 // Set of access to check after all writes have been processed.
1274 PtrAccessMap DeferredAccesses;
1275
1276 // Iterate over each alias set twice, once to process read/write pointers,
1277 // and then to process read-only pointers.
1278 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1279 bool UseDeferred = SetIteration > 0;
1280 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1281
1282 for (const auto &AV : AS) {
1283 Value *Ptr = AV.getValue();
1284
1285 // For a single memory access in AliasSetTracker, Accesses may contain
1286 // both read and write, and they both need to be handled for CheckDeps.
1287 for (const auto &AC : S) {
1288 if (AC.first.getPointer() != Ptr)
1289 continue;
1290
1291 bool IsWrite = AC.first.getInt();
1292
1293 // If we're using the deferred access set, then it contains only
1294 // reads.
1295 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1296 if (UseDeferred && !IsReadOnlyPtr)
1297 continue;
1298 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1299 // read or a write.
1300 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1301 S.count(MemAccessInfo(Ptr, false))) &&
1302 "Alias-set pointer not in the access set?");
1303
1304 MemAccessInfo Access(Ptr, IsWrite);
1305 DepCands.insert(Access);
1306
1307 // Memorize read-only pointers for later processing and skip them in
1308 // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
1310 // consecutive as "read-only" pointers (so that we check
1311 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1312 if (!UseDeferred && IsReadOnlyPtr) {
1313 // We only use the pointer keys, the types vector values don't
1314 // matter.
1315 DeferredAccesses.insert({Access, {}});
1316 continue;
1317 }
1318
1319 // If this is a write - check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
1321 // there is no other write to the ptr - this is an optimization to
1322 // catch "a[i] = a[i] + " without having to do a dependence check).
1323 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1324 CheckDeps.push_back(Access);
1325 IsRTCheckAnalysisNeeded = true;
1326 }
1327
1328 if (IsWrite)
1329 SetHasWrite = true;
1330
1331 // Create sets of pointers connected by a shared alias set and
1332 // underlying object.
1333 typedef SmallVector<const Value *, 16> ValueVector;
1334 ValueVector TempObjects;
1335
1336 getUnderlyingObjects(Ptr, TempObjects, LI);
1338 << "Underlying objects for pointer " << *Ptr << "\n");
1339 for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases anything; don't join sets for pointers
            // that have "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
1346 continue;
1347
1348 UnderlyingObjToAccessMap::iterator Prev =
1349 ObjToLastAccess.find(UnderlyingObj);
1350 if (Prev != ObjToLastAccess.end())
1351 DepCands.unionSets(Access, Prev->second);
1352
1353 ObjToLastAccess[UnderlyingObj] = Access;
1354 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1355 }
1356 }
1357 }
1358 }
1359 }
1360}
1361
1362/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1363/// i.e. monotonically increasing/decreasing.
1364static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1365 PredicatedScalarEvolution &PSE, const Loop *L) {
1366
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;
1373
1374 // Scalar evolution does not propagate the non-wrapping flags to values that
1375 // are derived from a non-wrapping induction variable because non-wrapping
1376 // could be flow-sensitive.
1377 //
1378 // Look through the potentially overflowing instruction to try to prove
1379 // non-wrapping for the *specific* value of Ptr.
1380
1381 // The arithmetic implied by an inbounds GEP can't overflow.
1382 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1383 if (!GEP || !GEP->isInBounds())
1384 return false;
1385
1386 // Make sure there is only one non-const index and analyze that.
1387 Value *NonConstIndex = nullptr;
1388 for (Value *Index : GEP->indices())
1389 if (!isa<ConstantInt>(Index)) {
1390 if (NonConstIndex)
1391 return false;
1392 NonConstIndex = Index;
1393 }
1394 if (!NonConstIndex)
1395 // The recurrence is on the pointer, ignore for now.
1396 return false;
1397
1398 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1399 // AddRec using a NSW operation.
1400 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1401 if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
1403 // easily found.
1404 isa<ConstantInt>(OBO->getOperand(1))) {
1405 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1406
1407 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1408 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1409 }
1410
1411 return false;
1412}
1413
1414/// Check whether the access through \p Ptr has a constant stride.
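/// For example (illustrative, not from the original source): for i32 accesses
/// of the form A[2*i] the address recurrence has a step of 8 bytes while the
/// access type allocates 4 bytes, so the returned stride is 2; strides of 1 or
/// -1 denote consecutive forward or backward accesses.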
std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
                                          Type *AccessTy, Value *Ptr,
1417 const Loop *Lp,
1418 const DenseMap<Value *, const SCEV *> &StridesMap,
1419 bool Assume, bool ShouldCheckWrap) {
1420 Type *Ty = Ptr->getType();
1421 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1422
1423 if (isa<ScalableVectorType>(AccessTy)) {
1424 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1425 << "\n");
1426 return std::nullopt;
1427 }
1428
1429 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1430
1431 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1432 if (Assume && !AR)
1433 AR = PSE.getAsAddRec(Ptr);
1434
1435 if (!AR) {
1436 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1437 << " SCEV: " << *PtrScev << "\n");
1438 return std::nullopt;
1439 }
1440
1441 // The access function must stride over the innermost loop.
1442 if (Lp != AR->getLoop()) {
1443 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1444 << *Ptr << " SCEV: " << *AR << "\n");
1445 return std::nullopt;
1446 }
1447
1448 // Check the step is constant.
1449 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1450
1451 // Calculate the pointer stride and check if it is constant.
1452 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1453 if (!C) {
1454 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1455 << " SCEV: " << *AR << "\n");
1456 return std::nullopt;
1457 }
1458
1459 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1460 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1461 int64_t Size = AllocSize.getFixedValue();
1462 const APInt &APStepVal = C->getAPInt();
1463
1464 // Huge step value - give up.
1465 if (APStepVal.getBitWidth() > 64)
1466 return std::nullopt;
1467
1468 int64_t StepVal = APStepVal.getSExtValue();
1469
1470 // Strided access.
1471 int64_t Stride = StepVal / Size;
1472 int64_t Rem = StepVal % Size;
1473 if (Rem)
1474 return std::nullopt;
1475
1476 if (!ShouldCheckWrap)
1477 return Stride;
1478
1479 // The address calculation must not wrap. Otherwise, a dependence could be
1480 // inverted.
1481 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1482 return Stride;
1483
  // An inbounds getelementptr that is an AddRec with a unit stride
1485 // cannot wrap per definition. If it did, the result would be poison
1486 // and any memory access dependent on it would be immediate UB
1487 // when executed.
1488 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1489 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1490 return Stride;
1491
  // If the null pointer is undefined, then an access sequence which would
1493 // otherwise access it can be assumed not to unsigned wrap. Note that this
1494 // assumes the object in memory is aligned to the natural alignment.
1495 unsigned AddrSpace = Ty->getPointerAddressSpace();
1496 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1497 (Stride == 1 || Stride == -1))
1498 return Stride;
1499
1500 if (Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1503 << "LAA: Pointer: " << *Ptr << "\n"
1504 << "LAA: SCEV: " << *AR << "\n"
1505 << "LAA: Added an overflow assumption\n");
1506 return Stride;
1507 }
1508 LLVM_DEBUG(
1509 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1510 << *Ptr << " SCEV: " << *AR << "\n");
1511 return std::nullopt;
1512}
1513
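// Note added for clarity (not from the original source): getPointersDiff
// returns the distance from PtrA to PtrB in units of ElemTyA's store size.
// For example, with i32 elements and %q = getelementptr i32, ptr %p, i64 3,
// the difference between %p and %q is 3. With StrictCheck the byte distance
// must additionally be an exact multiple of the element size.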
1514std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1515 Type *ElemTyB, Value *PtrB,
1516 const DataLayout &DL,
1517 ScalarEvolution &SE, bool StrictCheck,
1518 bool CheckType) {
1519 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1520
1521 // Make sure that A and B are different pointers.
1522 if (PtrA == PtrB)
1523 return 0;
1524
1525 // Make sure that the element types are the same if required.
1526 if (CheckType && ElemTyA != ElemTyB)
1527 return std::nullopt;
1528
1529 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1530 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1531
1532 // Check that the address spaces match.
1533 if (ASA != ASB)
1534 return std::nullopt;
1535 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1536
1537 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1538 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1539 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1540
1541 int Val;
1542 if (PtrA1 == PtrB1) {
1543 // Retrieve the address space again as pointer stripping now tracks through
1544 // `addrspacecast`.
1545 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1546 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1547 // Check that the address spaces match and that the pointers are valid.
1548 if (ASA != ASB)
1549 return std::nullopt;
1550
1551 IdxWidth = DL.getIndexSizeInBits(ASA);
1552 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1553 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1554
1555 OffsetB -= OffsetA;
1556 Val = OffsetB.getSExtValue();
1557 } else {
1558 // Otherwise compute the distance with SCEV between the base pointers.
1559 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1560 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1561 const auto *Diff =
1562 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1563 if (!Diff)
1564 return std::nullopt;
1565 Val = Diff->getAPInt().getSExtValue();
1566 }
1567 int Size = DL.getTypeStoreSize(ElemTyA);
1568 int Dist = Val / Size;
1569
1570 // Ensure that the calculated distance matches the type-based one after all
1571 // the bitcasts removal in the provided pointers.
1572 if (!StrictCheck || Dist * Size == Val)
1573 return Dist;
1574 return std::nullopt;
1575}
1576
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
1583 // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
1585 Value *Ptr0 = VL[0];
1586
1587 using DistOrdPair = std::pair<int64_t, int>;
1588 auto Compare = llvm::less_first();
1589 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1590 Offsets.emplace(0, 0);
1591 int Cnt = 1;
1592 bool IsConsecutive = true;
1593 for (auto *Ptr : VL.drop_front()) {
1594 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1595 /*StrictCheck=*/true);
1596 if (!Diff)
1597 return false;
1598
1599 // Check if the pointer with the same offset is found.
1600 int64_t Offset = *Diff;
1601 auto Res = Offsets.emplace(Offset, Cnt);
1602 if (!Res.second)
1603 return false;
1604 // Consecutive order if the inserted element is the last one.
1605 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1606 ++Cnt;
1607 }
1608 SortedIndices.clear();
1609 if (!IsConsecutive) {
1610 // Fill SortedIndices array only if it is non-consecutive.
1611 SortedIndices.resize(VL.size());
1612 Cnt = 0;
1613 for (const std::pair<int64_t, int> &Pair : Offsets) {
1614 SortedIndices[Cnt] = Pair.second;
1615 ++Cnt;
1616 }
1617 }
1618 return true;
1619}
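// Illustrative sketch (hypothetical input): for VL == {&A[0], &A[2], &A[1]}
// with ElemTy == i32, the offsets relative to Ptr0 are 0, 2 and 1. The third
// pointer does not land at the end of the ordered offset set, so
// IsConsecutive becomes false and SortedIndices is filled with {0, 2, 1},
// the permutation that visits the pointers in increasing address order.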
1620
1621/// Returns true if the memory operations \p A and \p B are consecutive.
1622bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1623 ScalarEvolution &SE, bool CheckType) {
1624 Value *PtrA = getLoadStorePointerOperand(A);
1625 Value *PtrB = getLoadStorePointerOperand(B);
1626 if (!PtrA || !PtrB)
1627 return false;
1628 Type *ElemTyA = getLoadStoreType(A);
1629 Type *ElemTyB = getLoadStoreType(B);
1630 std::optional<int> Diff =
1631 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1632 /*StrictCheck=*/true, CheckType);
1633 return Diff && *Diff == 1;
1634}
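// Illustrative sketch (hypothetical accesses): two i32 loads of A[3] and
// A[4] give a pointer difference of exactly one element, so they are
// reported as consecutive; loads of A[3] and A[5] give a difference of 2
// and the function returns false.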
1635
1636void MemoryDepChecker::addAccess(StoreInst *SI) {
1637 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1638 [this, SI](Value *Ptr) {
1639 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1640 InstMap.push_back(SI);
1641 ++AccessIdx;
1642 });
1643}
1644
1645void MemoryDepChecker::addAccess(LoadInst *LI) {
1646 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1647 [this, LI](Value *Ptr) {
1648 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1649 InstMap.push_back(LI);
1650 ++AccessIdx;
1651 });
1652}
1653
1654MemoryDepChecker::VectorizationSafetyStatus
1655MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1656 switch (Type) {
1657 case NoDep:
1658 case Forward:
1659 case BackwardVectorizable:
1660 return VectorizationSafetyStatus::Safe;
1661
1662 case Unknown:
1663 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1664 case ForwardButPreventsForwarding:
1665 case Backward:
1666 case BackwardVectorizableButPreventsForwarding:
1667 return VectorizationSafetyStatus::Unsafe;
1668 }
1669 llvm_unreachable("unexpected DepType!");
1670}
1671
1672bool MemoryDepChecker::Dependence::isBackward() const {
1673 switch (Type) {
1674 case NoDep:
1675 case Forward:
1676 case ForwardButPreventsForwarding:
1677 case Unknown:
1678 return false;
1679
1680 case BackwardVectorizable:
1681 case Backward:
1682 case BackwardVectorizableButPreventsForwarding:
1683 return true;
1684 }
1685 llvm_unreachable("unexpected DepType!");
1686}
1687
1688bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1689 return isBackward() || Type == Unknown;
1690}
1691
1692bool MemoryDepChecker::Dependence::isForward() const {
1693 switch (Type) {
1694 case Forward:
1695 case ForwardButPreventsForwarding:
1696 return true;
1697
1698 case NoDep:
1699 case Unknown:
1700 case BackwardVectorizable:
1701 case Backward:
1702 case BackwardVectorizableButPreventsForwarding:
1703 return false;
1704 }
1705 llvm_unreachable("unexpected DepType!");
1706}
1707
1708bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1709 uint64_t TypeByteSize) {
1710 // If loads occur at a distance that is not a multiple of a feasible vector
1711 // factor, store-load forwarding does not take place.
1712 // Positive dependences might cause trouble because vectorizing them might
1713 // prevent store-load forwarding, making the vectorized code run a lot slower.
1714 // a[i] = a[i-3] ^ a[i-8];
1715 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1716 // hence on your typical architecture store-load forwarding does not take
1717 // place. Vectorizing in such cases does not make sense.
1718 // Store-load forwarding distance.
1719
1720 // After this many iterations store-to-load forwarding conflicts should not
1721 // cause any slowdowns.
1722 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1723 // Maximum vector factor.
1724 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1725 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1726
1727 // Compute the smallest VF at which the store and load would be misaligned.
1728 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1729 VF *= 2) {
1730 // If the number of vector iterations between the store and the load is
1731 // small, we could incur conflicts.
1732 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1733 MaxVFWithoutSLForwardIssues = (VF >> 1);
1734 break;
1735 }
1736 }
1737
1738 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1739 LLVM_DEBUG(
1740 dbgs() << "LAA: Distance " << Distance
1741 << " that could cause a store-load forwarding conflict\n");
1742 return true;
1743 }
1744
1745 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1746 MaxVFWithoutSLForwardIssues !=
1747 VectorizerParams::MaxVectorWidth * TypeByteSize)
1748 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1749 return false;
1750}
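// Illustrative sketch (hypothetical values): with TypeByteSize == 4 and
// MinDepDistBytes == 256, candidate VFs of 8, 16, 32, ... bytes are probed
// and NumItersForStoreLoadThroughMemory == 32. For Distance == 24 the check
// fires at VF == 16 (24 % 16 != 0 and 24 / 16 < 32), clamping
// MaxVFWithoutSLForwardIssues to 8; that is still >= 2 * TypeByteSize, so no
// conflict is reported, but MinDepDistBytes is lowered to 8.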
1751
1752void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1753 if (Status < S)
1754 Status = S;
1755}
1756
1757/// Given a dependence-distance \p Dist between two
1758/// memory accesses, that have the same stride whose absolute value is given
1759/// in \p Stride, and that have the same type size \p TypeByteSize,
1760/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it is
1761/// possible to prove statically that the dependence distance is larger
1762/// than the range that the accesses will travel through the execution of
1763/// the loop. If so, return true; false otherwise. This is useful for
1764/// example in loops such as the following (PR31098):
1765/// for (i = 0; i < D; ++i) {
1766/// = out[i];
1767/// out[i+D] =
1768/// }
1769static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1770 const SCEV &BackedgeTakenCount,
1771 const SCEV &Dist, uint64_t Stride,
1772 uint64_t TypeByteSize) {
1773
1774 // If we can prove that
1775 // (**) |Dist| > BackedgeTakenCount * Step
1776 // where Step is the absolute stride of the memory accesses in bytes,
1777 // then there is no dependence.
1778 //
1779 // Rationale:
1780 // We basically want to check if the absolute distance (|Dist/Step|)
1781 // is >= the loop iteration count (or > BackedgeTakenCount).
1782 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1783 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1784 // that the dependence distance is >= VF; This is checked elsewhere.
1785 // But in some cases we can prune dependence distances early, and
1786 // even before selecting the VF, and without a runtime test, by comparing
1787 // the distance against the loop iteration count. Since the vectorized code
1788 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1789 // also guarantees that distance >= VF.
1790 //
1791 const uint64_t ByteStride = Stride * TypeByteSize;
1792 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1793 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1794
1795 const SCEV *CastedDist = &Dist;
1796 const SCEV *CastedProduct = Product;
1797 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1798 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1799
1800 // The dependence distance can be positive/negative, so we sign extend Dist;
1801 // The multiplication of the absolute stride in bytes and the
1802 // backedgeTakenCount is non-negative, so we zero extend Product.
1803 if (DistTypeSizeBits > ProductTypeSizeBits)
1804 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1805 else
1806 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1807
1808 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1809 // (If so, then we have proven (**) because |Dist| >= Dist)
1810 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1811 if (SE.isKnownPositive(Minus))
1812 return true;
1813
1814 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1815 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1816 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1817 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1818 if (SE.isKnownPositive(Minus))
1819 return true;
1820
1821 return false;
1822}
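// Illustrative sketch (hypothetical loop): in the PR31098-style example from
// the comment above, the distance between out[i] and out[i+D] is
// D * TypeByteSize bytes while BackedgeTakenCount == D - 1 and
// Step == TypeByteSize, so Dist - BackedgeTakenCount * Step simplifies to
// TypeByteSize > 0 and the accesses are proven independent without a
// runtime check.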
1823
1824/// Check the dependence for two accesses with the same stride \p Stride.
1825/// \p Distance is the positive distance and \p TypeByteSize is the type size in
1826/// bytes.
1827///
1828/// \returns true if they are independent.
1829static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1830 uint64_t TypeByteSize) {
1831 assert(Stride > 1 && "The stride must be greater than 1");
1832 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1833 assert(Distance > 0 && "The distance must be non-zero");
1834
1835 // Skip if the distance is not a multiple of the type byte size.
1836 if (Distance % TypeByteSize)
1837 return false;
1838
1839 uint64_t ScaledDist = Distance / TypeByteSize;
1840
1841 // No dependence if the scaled distance is not a multiple of the stride.
1842 // E.g.
1843 // for (i = 0; i < 1024 ; i += 4)
1844 // A[i+2] = A[i] + 1;
1845 //
1846 // Two accesses in memory (scaled distance is 2, stride is 4):
1847 // | A[0] | | | | A[4] | | | |
1848 // | | | A[2] | | | | A[6] | |
1849 //
1850 // E.g.
1851 // for (i = 0; i < 1024 ; i += 3)
1852 // A[i+4] = A[i] + 1;
1853 //
1854 // Two accesses in memory (scaled distance is 4, stride is 3):
1855 // | A[0] | | | A[3] | | | A[6] | | |
1856 // | | | | | A[4] | | | A[7] | |
1857 return ScaledDist % Stride;
1858}
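// Illustrative sketch (hypothetical values): in the first example above
// (A[i+2] = A[i] with i += 4 and i32 elements) Distance == 8 bytes and
// TypeByteSize == 4, so ScaledDist == 2 and 2 % 4 != 0, hence the accesses
// are independent. A distance of 16 bytes instead gives ScaledDist == 4 and
// 4 % 4 == 0, so independence could not be proven this way.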
1859
1860MemoryDepChecker::Dependence::DepType
1861MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1862 const MemAccessInfo &B, unsigned BIdx,
1863 const DenseMap<Value *, const SCEV *> &Strides) {
1864 assert (AIdx < BIdx && "Must pass arguments in program order");
1865
1866 auto [APtr, AIsWrite] = A;
1867 auto [BPtr, BIsWrite] = B;
1868 Type *ATy = getLoadStoreType(InstMap[AIdx]);
1869 Type *BTy = getLoadStoreType(InstMap[BIdx]);
1870
1871 // Two reads are independent.
1872 if (!AIsWrite && !BIsWrite)
1873 return Dependence::NoDep;
1874
1875 // We cannot check pointers in different address spaces.
1876 if (APtr->getType()->getPointerAddressSpace() !=
1877 BPtr->getType()->getPointerAddressSpace())
1878 return Dependence::Unknown;
1879
1880 int64_t StrideAPtr =
1881 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1882 int64_t StrideBPtr =
1883 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1884
1885 const SCEV *Src = PSE.getSCEV(APtr);
1886 const SCEV *Sink = PSE.getSCEV(BPtr);
1887
1888 // If the induction step is negative we have to invert source and sink of the
1889 // dependence.
1890 if (StrideAPtr < 0) {
1891 std::swap(APtr, BPtr);
1892 std::swap(ATy, BTy);
1893 std::swap(Src, Sink);
1894 std::swap(AIsWrite, BIsWrite);
1895 std::swap(AIdx, BIdx);
1896 std::swap(StrideAPtr, StrideBPtr);
1897 }
1898
1899 ScalarEvolution &SE = *PSE.getSE();
1900 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1901
1902 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1903 << "(Induction step: " << StrideAPtr << ")\n");
1904 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1905 << *InstMap[BIdx] << ": " << *Dist << "\n");
1906
1907 // Need accesses with constant stride. We don't want to vectorize
1908 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1909 // the address space.
1910 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1911 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1912 return Dependence::Unknown;
1913 }
1914
1915 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1916 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1917 bool HasSameSize =
1918 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1919 uint64_t Stride = std::abs(StrideAPtr);
1920
1921 if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1922 isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
1923 Stride, TypeByteSize))
1924 return Dependence::NoDep;
1925
1926 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1927 if (!C) {
1928 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1929 FoundNonConstantDistanceDependence = true;
1930 return Dependence::Unknown;
1931 }
1932
1933 const APInt &Val = C->getAPInt();
1934 int64_t Distance = Val.getSExtValue();
1935
1936 // Attempt to prove strided accesses independent.
1937 if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
1938 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1939 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1940 return Dependence::NoDep;
1941 }
1942
1943 // Negative distances are not plausible dependencies.
1944 if (Val.isNegative()) {
1945 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1946 // There is no need to update MaxSafeVectorWidthInBits after call to
1947 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes,
1948 // since a forward dependency will allow vectorization using any width.
1949 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1950 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1951 !HasSameSize)) {
1952 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1953 return Dependence::ForwardButPreventsForwarding;
1954 }
1955
1956 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1957 return Dependence::Forward;
1958 }
1959
1960 // Write to the same location with the same size.
1961 if (Val == 0) {
1962 if (HasSameSize)
1963 return Dependence::Forward;
1964 LLVM_DEBUG(
1965 dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1966 return Dependence::Unknown;
1967 }
1968
1969 assert(Val.isStrictlyPositive() && "Expect a positive value");
1970
1971 if (!HasSameSize) {
1972 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1973 "different type sizes\n");
1974 return Dependence::Unknown;
1975 }
1976
1977 // Bail out early if passed-in parameters make vectorization not feasible.
1978 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1979 VectorizerParams::VectorizationFactor : 1);
1980 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1981 VectorizerParams::VectorizationInterleave : 1);
1982 // The minimum number of iterations for a vectorized/unrolled version.
1983 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1984
1985 // It's not vectorizable if the distance is smaller than the minimum distance
1986 // needed for a vectorized/unrolled version. Vectorizing one iteration in
1987 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1988 // TypeByteSize (no need to add the last gap distance).
1989 //
1990 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1991 // foo(int *A) {
1992 // int *B = (int *)((char *)A + 14);
1993 // for (i = 0 ; i < 1024 ; i += 2)
1994 // B[i] = A[i] + 1;
1995 // }
1996 //
1997 // Two accesses in memory (stride is 2):
1998 // | A[0] | | A[2] | | A[4] | | A[6] | |
1999 // | B[0] | | B[2] | | B[4] |
2000 //
2001 // Distance needed for vectorizing all iterations except the last one:
2002 // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
2003 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2004 //
2005 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2006 // 12, which is less than the distance of 14.
2007 //
2008 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2009 // the minimum distance needed is 28, which is greater than the distance. It is
2010 // not safe to do vectorization.
2011 uint64_t MinDistanceNeeded =
2012 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
2013 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
2014 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
2015 << Distance << '\n');
2016 return Dependence::Backward;
2017 }
2018
2019 // Unsafe if the minimum distance needed is greater than the smallest
2020 // dependence distance.
2021 if (MinDistanceNeeded > MinDepDistBytes) {
2022 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2023 << MinDistanceNeeded << " size in bytes\n");
2024 return Dependence::Backward;
2025 }
2026
2027 // Positive distance bigger than max vectorization factor.
2028 // FIXME: Should use max factor instead of max distance in bytes, which
2029 // cannot handle different types.
2030 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2031 // void foo (int *A, char *B) {
2032 // for (unsigned i = 0; i < 1024; i++) {
2033 // A[i+2] = A[i] + 1;
2034 // B[i+2] = B[i] + 1;
2035 // }
2036 // }
2037 //
2038 // This case is currently unsafe according to the max safe distance. If we
2039 // analyze the two accesses on array B, the max safe dependence distance
2040 // is 2. Then we analyze the accesses on array A: the minimum distance needed
2041 // is 8, which exceeds 2, so vectorization is forbidden. But actually
2042 // both A and B could be vectorized by 2 iterations.
2043 MinDepDistBytes =
2044 std::min(static_cast<uint64_t>(Distance), MinDepDistBytes);
2045
2046 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2047 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2048 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2049 couldPreventStoreLoadForward(Distance, TypeByteSize)) {
2050 // Sanity check that we didn't update MinDepDistBytes when calling
2051 // couldPreventStoreLoadForward
2052 assert(MinDepDistBytes == MinDepDistBytesOld &&
2053 "An update to MinDepDistBytes requires an update to "
2054 "MaxSafeVectorWidthInBits");
2055 (void)MinDepDistBytesOld;
2056 return Dependence::BackwardVectorizableButPreventsForwarding;
2057 }
2058
2059 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2060 // since there is a backwards dependency.
2061 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * Stride);
2062 LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
2063 << " with max VF = " << MaxVF << '\n');
2064 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2065 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2066 return Dependence::BackwardVectorizable;
2067}
2068
2069bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
2070 MemAccessInfoList &CheckDeps,
2071 const DenseMap<Value *, const SCEV *> &Strides) {
2072
2073 MinDepDistBytes = -1;
2074 SmallPtrSet<MemAccessInfo, 8> Visited;
2075 for (MemAccessInfo CurAccess : CheckDeps) {
2076 if (Visited.count(CurAccess))
2077 continue;
2078
2079 // Get the relevant memory access set.
2080 EquivalenceClasses<MemAccessInfo>::iterator I =
2081 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2082
2083 // Check accesses within this set.
2084 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2085 AccessSets.member_begin(I);
2086 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2087 AccessSets.member_end();
2088
2089 // Check every access pair.
2090 while (AI != AE) {
2091 Visited.insert(*AI);
2092 bool AIIsWrite = AI->getInt();
2093 // Check loads only against later accesses in the equivalence class, but check
2094 // stores also against other stores in the same equivalence class - to the same address.
2095 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2096 (AIIsWrite ? AI : std::next(AI));
2097 while (OI != AE) {
2098 // Check every accessing instruction pair in program order.
2099 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2100 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2101 // Scan all accesses of another equivalence class, but only the next
2102 // accesses of the same equivalence class.
2103 for (std::vector<unsigned>::iterator
2104 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2105 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2106 I2 != I2E; ++I2) {
2107 auto A = std::make_pair(&*AI, *I1);
2108 auto B = std::make_pair(&*OI, *I2);
2109
2110 assert(*I1 != *I2);
2111 if (*I1 > *I2)
2112 std::swap(A, B);
2113
2114 Dependence::DepType Type =
2115 isDependent(*A.first, A.second, *B.first, B.second, Strides);
2116 mergeInStatus(Dependence::isSafeForVectorization(Type));
2117
2118 // Gather dependences unless we accumulated MaxDependences
2119 // dependences. In that case return as soon as we find the first
2120 // unsafe dependence. This puts a limit on this quadratic
2121 // algorithm.
2122 if (RecordDependences) {
2123 if (Type != Dependence::NoDep)
2124 Dependences.push_back(Dependence(A.second, B.second, Type));
2125
2126 if (Dependences.size() >= MaxDependences) {
2127 RecordDependences = false;
2128 Dependences.clear();
2129 LLVM_DEBUG(dbgs()
2130 << "Too many dependences, stopped recording\n");
2131 }
2132 }
2133 if (!RecordDependences && !isSafeForVectorization())
2134 return false;
2135 }
2136 ++OI;
2137 }
2138 AI++;
2139 }
2140 }
2141
2142 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2143 return isSafeForVectorization();
2144}
2145
2146SmallVector<Instruction *, 4>
2147MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
2148 MemAccessInfo Access(Ptr, isWrite);
2149 auto &IndexVector = Accesses.find(Access)->second;
2150
2151 SmallVector<Instruction *, 4> Insts;
2152 transform(IndexVector,
2153 std::back_inserter(Insts),
2154 [&](unsigned Idx) { return this->InstMap[Idx]; });
2155 return Insts;
2156}
2157
2159 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
2160 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
2161
2162void MemoryDepChecker::Dependence::print(
2163 raw_ostream &OS, unsigned Depth,
2164 const SmallVectorImpl<Instruction *> &Instrs) const {
2165 OS.indent(Depth) << DepName[Type] << ":\n";
2166 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2167 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2168}
2169
2170bool LoopAccessInfo::canAnalyzeLoop() {
2171 // We need to have a loop header.
2172 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2173 << TheLoop->getHeader()->getParent()->getName() << ": "
2174 << TheLoop->getHeader()->getName() << '\n');
2175
2176 // We can only analyze innermost loops.
2177 if (!TheLoop->isInnermost()) {
2178 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2179 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2180 return false;
2181 }
2182
2183 // We must have a single backedge.
2184 if (TheLoop->getNumBackEdges() != 1) {
2185 LLVM_DEBUG(
2186 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2187 recordAnalysis("CFGNotUnderstood")
2188 << "loop control flow is not understood by analyzer";
2189 return false;
2190 }
2191
2192 // ScalarEvolution needs to be able to find the exit count.
2193 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2194 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2195 recordAnalysis("CantComputeNumberOfIterations")
2196 << "could not determine number of loop iterations";
2197 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2198 return false;
2199 }
2200
2201 return true;
2202}
2203
2204void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2205 const TargetLibraryInfo *TLI,
2206 DominatorTree *DT) {
2207 // Holds the Load and Store instructions.
2208 SmallVector<LoadInst *, 16> Loads;
2209 SmallVector<StoreInst *, 16> Stores;
2210
2211 // Holds all the different accesses in the loop.
2212 unsigned NumReads = 0;
2213 unsigned NumReadWrites = 0;
2214
2215 bool HasComplexMemInst = false;
2216
2217 // A runtime check is only legal to insert if there are no convergent calls.
2218 HasConvergentOp = false;
2219
2220 PtrRtChecking->Pointers.clear();
2221 PtrRtChecking->Need = false;
2222
2223 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2224
2225 const bool EnableMemAccessVersioningOfLoop =
2227 !TheLoop->getHeader()->getParent()->hasOptSize();
2228
2229 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2230 // loop info, as it may be arbitrary.
2231 LoopBlocksRPO RPOT(TheLoop);
2232 RPOT.perform(LI);
2233 for (BasicBlock *BB : RPOT) {
2234 // Scan the BB and collect legal loads and stores. Also detect any
2235 // convergent instructions.
2236 for (Instruction &I : *BB) {
2237 if (auto *Call = dyn_cast<CallBase>(&I)) {
2238 if (Call->isConvergent())
2239 HasConvergentOp = true;
2240 }
2241
2242 // If both a non-vectorizable memory instruction and a convergent operation
2243 // have been found in this loop, there is no reason to continue the search.
2244 if (HasComplexMemInst && HasConvergentOp) {
2245 CanVecMem = false;
2246 return;
2247 }
2248
2249 // Avoid hitting recordAnalysis multiple times.
2250 if (HasComplexMemInst)
2251 continue;
2252
2253 // Many math library functions read the rounding mode. We will only
2254 // vectorize a loop if it contains known function calls that don't set
2255 // the flag. Therefore, it is safe to ignore this read from memory.
2256 auto *Call = dyn_cast<CallInst>(&I);
2257 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2258 continue;
2259
2260 // If this is a load, save it. If this instruction can read from memory
2261 // but is not a load, then we quit. Notice that we don't handle function
2262 // calls that read or write.
2263 if (I.mayReadFromMemory()) {
2264 // If the function has an explicit vectorized counterpart, we can safely
2265 // assume that it can be vectorized.
2266 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2267 !VFDatabase::getMappings(*Call).empty())
2268 continue;
2269
2270 auto *Ld = dyn_cast<LoadInst>(&I);
2271 if (!Ld) {
2272 recordAnalysis("CantVectorizeInstruction", Ld)
2273 << "instruction cannot be vectorized";
2274 HasComplexMemInst = true;
2275 continue;
2276 }
2277 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2278 recordAnalysis("NonSimpleLoad", Ld)
2279 << "read with atomic ordering or volatile read";
2280 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2281 HasComplexMemInst = true;
2282 continue;
2283 }
2284 NumLoads++;
2285 Loads.push_back(Ld);
2286 DepChecker->addAccess(Ld);
2287 if (EnableMemAccessVersioningOfLoop)
2288 collectStridedAccess(Ld);
2289 continue;
2290 }
2291
2292 // Save 'store' instructions. Abort if other instructions write to memory.
2293 if (I.mayWriteToMemory()) {
2294 auto *St = dyn_cast<StoreInst>(&I);
2295 if (!St) {
2296 recordAnalysis("CantVectorizeInstruction", St)
2297 << "instruction cannot be vectorized";
2298 HasComplexMemInst = true;
2299 continue;
2300 }
2301 if (!St->isSimple() && !IsAnnotatedParallel) {
2302 recordAnalysis("NonSimpleStore", St)
2303 << "write with atomic ordering or volatile write";
2304 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2305 HasComplexMemInst = true;
2306 continue;
2307 }
2308 NumStores++;
2309 Stores.push_back(St);
2310 DepChecker->addAccess(St);
2311 if (EnableMemAccessVersioningOfLoop)
2312 collectStridedAccess(St);
2313 }
2314 } // Next instr.
2315 } // Next block.
2316
2317 if (HasComplexMemInst) {
2318 CanVecMem = false;
2319 return;
2320 }
2321
2322 // Now we have two lists that hold the loads and the stores.
2323 // Next, we find the pointers that they use.
2324
2325 // Check if we see any stores. If there are no stores, then we don't
2326 // care if the pointers are *restrict*.
2327 if (!Stores.size()) {
2328 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2329 CanVecMem = true;
2330 return;
2331 }
2332
2333 MemoryDepChecker::DepCandidates DependentAccesses;
2334 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
2335
2336 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2337 // multiple times on the same object. If the ptr is accessed twice, once
2338 // for read and once for write, it will only appear once (on the write
2339 // list). This is okay, since we are going to check for conflicts between
2340 // writes and between reads and writes, but not between reads and reads.
2341 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2342
2343 // Record uniform store addresses to identify if we have multiple stores
2344 // to the same address.
2345 SmallPtrSet<Value *, 16> UniformStores;
2346
2347 for (StoreInst *ST : Stores) {
2348 Value *Ptr = ST->getPointerOperand();
2349
2350 if (isInvariant(Ptr)) {
2351 // Record store instructions to loop invariant addresses
2352 StoresToInvariantAddresses.push_back(ST);
2353 HasDependenceInvolvingLoopInvariantAddress |=
2354 !UniformStores.insert(Ptr).second;
2355 }
2356
2357 // If we did *not* see this pointer before, insert it into the read-write
2358 // list. At this phase it is only a 'write' list.
2359 Type *AccessTy = getLoadStoreType(ST);
2360 if (Seen.insert({Ptr, AccessTy}).second) {
2361 ++NumReadWrites;
2362
2363 MemoryLocation Loc = MemoryLocation::get(ST);
2364 // The TBAA metadata could have a control dependency on the predication
2365 // condition, so we cannot rely on it when determining whether or not we
2366 // need runtime pointer checks.
2367 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2368 Loc.AATags.TBAA = nullptr;
2369
2370 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2371 [&Accesses, AccessTy, Loc](Value *Ptr) {
2372 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2373 Accesses.addStore(NewLoc, AccessTy);
2374 });
2375 }
2376 }
2377
2378 if (IsAnnotatedParallel) {
2379 LLVM_DEBUG(
2380 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2381 << "checks.\n");
2382 CanVecMem = true;
2383 return;
2384 }
2385
2386 for (LoadInst *LD : Loads) {
2387 Value *Ptr = LD->getPointerOperand();
2388 // If we did *not* see this pointer before, insert it into the
2389 // read list. If we *did* see it before, then it is already in
2390 // the read-write list. This allows us to vectorize expressions
2391 // such as A[i] += x, because the address of A[i] is a read-write
2392 // pointer. This only works if the index of A[i] is consecutive.
2393 // If the address of i is unknown (for example A[B[i]]) then we may
2394 // read a few words, modify, and write a few words, and some of the
2395 // words may be written to the same address.
2396 bool IsReadOnlyPtr = false;
2397 Type *AccessTy = getLoadStoreType(LD);
2398 if (Seen.insert({Ptr, AccessTy}).second ||
2399 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2400 ++NumReads;
2401 IsReadOnlyPtr = true;
2402 }
2403
2404 // See if there is an unsafe dependency between a load to a uniform address and
2405 // store to the same uniform address.
2406 if (UniformStores.count(Ptr)) {
2407 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2408 "load and uniform store to the same address!\n");
2409 HasDependenceInvolvingLoopInvariantAddress = true;
2410 }
2411
2412 MemoryLocation Loc = MemoryLocation::get(LD);
2413 // The TBAA metadata could have a control dependency on the predication
2414 // condition, so we cannot rely on it when determining whether or not we
2415 // need runtime pointer checks.
2416 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2417 Loc.AATags.TBAA = nullptr;
2418
2419 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2420 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2421 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2422 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2423 });
2424 }
2425
2426 // If we write (or read-write) to a single destination and there are no
2427 // other reads in this loop then it is safe to vectorize.
2428 if (NumReadWrites == 1 && NumReads == 0) {
2429 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2430 CanVecMem = true;
2431 return;
2432 }
2433
2434 // Build dependence sets and check whether we need a runtime pointer bounds
2435 // check.
2436 Accesses.buildDependenceSets();
2437
2438 // Find pointers with computable bounds. We are going to use this information
2439 // to place a runtime bound check.
2440 Value *UncomputablePtr = nullptr;
2441 bool CanDoRTIfNeeded =
2442 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2443 SymbolicStrides, UncomputablePtr, false);
2444 if (!CanDoRTIfNeeded) {
2445 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2446 recordAnalysis("CantIdentifyArrayBounds", I)
2447 << "cannot identify array bounds";
2448 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2449 << "the array bounds.\n");
2450 CanVecMem = false;
2451 return;
2452 }
2453
2454 LLVM_DEBUG(
2455 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2456
2457 CanVecMem = true;
2458 if (Accesses.isDependencyCheckNeeded()) {
2459 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2460 CanVecMem = DepChecker->areDepsSafe(
2461 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2462
2463 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2464 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2465
2466 // Clear the dependency checks. We assume they are not needed.
2467 Accesses.resetDepChecks(*DepChecker);
2468
2469 PtrRtChecking->reset();
2470 PtrRtChecking->Need = true;
2471
2472 auto *SE = PSE->getSE();
2473 UncomputablePtr = nullptr;
2474 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2475 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2476
2477 // Check that we found the bounds for the pointer.
2478 if (!CanDoRTIfNeeded) {
2479 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2480 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2481 << "cannot check memory dependencies at runtime";
2482 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2483 CanVecMem = false;
2484 return;
2485 }
2486
2487 CanVecMem = true;
2488 }
2489 }
2490
2491 if (HasConvergentOp) {
2492 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2493 << "cannot add control dependency to convergent operation";
2494 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2495 "would be needed with a convergent operation\n");
2496 CanVecMem = false;
2497 return;
2498 }
2499
2500 if (CanVecMem)
2501 LLVM_DEBUG(
2502 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2503 << (PtrRtChecking->Need ? "" : " don't")
2504 << " need runtime memory checks.\n");
2505 else
2506 emitUnsafeDependenceRemark();
2507}
2508
2509void LoopAccessInfo::emitUnsafeDependenceRemark() {
2510 auto Deps = getDepChecker().getDependences();
2511 if (!Deps)
2512 return;
2513 auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2514 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2515 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2516 });
2517 if (Found == Deps->end())
2518 return;
2519 MemoryDepChecker::Dependence Dep = *Found;
2520
2521 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2522
2523 // Emit remark for first unsafe dependence
2524 bool HasForcedDistribution = false;
2525 std::optional<const MDOperand *> Value =
2526 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2527 if (Value) {
2528 const MDOperand *Op = *Value;
2529 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2530 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2531 }
2532
2533 const std::string Info =
2534 HasForcedDistribution
2535 ? "unsafe dependent memory operations in loop."
2536 : "unsafe dependent memory operations in loop. Use "
2537 "#pragma loop distribute(enable) to allow loop distribution "
2538 "to attempt to isolate the offending operations into a separate "
2539 "loop";
2541 recordAnalysis("UnsafeDep", Dep.getDestination(*this)) << Info;
2542
2543 switch (Dep.Type) {
2544 case MemoryDepChecker::Dependence::NoDep:
2545 case MemoryDepChecker::Dependence::Forward:
2546 case MemoryDepChecker::Dependence::BackwardVectorizable:
2547 llvm_unreachable("Unexpected dependence");
2548 case MemoryDepChecker::Dependence::Backward:
2549 R << "\nBackward loop carried data dependence.";
2550 break;
2551 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2552 R << "\nForward loop carried data dependence that prevents "
2553 "store-to-load forwarding.";
2554 break;
2555 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2556 R << "\nBackward loop carried data dependence that prevents "
2557 "store-to-load forwarding.";
2558 break;
2559 case MemoryDepChecker::Dependence::Unknown:
2560 R << "\nUnknown data dependence.";
2561 break;
2562 }
2563
2564 if (Instruction *I = Dep.getSource(*this)) {
2565 DebugLoc SourceLoc = I->getDebugLoc();
2566 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2567 SourceLoc = DD->getDebugLoc();
2568 if (SourceLoc)
2569 R << " Memory location is the same as accessed at "
2570 << ore::NV("Location", SourceLoc);
2571 }
2572}
2573
2574bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2575 DominatorTree *DT) {
2576 assert(TheLoop->contains(BB) && "Unknown block used");
2577
2578 // Blocks that do not dominate the latch need predication.
2579 BasicBlock* Latch = TheLoop->getLoopLatch();
2580 return !DT->dominates(BB, Latch);
2581}
2582
2583OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2584 Instruction *I) {
2585 assert(!Report && "Multiple reports generated");
2586
2587 Value *CodeRegion = TheLoop->getHeader();
2588 DebugLoc DL = TheLoop->getStartLoc();
2589
2590 if (I) {
2591 CodeRegion = I->getParent();
2592 // If there is no debug location attached to the instruction, fall back to
2593 // using the loop's.
2594 if (I->getDebugLoc())
2595 DL = I->getDebugLoc();
2596 }
2597
2598 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2599 CodeRegion);
2600 return *Report;
2601}
2602
2603bool LoopAccessInfo::isInvariant(Value *V) const {
2604 auto *SE = PSE->getSE();
2605 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2606 // trivially loop-invariant FP values to be considered invariant.
2607 if (!SE->isSCEVable(V->getType()))
2608 return false;
2609 const SCEV *S = SE->getSCEV(V);
2610 return SE->isLoopInvariant(S, TheLoop);
2611}
2612
2613/// Find the operand of the GEP that should be checked for consecutive
2614/// stores. This ignores trailing indices that have no effect on the final
2615/// pointer.
2616static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2617 const DataLayout &DL = Gep->getModule()->getDataLayout();
2618 unsigned LastOperand = Gep->getNumOperands() - 1;
2619 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2620
2621 // Walk backwards and try to peel off zeros.
2622 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2623 // Find the type we're currently indexing into.
2624 gep_type_iterator GEPTI = gep_type_begin(Gep);
2625 std::advance(GEPTI, LastOperand - 2);
2626
2627 // If it's a type with the same allocation size as the result of the GEP we
2628 // can peel off the zero index.
2629 if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
2630 break;
2631 --LastOperand;
2632 }
2633
2634 return LastOperand;
2635}
2636
2637/// If the argument is a GEP, then returns the operand identified by
2638/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2639/// operand, it returns that instead.
2640static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2641 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2642 if (!GEP)
2643 return Ptr;
2644
2645 unsigned InductionOperand = getGEPInductionOperand(GEP);
2646
2647 // Check that all of the gep indices are uniform except for our induction
2648 // operand.
2649 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2650 if (i != InductionOperand &&
2651 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2652 return Ptr;
2653 return GEP->getOperand(InductionOperand);
2654}
2655
2656/// If a value has only one user that is a CastInst, return it.
2657static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
2658 Value *UniqueCast = nullptr;
2659 for (User *U : Ptr->users()) {
2660 CastInst *CI = dyn_cast<CastInst>(U);
2661 if (CI && CI->getType() == Ty) {
2662 if (!UniqueCast)
2663 UniqueCast = CI;
2664 else
2665 return nullptr;
2666 }
2667 }
2668 return UniqueCast;
2669}
2670
2671/// Get the stride of a pointer access in a loop. Looks for symbolic
2672/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2673static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2674 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2675 if (!PtrTy || PtrTy->isAggregateType())
2676 return nullptr;
2677
2678 // Try to remove a gep instruction to make the pointer (actually the index at
2679 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2680 // the pointer, otherwise we are analyzing the index.
2681 Value *OrigPtr = Ptr;
2682
2683 // The size of the pointer access.
2684 int64_t PtrAccessSize = 1;
2685
2686 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2687 const SCEV *V = SE->getSCEV(Ptr);
2688
2689 if (Ptr != OrigPtr)
2690 // Strip off casts.
2691 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2692 V = C->getOperand();
2693
2694 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2695 if (!S)
2696 return nullptr;
2697
2698 // If the pointer is invariant then there is no stride and it makes no
2699 // sense to add it here.
2700 if (Lp != S->getLoop())
2701 return nullptr;
2702
2703 V = S->getStepRecurrence(*SE);
2704 if (!V)
2705 return nullptr;
2706
2707 // Strip off the size of access multiplication if we are still analyzing the
2708 // pointer.
2709 if (OrigPtr == Ptr) {
2710 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2711 if (M->getOperand(0)->getSCEVType() != scConstant)
2712 return nullptr;
2713
2714 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2715
2716 // Huge step value - give up.
2717 if (APStepVal.getBitWidth() > 64)
2718 return nullptr;
2719
2720 int64_t StepVal = APStepVal.getSExtValue();
2721 if (PtrAccessSize != StepVal)
2722 return nullptr;
2723 V = M->getOperand(1);
2724 }
2725 }
2726
2727 // Note that the restrictions after this loop-invariant check are only
2728 // profitability restrictions.
2729 if (!SE->isLoopInvariant(V, Lp))
2730 return nullptr;
2731
2732 // Look for the loop invariant symbolic value.
2733 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2734 if (!U) {
2735 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2736 if (!C)
2737 return nullptr;
2738 U = dyn_cast<SCEVUnknown>(C->getOperand());
2739 if (!U)
2740 return nullptr;
2741
2742 // Match legacy behavior - this is not needed for correctness
2743 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2744 return nullptr;
2745 }
2746
2747 return V;
2748}
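// Illustrative sketch (hypothetical access): for a pointer like A[i * Stride]
// whose GEP induction operand is i * Stride, stripGetElementPtr returns that
// index, its SCEV is the add-recurrence {0,+,Stride} for the loop, and the
// step recurrence is the loop-invariant SCEVUnknown Stride, which is what
// gets returned as the symbolic stride candidate.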
2749
2750void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2751 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2752 if (!Ptr)
2753 return;
2754
2755 // Note: getStrideFromPointer is a *profitability* heuristic. We
2756 // could broaden the scope of values returned here - to anything
2757 // which happens to be loop invariant and contributes to the
2758 // computation of an interesting IV - but we chose not to as we
2759 // don't have a cost model here, and broadening the scope exposes
2760 // far too many unprofitable cases.
2761 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2762 if (!StrideExpr)
2763 return;
2764
2765 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2766 "versioning:");
2767 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2768
2769 if (!SpeculateUnitStride) {
2770 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2771 return;
2772 }
2773
2774 // Avoid adding the "Stride == 1" predicate when we know that
2775 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2776 // or zero iteration loop, as Trip-Count <= Stride == 1.
2777 //
2778 // TODO: We are currently not making a very informed decision on when it is
2779 // beneficial to apply stride versioning. It might make more sense that the
2780 // users of this analysis (such as the vectorizer) will trigger it, based on
2781 // their specific cost considerations; For example, in cases where stride
2782 // versioning does not help resolving memory accesses/dependences, the
2783 // vectorizer should evaluate the cost of the runtime test, and the benefit
2784 // of various possible stride specializations, considering the alternatives
2785 // of using gather/scatters (if available).
2786
2787 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2788
2789 // Match the types so we can compare the stride and the BETakenCount.
2790 // The Stride can be positive/negative, so we sign extend Stride;
2791 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2792 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2793 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2794 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
2795 const SCEV *CastedStride = StrideExpr;
2796 const SCEV *CastedBECount = BETakenCount;
2797 ScalarEvolution *SE = PSE->getSE();
2798 if (BETypeSizeBits >= StrideTypeSizeBits)
2799 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2800 else
2801 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2802 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2803 // Since TripCount == BackEdgeTakenCount + 1, checking:
2804 // "Stride >= TripCount" is equivalent to checking:
2805 // Stride - BETakenCount > 0
2806 if (SE->isKnownPositive(StrideMinusBETaken)) {
2807 LLVM_DEBUG(
2808 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2809 "Stride==1 predicate will imply that the loop executes "
2810 "at most once.\n");
2811 return;
2812 }
2813 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2814
2815 // Strip back off the integer cast, and check that our result is a
2816 // SCEVUnknown as we expect.
2817 const SCEV *StrideBase = StrideExpr;
2818 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
2819 StrideBase = C->getOperand();
2820 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
2821}
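// Illustrative sketch (hypothetical loop): if the symbolic stride equals the
// trip count, e.g. StrideExpr == N while BETakenCount == N - 1, then
// StrideMinusBETaken == 1 is known positive and the "Stride == 1" predicate
// is not added, since the versioned loop would execute at most once.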
2822
2823LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2824 const TargetLibraryInfo *TLI, AAResults *AA,
2825 DominatorTree *DT, LoopInfo *LI)
2826 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2827 PtrRtChecking(nullptr),
2828 DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
2829 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
2830 if (canAnalyzeLoop()) {
2831 analyzeLoop(AA, LI, TLI, DT);
2832 }
2833}
2834
2835void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2836 if (CanVecMem) {
2837 OS.indent(Depth) << "Memory dependences are safe";
2838 const MemoryDepChecker &DC = getDepChecker();
2839 if (!DC.isSafeForAnyVectorWidth())
2840 OS << " with a maximum safe vector width of "
2841 << DC.getMaxSafeVectorWidthInBits() << " bits";
2842 if (PtrRtChecking->Need)
2843 OS << " with run-time checks";
2844 OS << "\n";
2845 }
2846
2847 if (HasConvergentOp)
2848 OS.indent(Depth) << "Has convergent operation in loop\n";
2849
2850 if (Report)
2851 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2852
2853 if (auto *Dependences = DepChecker->getDependences()) {
2854 OS.indent(Depth) << "Dependences:\n";
2855 for (const auto &Dep : *Dependences) {
2856 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2857 OS << "\n";
2858 }
2859 } else
2860 OS.indent(Depth) << "Too many dependences, not recorded\n";
2861
2862 // List the pairs of accesses that need run-time checks to prove independence.
2863 PtrRtChecking->print(OS, Depth);
2864 OS << "\n";
2865
2866 OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2867 << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2868 << "found in loop.\n";
2869
2870 OS.indent(Depth) << "SCEV assumptions:\n";
2871 PSE->getPredicate().print(OS, Depth);
2872
2873 OS << "\n";
2874
2875 OS.indent(Depth) << "Expressions re-written:\n";
2876 PSE->print(OS, Depth);
2877}
2878
2879const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
2880 auto I = LoopAccessInfoMap.insert({&L, nullptr});
2881
2882 if (I.second)
2883 I.first->second =
2884 std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);
2885
2886 return *I.first->second;
2887}
2888
2889bool LoopAccessInfoManager::invalidate(
2890 Function &F, const PreservedAnalyses &PA,
2891 FunctionAnalysisManager::Invalidator &Inv) {
2892 // Check whether our analysis is preserved.
2893 auto PAC = PA.getChecker<LoopAccessAnalysis>();
2894 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
2895 // If not, give up now.
2896 return true;
2897
2898 // Check whether the analyses we depend on became invalid for any reason.
2899 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
2900 // invalid.
2901 return Inv.invalidate<AAManager>(F, PA) ||
2902 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
2903 Inv.invalidate<LoopAnalysis>(F, PA) ||
2904 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2905}
2906
2907LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
2908 FunctionAnalysisManager &FAM) {
2909 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
2910 auto &AA = FAM.getResult<AAManager>(F);
2911 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
2912 auto &LI = FAM.getResult<LoopAnalysis>(F);
2913 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
2914 return LoopAccessInfoManager(SE, AA, DT, LI, &TLI);
2915}
2916
2917AnalysisKey LoopAccessAnalysis::Key;
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
uint64_t Size
bool End
Definition: ELF_riscv.cpp:469
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
#define Check(C,...)
#define DEBUG_TYPE
Hexagon Common GEP
IRTranslator LLVM IR MI
static cl::opt< bool, true > HoistRuntimeChecks("hoist-runtime-checks", cl::Hidden, cl::desc("Hoist inner loop runtime memory checks to outer loop if possible"), cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(false))
static cl::opt< unsigned > MaxDependences("max-dependences", cl::Hidden, cl::desc("Maximum number of dependences collected by " "loop-access analysis (default = 100)"), cl::init(100))
We collect dependences up to this threshold.
static cl::opt< bool > EnableForwardingConflictDetection("store-to-load-forwarding-conflict-detection", cl::Hidden, cl::desc("Enable conflict detection in loop-access analysis"), cl::init(true))
Enable store-to-load forwarding conflict detection.
static void findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr, SmallVectorImpl< PointerIntPair< const SCEV *, 1, bool > > &ScevList, unsigned Depth)
static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr, const SCEV *PtrScev, Loop *L, bool Assume)
Check whether a pointer can participate in a runtime bounds check.
static cl::opt< unsigned > MemoryCheckMergeThreshold("memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100))
The maximum iterations used to merge memory checks.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &Strides, Value *Ptr, Type *AccessTy, Loop *L)
Check whether a pointer address cannot wrap.
static const SCEV * getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
Get the stride of a pointer access in a loop.
static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep)
Find the operand of the GEP that should be checked for consecutive stores.
static cl::opt< unsigned, true > VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location(VectorizerParams::VectorizationInterleave))
static Value * getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty)
If a value has only one user that is a CastInst, return it.
static cl::opt< unsigned, true > VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor))
static cl::opt< unsigned, true > RuntimeMemoryCheckThreshold("runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8))
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE, const SCEV &BackedgeTakenCount, const SCEV &Dist, uint64_t Stride, uint64_t TypeByteSize)
Given a dependence-distance Dist between two memory accesses, that have the same stride whose absolut...
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:76
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1485
APInt abs() const
Get the absolute value.
Definition: APInt.h:1730
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1433
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:307
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition: APInt.h:334
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1507
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: PassManager.h:90
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:661
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:679
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:145
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:428
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
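A small hedged example of the EquivalenceClasses operations listed above, in the spirit of how dependent access sets get merged; the element type and names are purely illustrative.
#include "llvm/ADT/EquivalenceClasses.h"
using namespace llvm;
// Illustrative sketch: merge two keys into one class and fetch its leader.
static int leaderAfterUnion(EquivalenceClasses<int> &EC, int A, int B) {
  EC.insert(A);
  EC.insert(B);
  EC.unionSets(A, B);          // A and B now share one equivalence class
  return EC.getLeaderValue(A); // same leader as getLeaderValue(B)
}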
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:647
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:940
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Definition: Instruction.cpp:71
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:279
An instruction for reading from memory.
Definition: Instructions.h:177
Value * getPointerOperand()
Definition: Instructions.h:264
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
Return the Memory Dependence Checker, which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
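To show how the LoopAccessInfoManager / LoopAccessInfo interface above is typically consumed, here is a hedged sketch of a client pass; the pass name is hypothetical, and only calls listed above (getInfo, getDepChecker, print) plus standard analyses are assumed.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
// Illustrative client of loop-access analysis.
struct LAAClientPass : PassInfoMixin<LAAClientPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    auto &LAIs = AM.getResult<LoopAccessAnalysis>(F); // LoopAccessInfoManager
    LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
    for (Loop *L : LI) {                       // top-level loops only
      const LoopAccessInfo &LAI = LAIs.getInfo(*L);
      LAI.print(dbgs());                       // dump checks and dependences
      if (LAI.getDepChecker().isSafeForVectorization())
        dbgs() << "no inhibiting memory dependence in " << L->getName() << "\n";
    }
    return PreservedAnalyses::all();
  }
};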
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:569
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:47
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:564
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:631
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:772
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides)
Check whether the dependencies between the accesses are safe.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
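A hedged example of how the vector-width queries above combine; the helper name and the ElementSizeBits parameter are illustrative, not taken from this file.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include <cstdint>
#include <limits>
using namespace llvm;
// Illustrative sketch: derive an element-count bound from the dependence checker.
static uint64_t maxSafeElements(const MemoryDepChecker &DepChecker,
                                uint64_t ElementSizeBits) {
  if (DepChecker.isSafeForAnyVectorWidth())
    return std::numeric_limits<uint64_t>::max();          // unbounded
  return DepChecker.getMaxSafeVectorWidthInBits() / ElementSizeBits;
}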
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
AAMDNodes AATags
The metadata nodes which describe the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:254
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
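The following sketch (illustrative names; only the PredicatedScalarEvolution calls listed above are assumed) shows the usual pattern of first asking for a plain SCEV and only then falling back to getAsAddRec, which may add run-time predicates.
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
using namespace llvm;
// Illustrative sketch: view Ptr as an affine add-recurrence if at all possible.
static const SCEVAddRecExpr *asAffineAddRec(PredicatedScalarEvolution &PSE,
                                            Value *Ptr) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PtrScev))
    if (AR->isAffine())
      return AR;
  // Otherwise let PSE speculate: this may register SCEV predicates that the
  // caller must later emit as run-time checks.
  return PSE.getAsAddRec(Ptr);
}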
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: PassManager.h:310
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list of run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store them.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
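As a small illustration of the fields above (Need, getNumberOfChecks), a client could gate loop versioning as follows; the Threshold parameter is hypothetical and merely plays the role of runtime-memory-check-threshold.
#include "llvm/Analysis/LoopAccessAnalysis.h"
using namespace llvm;
// Illustrative sketch: are the run-time checks cheap enough to version on?
static bool checksAreAffordable(const RuntimePointerChecking &RtPtrChecking,
                                unsigned Threshold) {
  if (!RtPtrChecking.Need)
    return true;                              // no disambiguation required
  return RtPtrChecking.getNumberOfChecks() <= Threshold;
}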
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
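A hedged sketch of the distance computation these ScalarEvolution entries support; only getMinusSCEV and isLoopInvariant from the list above are used, and the helper name is invented for illustration.
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;
// Illustrative sketch: symbolic distance B - A, or nullptr if it varies in L.
static const SCEV *loopInvariantDistance(ScalarEvolution &SE, const SCEV *A,
                                         const SCEV *B, const Loop *L) {
  const SCEV *Dist = SE.getMinusSCEV(B, A);
  return SE.isLoopInvariant(Dist, L) ? Dist : nullptr;
}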
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:384
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:366
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:451
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:941
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:266
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:724
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:182
An efficient, type-erasing, non-owning reference to a callable.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:237
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:545
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:465
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1727
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1052
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1933
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1734
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1985
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride, return it in units of the access type size.
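For context, a caller typically reduces the getPtrStride result to a simple unit-stride test, as in this illustrative helper (names assumed, not from this file).
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include <optional>
using namespace llvm;
// Illustrative sketch: does Ptr advance by one AccessTy element per iteration?
static bool isUnitStride(PredicatedScalarEvolution &PSE, Type *AccessTy,
                         Value *Ptr, const Loop *L) {
  std::optional<int64_t> Stride = getPtrStride(PSE, AccessTy, Ptr, L);
  return Stride && *Stride == 1;
}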
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1829
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1754
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:668
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:69
Dependence between memory access instructions.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
Instruction * getDestination(const LoopAccessInfo &LAI) const
Return the destination instruction of the dependence.
Instruction * getSource(const LoopAccessInfo &LAI) const
Return the source instruction of the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
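To illustrate how the Dependence record above is usually consumed, here is a hedged sketch that walks the recorded dependences; it assumes MemoryDepChecker::getDependences() (not listed above) and a LoopAccessInfo supplied by the caller.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
// Illustrative sketch: report every lexically backward dependence in the loop.
static void reportBackwardDeps(const LoopAccessInfo &LAI) {
  const auto *Deps = LAI.getDepChecker().getDependences();
  if (!Deps)
    return;   // dependences were not recorded (e.g. too many were found)
  for (const MemoryDepChecker::Dependence &D : *Deps)
    if (D.isBackward())
      dbgs() << "backward dependence: " << *D.getSource(LAI) << " -> "
             << *D.getDestination(LAI) << "\n";
}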
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds whether this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
When performing memory disambiguation checks at runtime, do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1455