//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>

using namespace llvm;
using namespace llvm::SCEVPatternMatch;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i += 4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

static cl::opt<unsigned> MaxForkedSCEVDepth(
    "max-forked-scev-depth", cl::Hidden,
    cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
    cl::init(5));

static cl::opt<bool> SpeculateUnitStride(
    "laa-speculate-unit-stride", cl::Hidden,
    cl::desc("Speculate that non-constant strides are unit in LAA"),
    cl::init(true));

static cl::opt<bool, true> HoistRuntimeChecks(
    "hoist-runtime-checks", cl::Hidden,
    cl::desc(
        "Hoist inner loop runtime memory checks to outer loop if possible"),
    cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(true));
bool VectorizerParams::HoistRuntimeChecks;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

const SCEV *
llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                const DenseMap<Value *, const SCEV *> &PtrToStride,
                                Value *Ptr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  const SCEV *StrideSCEV = PtrToStride.lookup(Ptr);
  if (!StrideSCEV)
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  // Note: This assert is both overly strong and overly weak. The actual
  // invariant here is that StrideSCEV should be loop invariant. The only
  // such invariant strides we happen to speculate right now are unknowns
  // and thus this is a reasonable proxy of the actual invariant.
  assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");

  ScalarEvolution *SE = PSE.getSE();
  const SCEV *CT = SE->getOne(StrideSCEV->getType());
  PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
  const SCEV *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, const RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()),
      NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
  Members.push_back(Index);
}

/// Returns \p A + \p B, if it is guaranteed not to unsigned wrap. Otherwise
/// return nullptr. \p A and \p B must have the same type.
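///
/// For example, with i8 operands, A = 200 and B = 100 would wrap the unsigned
/// range, so nullptr is returned, while A = 100 and B = 50 yields the SCEV
/// for 150.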
static const SCEV *addSCEVNoOverflow(const SCEV *A, const SCEV *B,
                                     ScalarEvolution &SE) {
  if (!SE.willNotOverflow(Instruction::Add, /*IsSigned=*/false, A, B))
    return nullptr;
  return SE.getAddExpr(A, B);
}

/// Returns \p A * \p B, if it is guaranteed not to unsigned wrap. Otherwise
/// return nullptr. \p A and \p B must have the same type.
static const SCEV *mulSCEVOverflow(const SCEV *A, const SCEV *B,
                                   ScalarEvolution &SE) {
  if (!SE.willNotOverflow(Instruction::Mul, /*IsSigned=*/false, A, B))
    return nullptr;
  return SE.getMulExpr(A, B);
}

/// Return true, if evaluating \p AR at \p MaxBTC cannot wrap, because \p AR at
/// \p MaxBTC is guaranteed inbounds of the accessed object.
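///
/// For example, assuming the AddRec starts at the base of an object known to
/// be dereferenceable for 4096 bytes, with step +4, MaxBTC = 100 and
/// EltSize = 4, the last accessed byte offset is 100 * 4 + 4 = 404 <= 4096,
/// so evaluating \p AR at \p MaxBTC stays in bounds and cannot wrap.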
static bool evaluatePtrAddRecAtMaxBTCWillNotWrap(
    const SCEVAddRecExpr *AR, const SCEV *MaxBTC, const SCEV *EltSize,
    ScalarEvolution &SE, const DataLayout &DL, DominatorTree *DT,
    AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
218 auto *PointerBase = SE.getPointerBase(AR->getStart());
219 auto *StartPtr = dyn_cast<SCEVUnknown>(PointerBase);
220 if (!StartPtr)
221 return false;
222 const Loop *L = AR->getLoop();
223 bool CheckForNonNull, CheckForFreed;
224 Value *StartPtrV = StartPtr->getValue();
225 uint64_t DerefBytes = StartPtrV->getPointerDereferenceableBytes(
226 DL, CheckForNonNull, CheckForFreed);
227
228 if (DerefBytes && (CheckForNonNull || CheckForFreed))
229 return false;
230
231 const SCEV *Step = AR->getStepRecurrence(SE);
232 Type *WiderTy = SE.getWiderType(MaxBTC->getType(), Step->getType());
233 const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes);
234
235 // Check if we have a suitable dereferencable assumption we can use.
236 Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt();
237 if (BasicBlock *LoopPred = L->getLoopPredecessor()) {
238 if (isa<BranchInst>(LoopPred->getTerminator()))
239 CtxI = LoopPred->getTerminator();
240 }
241 RetainedKnowledge DerefRK;
242 getKnowledgeForValue(StartPtrV, {Attribute::Dereferenceable}, *AC,
243 [&](RetainedKnowledge RK, Instruction *Assume, auto) {
244 if (!isValidAssumeForContext(Assume, CtxI, DT))
245 return false;
246 if (StartPtrV->canBeFreed() &&
247 !willNotFreeBetween(Assume, CtxI))
248 return false;
249 DerefRK = std::max(DerefRK, RK);
250 return true;
251 });
252 if (DerefRK) {
253 DerefBytesSCEV =
254 SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue));
255 }
256
257 if (DerefBytesSCEV->isZero())
258 return false;
259
260 bool IsKnownNonNegative = SE.isKnownNonNegative(Step);
261 if (!IsKnownNonNegative && !SE.isKnownNegative(Step))
262 return false;
263
264 Step = SE.getNoopOrSignExtend(Step, WiderTy);
265 MaxBTC = SE.getNoopOrZeroExtend(MaxBTC, WiderTy);
266
267 // For the computations below, make sure they don't unsigned wrap.
268 if (!SE.isKnownPredicate(CmpInst::ICMP_UGE, AR->getStart(), StartPtr))
269 return false;
270 const SCEV *StartOffset = SE.getNoopOrZeroExtend(
271 SE.getMinusSCEV(AR->getStart(), StartPtr), WiderTy);
272
273 if (!LoopGuards)
274 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(AR->getLoop(), SE));
275 MaxBTC = SE.applyLoopGuards(MaxBTC, *LoopGuards);
276
277 const SCEV *OffsetAtLastIter =
278 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
279 if (!OffsetAtLastIter) {
280 // Re-try with constant max backedge-taken count if using the symbolic one
281 // failed.
282 MaxBTC = SE.getConstantMaxBackedgeTakenCount(AR->getLoop());
283 if (isa<SCEVCouldNotCompute>(MaxBTC))
284 return false;
285 MaxBTC = SE.getNoopOrZeroExtend(
286 MaxBTC, WiderTy);
287 OffsetAtLastIter =
288 mulSCEVOverflow(MaxBTC, SE.getAbsExpr(Step, /*IsNSW=*/false), SE);
289 if (!OffsetAtLastIter)
290 return false;
291 }
292
293 const SCEV *OffsetEndBytes = addSCEVNoOverflow(
294 OffsetAtLastIter, SE.getNoopOrZeroExtend(EltSize, WiderTy), SE);
295 if (!OffsetEndBytes)
296 return false;
297
298 if (IsKnownNonNegative) {
299 // For positive steps, check if
300 // (AR->getStart() - StartPtr) + (MaxBTC * Step) + EltSize <= DerefBytes,
301 // while making sure none of the computations unsigned wrap themselves.
302 const SCEV *EndBytes = addSCEVNoOverflow(StartOffset, OffsetEndBytes, SE);
303 if (!EndBytes)
304 return false;
305
306 DerefBytesSCEV = SE.applyLoopGuards(DerefBytesSCEV, *LoopGuards);
307 return SE.isKnownPredicate(CmpInst::ICMP_ULE, EndBytes, DerefBytesSCEV);
308 }
309
310 // For negative steps check if
311 // * StartOffset >= (MaxBTC * Step + EltSize)
312 // * StartOffset <= DerefBytes.
313 assert(SE.isKnownNegative(Step) && "must be known negative");
314 return SE.isKnownPredicate(CmpInst::ICMP_SGE, StartOffset, OffsetEndBytes) &&
315 SE.isKnownPredicate(CmpInst::ICMP_ULE, StartOffset, DerefBytesSCEV);
316}
317
std::pair<const SCEV *, const SCEV *> llvm::getStartAndEndForAccess(
    const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy, const SCEV *BTC,
    const SCEV *MaxBTC, ScalarEvolution *SE,
    DenseMap<std::pair<const SCEV *, Type *>,
             std::pair<const SCEV *, const SCEV *>> *PointerBounds,
    DominatorTree *DT, AssumptionCache *AC,
    std::optional<ScalarEvolution::LoopGuards> &LoopGuards) {
325 std::pair<const SCEV *, const SCEV *> *PtrBoundsPair;
326 if (PointerBounds) {
327 auto [Iter, Ins] = PointerBounds->insert(
328 {{PtrExpr, AccessTy},
329 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
330 if (!Ins)
331 return Iter->second;
332 PtrBoundsPair = &Iter->second;
333 }
334
335 const SCEV *ScStart;
336 const SCEV *ScEnd;
337
338 auto &DL = Lp->getHeader()->getDataLayout();
339 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
340 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
341 if (SE->isLoopInvariant(PtrExpr, Lp)) {
342 ScStart = ScEnd = PtrExpr;
343 } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
344 ScStart = AR->getStart();
345 if (!isa<SCEVCouldNotCompute>(BTC))
346 // Evaluating AR at an exact BTC is safe: LAA separately checks that
347 // accesses cannot wrap in the loop. If evaluating AR at BTC wraps, then
348 // the loop either triggers UB when executing a memory access with a
349 // poison pointer or the wrapping/poisoned pointer is not used.
350 ScEnd = AR->evaluateAtIteration(BTC, *SE);
351 else {
352 // Evaluating AR at MaxBTC may wrap and create an expression that is less
353 // than the start of the AddRec due to wrapping (for example consider
354 // MaxBTC = -2). If that's the case, set ScEnd to -(EltSize + 1). ScEnd
355 // will get incremented by EltSize before returning, so this effectively
356 // sets ScEnd to the maximum unsigned value for the type. Note that LAA
      // separately checks that accesses cannot wrap, so unsigned max
358 // represents an upper bound.
359 if (evaluatePtrAddRecAtMaxBTCWillNotWrap(AR, MaxBTC, EltSizeSCEV, *SE, DL,
360 DT, AC, LoopGuards)) {
361 ScEnd = AR->evaluateAtIteration(MaxBTC, *SE);
362 } else {
        ScEnd = SE->getAddExpr(
            SE->getNegativeSCEV(EltSizeSCEV),
            SE->getSCEV(ConstantExpr::getIntToPtr(
                ConstantInt::get(EltSizeSCEV->getType(), -1), AR->getType())));
      }
368 }
369 const SCEV *Step = AR->getStepRecurrence(*SE);
370
371 // For expressions with negative step, the upper bound is ScStart and the
372 // lower bound is ScEnd.
373 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
374 if (CStep->getValue()->isNegative())
375 std::swap(ScStart, ScEnd);
376 } else {
377 // Fallback case: the step is not constant, but we can still
378 // get the upper and lower bounds of the interval by using min/max
379 // expressions.
380 ScStart = SE->getUMinExpr(ScStart, ScEnd);
381 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
382 }
383 } else
384 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
385
386 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
387 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
388
389 // Add the size of the pointed element to ScEnd.
390 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
391
392 std::pair<const SCEV *, const SCEV *> Res = {ScStart, ScEnd};
393 if (PointerBounds)
394 *PtrBoundsPair = Res;
395 return Res;
396}
397
/// Calculate Start and End points of memory access using
/// getStartAndEndForAccess.
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
                                    Type *AccessTy, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    PredicatedScalarEvolution &PSE,
                                    bool NeedsFreeze) {
405 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
406 const SCEV *BTC = PSE.getBackedgeTakenCount();
407 const auto &[ScStart, ScEnd] = getStartAndEndForAccess(
408 Lp, PtrExpr, AccessTy, BTC, SymbolicMaxBTC, PSE.getSE(),
      &DC.getPointerBounds(), DC.getDT(), DC.getAC(), LoopGuards);
  assert(!isa<SCEVCouldNotCompute>(ScStart) &&
         !isa<SCEVCouldNotCompute>(ScEnd) &&
         "must be able to compute both start and end expressions");
413 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
414 NeedsFreeze);
415}
416
417bool RuntimePointerChecking::tryToCreateDiffCheck(
418 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
419 // If either group contains multiple different pointers, bail out.
420 // TODO: Support multiple pointers by using the minimum or maximum pointer,
421 // depending on src & sink.
422 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1)
423 return false;
424
425 const PointerInfo *Src = &Pointers[CGI.Members[0]];
426 const PointerInfo *Sink = &Pointers[CGJ.Members[0]];
427
428 // If either pointer is read and written, multiple checks may be needed. Bail
429 // out.
430 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
431 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
432 return false;
433
434 ArrayRef<unsigned> AccSrc =
435 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
436 ArrayRef<unsigned> AccSink =
437 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
438 // If either pointer is accessed multiple times, there may not be a clear
439 // src/sink relation. Bail out for now.
440 if (AccSrc.size() != 1 || AccSink.size() != 1)
441 return false;
442
443 // If the sink is accessed before src, swap src/sink.
444 if (AccSink[0] < AccSrc[0])
445 std::swap(Src, Sink);
446
447 const SCEVConstant *Step;
448 const SCEV *SrcStart;
449 const SCEV *SinkStart;
450 const Loop *InnerLoop = DC.getInnermostLoop();
  if (!match(Src->Expr,
             m_scev_AffineAddRec(m_SCEV(SrcStart), m_SCEVConstant(Step),
                                 m_SpecificLoop(InnerLoop))) ||
      !match(Sink->Expr,
             m_scev_AffineAddRec(m_SCEV(SinkStart), m_scev_Specific(Step),
                                 m_SpecificLoop(InnerLoop))))
    return false;

  SmallVector<Instruction *, 4> SrcInsts =
      DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
  SmallVector<Instruction *, 4> SinkInsts =
      DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
  Type *SrcTy = getLoadStoreType(SrcInsts[0]);
  Type *DstTy = getLoadStoreType(SinkInsts[0]);
  if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy))
    return false;
467
468 const DataLayout &DL = InnerLoop->getHeader()->getDataLayout();
469 unsigned AllocSize =
470 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
471
  // Only constant steps matching the AllocSize are supported at the moment.
  // This simplifies the difference computation. Can be extended in the
  // future.
475 if (Step->getAPInt().abs() != AllocSize)
476 return false;
477
478 IntegerType *IntTy =
479 IntegerType::get(Src->PointerValue->getContext(),
480 DL.getPointerSizeInBits(CGI.AddressSpace));
481
482 // When counting down, the dependence distance needs to be swapped.
483 if (Step->getValue()->isNegative())
484 std::swap(SinkStart, SrcStart);
485
486 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkStart, IntTy);
487 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcStart, IntTy);
488 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
489 isa<SCEVCouldNotCompute>(SrcStartInt))
490 return false;
491
492 // If the start values for both Src and Sink also vary according to an outer
493 // loop, then it's probably better to avoid creating diff checks because
494 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
495 // do the expanded full range overlap checks, which can be hoisted.
496 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
497 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
498 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
499 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
500 const Loop *StartARLoop = SrcStartAR->getLoop();
501 if (StartARLoop == SinkStartAR->getLoop() &&
502 StartARLoop == InnerLoop->getParentLoop() &&
503 // If the diff check would already be loop invariant (due to the
504 // recurrences being the same), then we prefer to keep the diff checks
505 // because they are cheaper.
506 SrcStartAR->getStepRecurrence(*SE) !=
507 SinkStartAR->getStepRecurrence(*SE)) {
508 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
509 "cannot be hoisted out of the outer loop\n");
510 return false;
511 }
512 }
513
514 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
515 << "SrcStart: " << *SrcStartInt << '\n'
516 << "SinkStartInt: " << *SinkStartInt << '\n');
517 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
518 Src->NeedsFreeze || Sink->NeedsFreeze);
519 return true;
520}
521
SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

530 if (needsChecking(CGI, CGJ)) {
531 CanUseDiffCheck = CanUseDiffCheck && tryToCreateDiffCheck(CGI, CGJ);
532 Checks.emplace_back(&CGI, &CGJ);
533 }
534 }
535 }
536 return Checks;
537}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
542 groupChecks(DepCands, UseDependencies);
543 Checks = generateChecks();
544}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
548 for (const auto &I : M.Members)
549 for (const auto &J : N.Members)
550 if (needsChecking(I, J))
551 return true;
552 return false;
553}
554
/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
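///
/// For example, if \p I is (%base + 8) and \p J is (%base + 24), the constant
/// difference J - I = 16 is non-negative, so \p I is returned. If the
/// difference is not a compile-time constant, nullptr is returned.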
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  std::optional<APInt> Diff = SE->computeConstantDifference(J, I);
  if (!Diff)
    return nullptr;
  return Diff->isNegative() ? J : I;
}

bool RuntimeCheckingPtrGroup::addPointer(
    unsigned Index, const RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
}
572
573bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
574 const SCEV *End, unsigned AS,
575 bool NeedsFreeze,
576 ScalarEvolution &SE) {
577 assert(AddressSpace == AS &&
578 "all pointers in a checking group must be in the same address space");
579
580 // Compare the starts and ends with the known minimum and maximum
581 // of this set. We need to know how we compare against the min/max
582 // of the set in order to be able to emit memchecks.
583 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
584 if (!Min0)
585 return false;
586
587 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
588 if (!Min1)
589 return false;
590
591 // Update the low bound expression if we've found a new min value.
592 if (Min0 == Start)
593 Low = Start;
594
595 // Update the high bound expression if we've found a new max value.
596 if (Min1 != End)
597 High = End;
598
599 Members.push_back(Index);
600 this->NeedsFreeze |= NeedsFreeze;
601 return true;
602}
603
604void RuntimePointerChecking::groupChecks(
605 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
606 // We build the groups from dependency candidates equivalence classes
607 // because:
608 // - We know that pointers in the same equivalence class share
609 // the same underlying object and therefore there is a chance
610 // that we can compare pointers
611 // - We wouldn't be able to merge two pointers for which we need
612 // to emit a memcheck. The classes in DepCands are already
613 // conveniently built such that no two pointers in the same
614 // class need checking against each other.
615
616 // We use the following (greedy) algorithm to construct the groups
617 // For every pointer in the equivalence class:
618 // For each existing group:
619 // - if the difference between this pointer and the min/max bounds
620 // of the group is a constant, then make the pointer part of the
621 // group and update the min/max bounds of that group as required.
622
623 CheckingGroups.clear();
624
625 // If we need to check two pointers to the same underlying object
626 // with a non-constant difference, we shouldn't perform any pointer
627 // grouping with those pointers. This is because we can easily get
628 // into cases where the resulting check would return false, even when
629 // the accesses are safe.
630 //
631 // The following example shows this:
632 // for (i = 0; i < 1000; ++i)
633 // a[5000 + i * m] = a[i] + a[i + 9000]
634 //
635 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
636 // (0, 10000) which is always false. However, if m is 1, there is no
637 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
638 // us to perform an accurate check in this case.
639 //
640 // In the above case, we have a non-constant distance and an Unknown
641 // dependence between accesses to the same underlying object, and could retry
642 // with runtime checks. Therefore UseDependencies is false. In this case we
643 // will use the fallback path and create separate checking groups for all
644 // pointers.
645
646 // If we don't have the dependency partitions, construct a new
647 // checking pointer group for each pointer. This is also required
648 // for correctness, because in this case we can have checking between
649 // pointers to the same underlying object.
650 if (!UseDependencies) {
651 for (unsigned I = 0; I < Pointers.size(); ++I)
652 CheckingGroups.emplace_back(I, *this);
653 return;
654 }
655
656 unsigned TotalComparisons = 0;

  DenseMap<Value *, SmallVector<unsigned>> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue].push_back(Index);
661
662 // We need to keep track of what pointers we've already seen so we
663 // don't process them twice.
  SmallSet<unsigned, 2> Seen;

666 // Go through all equivalence classes, get the "pointer check groups"
667 // and add them to the overall solution. We use the order in which accesses
668 // appear in 'Pointers' to enforce determinism.
669 for (unsigned I = 0; I < Pointers.size(); ++I) {
670 // We've seen this pointer before, and therefore already processed
671 // its equivalence class.
672 if (Seen.contains(I))
673 continue;

    MemAccessInfo Access(Pointers[I].PointerValue,
                         Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;

680 // Because DepCands is constructed by visiting accesses in the order in
681 // which they appear in alias sets (which is deterministic) and the
682 // iteration order within an equivalence class member is only dependent on
683 // the order in which unions and insertions are performed on the
684 // equivalence class, the iteration order is deterministic.
685 for (auto M : DepCands.members(Access)) {
686 auto PointerI = PositionMap.find(M.getPointer());
687 // If we can't find the pointer in PositionMap that means we can't
688 // generate a memcheck for it.
689 if (PointerI == PositionMap.end())
690 continue;
691 for (unsigned Pointer : PointerI->second) {
692 bool Merged = false;
693 // Mark this pointer as seen.
694 Seen.insert(Pointer);
695
696 // Go through all the existing sets and see if we can find one
697 // which can include this pointer.
698 for (RuntimeCheckingPtrGroup &Group : Groups) {
699 // Don't perform more than a certain amount of comparisons.
700 // This should limit the cost of grouping the pointers to something
701 // reasonable. If we do end up hitting this threshold, the algorithm
702 // will create separate groups for all remaining pointers.
703 if (TotalComparisons > MemoryCheckMergeThreshold)
704 break;
705
706 TotalComparisons++;
707
708 if (Group.addPointer(Pointer, *this)) {
709 Merged = true;
710 break;
711 }
712 }
713
714 if (!Merged)
715 // We couldn't add this pointer to any existing set or the threshold
716 // for the number of comparisons has been reached. Create a new group
717 // to hold the current pointer.
718 Groups.emplace_back(Pointer, *this);
719 }
720 }
721
722 // We've computed the grouped checks for this partition.
723 // Save the results and continue with the next one.
    copy(Groups, std::back_inserter(CheckingGroups));
  }
726}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
731 return (PtrToPartition[PtrIdx1] != -1 &&
732 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
733}
734
735bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
736 const PointerInfo &PointerI = Pointers[I];
737 const PointerInfo &PointerJ = Pointers[J];
738
739 // No need to check if two readonly pointers intersect.
740 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
741 return false;
742
743 // Only need to check pointers between two different dependency sets.
744 if (PointerI.DependencySetId == PointerJ.DependencySetId)
745 return false;
746
747 // Only need to check pointers in the same alias set.
748 return PointerI.AliasSetId == PointerJ.AliasSetId;
749}

/// Assign each RuntimeCheckingPtrGroup pointer an index for stable UTC output.
static DenseMap<const RuntimeCheckingPtrGroup *, unsigned>
getPtrToIdxMap(ArrayRef<RuntimeCheckingPtrGroup> CheckingGroups) {
  DenseMap<const RuntimeCheckingPtrGroup *, unsigned> PtrIndices;
  for (const auto &[Idx, CG] : enumerate(CheckingGroups))
    PtrIndices[&CG] = Idx;
  return PtrIndices;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
763 unsigned N = 0;
764 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
765 for (const auto &[Check1, Check2] : Checks) {
766 const auto &First = Check1->Members, &Second = Check2->Members;
767 OS.indent(Depth) << "Check " << N++ << ":\n";
768 OS.indent(Depth + 2) << "Comparing group GRP" << PtrIndices.at(Check1)
769 << ":\n";
770 for (unsigned K : First)
771 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
772 OS.indent(Depth + 2) << "Against group GRP" << PtrIndices.at(Check2)
773 << ":\n";
774 for (unsigned K : Second)
775 OS.indent(Depth + 2) << *Pointers[K].PointerValue << "\n";
776 }
777}
778
void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

781 OS.indent(Depth) << "Run-time memory checks:\n";
782 printChecks(OS, Checks, Depth);
783
784 OS.indent(Depth) << "Grouped accesses:\n";
785 auto PtrIndices = getPtrToIdxMap(CheckingGroups);
786 for (const auto &CG : CheckingGroups) {
787 OS.indent(Depth + 2) << "Group GRP" << PtrIndices.at(&CG) << ":\n";
788 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
789 << ")\n";
790 for (unsigned Member : CG.Members) {
791 OS.indent(Depth + 6) << "Member: " << *Pointers[Member].Expr << "\n";
792 }
793 }
794}
795
796namespace {
797
798/// Analyses memory accesses in a loop.
799///
800/// Checks whether run time pointer checks are needed and builds sets for data
801/// dependence checking.
802class AccessAnalysis {
803public:
804 /// Read or write access location.
805 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
806 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
807
  AccessAnalysis(const Loop *TheLoop, AAResults *AA, const LoopInfo *LI,
                 DominatorTree &DT, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE,
                 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
812 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DT(DT), DepCands(DA),
813 PSE(PSE), LoopAliasScopes(LoopAliasScopes) {
814 // We're analyzing dependences across loop iterations.
815 BAA.enableCrossIterationMode();
816 }
817
818 /// Register a load and whether it is only read from.
819 void addLoad(const MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
820 Value *Ptr = const_cast<Value *>(Loc.Ptr);
821 AST.add(adjustLoc(Loc));
822 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
823 if (IsReadOnly)
824 ReadOnlyPtr.insert(Ptr);
825 }
826
827 /// Register a store.
828 void addStore(const MemoryLocation &Loc, Type *AccessTy) {
829 Value *Ptr = const_cast<Value *>(Loc.Ptr);
830 AST.add(adjustLoc(Loc));
831 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
832 }
833
834 /// Check if we can emit a run-time no-alias check for \p Access.
835 ///
836 /// Returns true if we can emit a run-time no alias check for \p Access.
837 /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
839 /// we will attempt to use additional run-time checks in order to get
840 /// the bounds of the pointer.
841 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
842 MemAccessInfo Access, Type *AccessTy,
843 const DenseMap<Value *, const SCEV *> &Strides,
844 DenseMap<Value *, unsigned> &DepSetId,
845 Loop *TheLoop, unsigned &RunningDepId,
846 unsigned ASId, bool Assume);
847
848 /// Check whether we can check the pointers at runtime for
849 /// non-intersection.
850 ///
851 /// Returns true if we need no check or if we do and we can generate them
852 /// (i.e. the pointers have computable bounds). A return value of false means
853 /// we couldn't analyze and generate runtime checks for all pointers in the
854 /// loop, but if \p AllowPartial is set then we will have checks for those
855 /// pointers we could analyze.
856 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, Loop *TheLoop,
857 const DenseMap<Value *, const SCEV *> &Strides,
858 Value *&UncomputablePtr, bool AllowPartial);
859
860 /// Goes over all memory accesses, checks whether a RT check is needed
861 /// and builds sets of dependent accesses.
862 void buildDependenceSets() {
863 processMemAccesses();
864 }
865
866 /// Initial processing of memory accesses determined that we need to
867 /// perform dependency checking.
868 ///
869 /// Note that this can later be cleared if we retry memcheck analysis without
870 /// dependency checking (i.e. ShouldRetryWithRuntimeChecks).
871 bool isDependencyCheckNeeded() const { return !CheckDeps.empty(); }
872
873 /// We decided that no dependence analysis would be used. Reset the state.
874 void resetDepChecks(MemoryDepChecker &DepChecker) {
875 CheckDeps.clear();
876 DepChecker.clearDependences();
877 }
878
879 const MemAccessInfoList &getDependenciesToCheck() const { return CheckDeps; }
880
881private:
882 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
883
884 /// Adjust the MemoryLocation so that it represents accesses to this
885 /// location across all iterations, rather than a single one.
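  ///
  /// For example, a 4-byte load of A[i] inside the loop is conservatively
  /// treated as touching an unknown-size range of the underlying object, so
  /// that accesses from other iterations are also covered.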
  MemoryLocation adjustLoc(MemoryLocation Loc) const {
    // The accessed location varies within the loop, but remains within the
    // underlying object.
    Loc.Size = LocationSize::beforeOrAfterPointer();
    Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
    Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
    return Loc;
  }
894
895 /// Drop alias scopes that are only valid within a single loop iteration.
896 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
897 if (!ScopeList)
898 return nullptr;
899
900 // For the sake of simplicity, drop the whole scope list if any scope is
901 // iteration-local.
902 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
903 return LoopAliasScopes.contains(cast<MDNode>(Scope));
904 }))
905 return nullptr;
906
907 return ScopeList;
908 }
909
  /// Go over all memory accesses and check whether runtime pointer checks
911 /// are needed and build sets of dependency check candidates.
912 void processMemAccesses();
913
914 /// Map of all accesses. Values are the types used to access memory pointed to
915 /// by the pointer.
916 PtrAccessMap Accesses;
917
918 /// The loop being checked.
919 const Loop *TheLoop;
920
921 /// List of accesses that need a further dependence check.
922 MemAccessInfoList CheckDeps;
923
924 /// Set of pointers that are read only.
925 SmallPtrSet<Value*, 16> ReadOnlyPtr;
926
927 /// Batched alias analysis results.
928 BatchAAResults BAA;
929
  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;
933
934 /// The LoopInfo of the loop being checked.
935 const LoopInfo *LI;
936
937 /// The dominator tree of the function.
938 DominatorTree &DT;
939
  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

945 /// Initial processing of memory accesses determined that we may need
946 /// to add memchecks. Perform the analysis to determine the necessary checks.
947 ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
949 /// memcheck analysis without dependency checking
950 /// (i.e. ShouldRetryWithRuntimeChecks), isDependencyCheckNeeded is
951 /// cleared while this remains set if we have potentially dependent accesses.
952 bool IsRTCheckAnalysisNeeded = false;
953
954 /// The SCEV predicate containing all the SCEV-related assumptions.
955 PredicatedScalarEvolution &PSE;
956
957 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
958
959 /// Alias scopes that are declared inside the loop, and as such not valid
960 /// across iterations.
961 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
962};
963
964} // end anonymous namespace
965
/// Try to compute a constant stride for \p AR. Used by getPtrStride and
/// isNoWrap.
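///
/// For example, an AddRec {%A,+,8} accessed with i64 elements (alloc size 8)
/// yields a stride of 1 and {%A,+,-8} yields -1, while a step of 4 with an
/// 8-byte access type is not a multiple of the size and yields std::nullopt.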
static std::optional<int64_t>
getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
                    Value *Ptr, PredicatedScalarEvolution &PSE) {
  if (isa<ScalableVectorType>(AccessTy)) {
972 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
973 << "\n");
974 return std::nullopt;
975 }
976
977 // The access function must stride over the innermost loop.
978 if (Lp != AR->getLoop()) {
979 LLVM_DEBUG({
980 dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
981 if (Ptr)
982 dbgs() << *Ptr << " ";
983
984 dbgs() << "SCEV: " << *AR << "\n";
985 });
986 return std::nullopt;
987 }
988
989 // Check the step is constant.
990 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
991
992 // Calculate the pointer stride and check if it is constant.
993 const APInt *APStepVal;
994 if (!match(Step, m_scev_APInt(APStepVal))) {
995 LLVM_DEBUG({
996 dbgs() << "LAA: Bad stride - Not a constant strided ";
997 if (Ptr)
998 dbgs() << *Ptr << " ";
999 dbgs() << "SCEV: " << *AR << "\n";
1000 });
1001 return std::nullopt;
1002 }
1003
1004 const auto &DL = Lp->getHeader()->getDataLayout();
1005 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1006 int64_t Size = AllocSize.getFixedValue();
1007
1008 // Huge step value - give up.
1009 std::optional<int64_t> StepVal = APStepVal->trySExtValue();
1010 if (!StepVal)
1011 return std::nullopt;
1012
1013 // Strided access.
1014 return *StepVal % Size ? std::nullopt : std::make_optional(*StepVal / Size);
1015}
1016
/// Check whether \p AR is a non-wrapping AddRec. If \p Ptr is not nullptr, use
/// information from the IR pointer value to determine no-wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
                     Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
                     const DominatorTree &DT,
                     std::optional<int64_t> Stride = std::nullopt) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

1030 // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
1031 // the distance between the previously accessed location and the wrapped
1032 // location will be larger than half the pointer index type space. In that
1033 // case, the GEP would be poison and any memory access dependent on it would
1034 // be immediate UB when executed.
  if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
      GEP && GEP->hasNoUnsignedSignedWrap()) {
1037 // For the above reasoning to apply, the pointer must be dereferenced in
1038 // every iteration.
1039 if (L->getHeader() == L->getLoopLatch() ||
1040 any_of(GEP->users(), [L, &DT, GEP](User *U) {
1041 if (getLoadStorePointerOperand(U) != GEP)
1042 return false;
1043 BasicBlock *UserBB = cast<Instruction>(U)->getParent();
1044 return !LoopAccessInfo::blockNeedsPredication(UserBB, L, &DT);
1045 }))
1046 return true;
1047 }
1048
1049 if (!Stride)
1050 Stride = getStrideFromAddRec(AR, L, AccessTy, Ptr, PSE);
1051 if (Stride) {
    // If the null pointer is undefined, then an access sequence which would
1053 // otherwise access it can be assumed not to unsigned wrap. Note that this
1054 // assumes the object in memory is aligned to the natural alignment.
1055 unsigned AddrSpace = AR->getType()->getPointerAddressSpace();
1056 if (!NullPointerIsDefined(L->getHeader()->getParent(), AddrSpace) &&
1057 (Stride == 1 || Stride == -1))
1058 return true;
1059 }
1060
  if (Ptr && Assume) {
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1064 << "LAA: Pointer: " << *Ptr << "\n"
1065 << "LAA: SCEV: " << *AR << "\n"
1066 << "LAA: Added an overflow assumption\n");
1067 return true;
1068 }
1069
1070 return false;
1071}
1072
1073static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
1074 function_ref<void(Value *)> AddPointer) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *> WorkList;
1077 WorkList.push_back(StartPtr);
1078
1079 while (!WorkList.empty()) {
1080 Value *Ptr = WorkList.pop_back_val();
1081 if (!Visited.insert(Ptr).second)
1082 continue;
1083 auto *PN = dyn_cast<PHINode>(Ptr);
1084 // SCEV does not look through non-header PHIs inside the loop. Such phis
1085 // can be analyzed by adding separate accesses for each incoming pointer
1086 // value.
1087 if (PN && InnermostLoop.contains(PN->getParent()) &&
1088 PN->getParent() != InnermostLoop.getHeader()) {
1089 llvm::append_range(WorkList, PN->incoming_values());
1090 } else
1091 AddPointer(Ptr);
1092 }
1093}
1094
1095// Walk back through the IR for a pointer, looking for a select like the
1096// following:
1097//
1098// %offset = select i1 %cmp, i64 %a, i64 %b
1099// %addr = getelementptr double, double* %base, i64 %offset
1100// %ld = load double, double* %addr, align 8
1101//
1102// We won't be able to form a single SCEVAddRecExpr from this since the
1103// address for each loop iteration depends on %cmp. We could potentially
1104// produce multiple valid SCEVAddRecExprs, though, and check all of them for
1105// memory safety/aliasing if needed.
1106//
1107// If we encounter some IR we don't yet handle, or something obviously fine
1108// like a constant, then we just add the SCEV for that term to the list passed
1109// in by the caller. If we have a node that may potentially yield a valid
1110// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
1111// ourselves before adding to the list.
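//
// For the example above, assuming sizeof(double) == 8, this produces the two
// candidate SCEVs (%base + 8 * %a) and (%base + 8 * %b), which can then be
// checked separately.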
static void findForkedSCEVs(
    ScalarEvolution *SE, const Loop *L, Value *Ptr,
    SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
    unsigned Depth) {
1116 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
1117 // we've exceeded our limit on recursion, just return whatever we have
1118 // regardless of whether it can be used for a forked pointer or not, along
1119 // with an indication of whether it might be a poison or undef value.
1120 const SCEV *Scev = SE->getSCEV(Ptr);
1121 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
1122 !isa<Instruction>(Ptr) || Depth == 0) {
1123 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1124 return;
1125 }
1126
1127 Depth--;
1128
1129 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
1130 return get<1>(S);
1131 };
1132
1133 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
1134 switch (Opcode) {
1135 case Instruction::Add:
1136 return SE->getAddExpr(L, R);
1137 case Instruction::Sub:
1138 return SE->getMinusSCEV(L, R);
1139 default:
1140 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
1141 }
1142 };

  Instruction *I = cast<Instruction>(Ptr);
1145 unsigned Opcode = I->getOpcode();
1146 switch (Opcode) {
1147 case Instruction::GetElementPtr: {
1148 auto *GEP = cast<GetElementPtrInst>(I);
1149 Type *SourceTy = GEP->getSourceElementType();
1150 // We only handle base + single offset GEPs here for now.
1151 // Not dealing with preexisting gathers yet, so no vectors.
1152 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
1153 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
1154 break;
1155 }
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
1159 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
1160
1161 // See if we need to freeze our fork...
1162 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
1163 any_of(OffsetScevs, UndefPoisonCheck);
1164
1165 // Check that we only have a single fork, on either the base or the offset.
1166 // Copy the SCEV across for the one without a fork in order to generate
1167 // the full SCEV for both sides of the GEP.
1168 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
1169 BaseScevs.push_back(BaseScevs[0]);
1170 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
1171 OffsetScevs.push_back(OffsetScevs[0]);
1172 else {
1173 ScevList.emplace_back(Scev, NeedsFreeze);
1174 break;
1175 }
1176
1177 Type *IntPtrTy = SE->getEffectiveSCEVType(GEP->getPointerOperandType());
1178
1179 // Find the size of the type being pointed to. We only have a single
1180 // index term (guarded above) so we don't need to index into arrays or
1181 // structures, just get the size of the scalar value.
1182 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
1183
1184 for (auto [B, O] : zip(BaseScevs, OffsetScevs)) {
1185 const SCEV *Base = get<0>(B);
1186 const SCEV *Offset = get<0>(O);
1187
1188 // Scale up the offsets by the size of the type, then add to the bases.
1189 const SCEV *Scaled =
1190 SE->getMulExpr(Size, SE->getTruncateOrSignExtend(Offset, IntPtrTy));
1191 ScevList.emplace_back(SE->getAddExpr(Base, Scaled), NeedsFreeze);
1192 }
1193 break;
1194 }
1195 case Instruction::Select: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
1198 // support a single select per pointer so if there's another behind this
1199 // then we just bail out and return the generic SCEV.
1200 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1201 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
1202 if (ChildScevs.size() == 2)
1203 append_range(ScevList, ChildScevs);
1204 else
1205 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1206 break;
1207 }
1208 case Instruction::PHI: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A phi means we've found a forked pointer, but we currently only
1211 // support a single phi per pointer so if there's another behind this
1212 // then we just bail out and return the generic SCEV.
1213 if (I->getNumOperands() == 2) {
1214 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
1215 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1216 }
1217 if (ChildScevs.size() == 2)
1218 append_range(ScevList, ChildScevs);
1219 else
1220 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1221 break;
1222 }
1223 case Instruction::Add:
1224 case Instruction::Sub: {
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
    SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
    findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1228 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1229
1230 // See if we need to freeze our fork...
1231 bool NeedsFreeze =
1232 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1233
1234 // Check that we only have a single fork, on either the left or right side.
1235 // Copy the SCEV across for the one without a fork in order to generate
1236 // the full SCEV for both sides of the BinOp.
1237 if (LScevs.size() == 2 && RScevs.size() == 1)
1238 RScevs.push_back(RScevs[0]);
1239 else if (RScevs.size() == 2 && LScevs.size() == 1)
1240 LScevs.push_back(LScevs[0]);
1241 else {
1242 ScevList.emplace_back(Scev, NeedsFreeze);
1243 break;
1244 }
1245
1246 for (auto [L, R] : zip(LScevs, RScevs))
1247 ScevList.emplace_back(GetBinOpExpr(Opcode, get<0>(L), get<0>(R)),
1248 NeedsFreeze);
1249 break;
1250 }
1251 default:
1252 // Just return the current SCEV if we haven't handled the instruction yet.
1253 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1254 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1255 break;
1256 }
1257}
1258
1259bool AccessAnalysis::createCheckForAccess(
1260 RuntimePointerChecking &RtCheck, MemAccessInfo Access, Type *AccessTy,
1261 const DenseMap<Value *, const SCEV *> &StridesMap,
1262 DenseMap<Value *, unsigned> &DepSetId, Loop *TheLoop,
1263 unsigned &RunningDepId, unsigned ASId, bool Assume) {
1264 Value *Ptr = Access.getPointer();
1265 ScalarEvolution *SE = PSE.getSE();
1266 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1267
  SmallVector<PointerIntPair<const SCEV *, 1, bool>> RTCheckPtrs;
  findForkedSCEVs(SE, TheLoop, Ptr, RTCheckPtrs, MaxForkedSCEVDepth);
1270 assert(!RTCheckPtrs.empty() &&
1271 "Must have some runtime-check pointer candidates");
1272
  // RTCheckPtrs must have size 2 if there are forked pointers. Otherwise,
  // there are no forked pointers and the symbolic stride is replaced via
  // replaceSymbolicStrideSCEV below.
1275 auto IsLoopInvariantOrAR =
1276 [&SE, &TheLoop](const PointerIntPair<const SCEV *, 1, bool> &P) {
1277 return SE->isLoopInvariant(P.getPointer(), TheLoop) ||
1278 isa<SCEVAddRecExpr>(P.getPointer());
1279 };
1280 if (RTCheckPtrs.size() == 2 && all_of(RTCheckPtrs, IsLoopInvariantOrAR)) {
1281 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n";
1282 for (const auto &[Idx, Q] : enumerate(RTCheckPtrs)) dbgs()
1283 << "\t(" << Idx << ") " << *Q.getPointer() << "\n");
1284 } else {
1285 RTCheckPtrs = {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1286 }
1287
  // Check whether all pointers can participate in a runtime bounds check. They
  // must either be invariant or non-wrapping affine AddRecs.
1290 for (auto &P : RTCheckPtrs) {
1291 // The bounds for loop-invariant pointer is trivial.
1292 if (SE->isLoopInvariant(P.getPointer(), TheLoop))
1293 continue;
1294
1295 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(P.getPointer());
1296 if (!AR && Assume)
1297 AR = PSE.getAsAddRec(Ptr);
1298 if (!AR || !AR->isAffine())
1299 return false;
1300
1301 // If there's only one option for Ptr, look it up after bounds and wrap
1302 // checking, because assumptions might have been added to PSE.
1303 if (RTCheckPtrs.size() == 1) {
      AR = cast<SCEVAddRecExpr>(
          replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr));
      P.setPointer(AR);
1307 }
1308
1309 if (!isNoWrap(PSE, AR, RTCheckPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
1310 TheLoop, Assume, DT))
1311 return false;
1312 }
1313
1314 for (const auto &[PtrExpr, NeedsFreeze] : RTCheckPtrs) {
1315 // The id of the dependence set.
1316 unsigned DepId;
1317
1318 if (isDependencyCheckNeeded()) {
1319 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1320 unsigned &LeaderId = DepSetId[Leader];
1321 if (!LeaderId)
1322 LeaderId = RunningDepId++;
1323 DepId = LeaderId;
1324 } else
1325 // Each access has its own dependence set.
1326 DepId = RunningDepId++;
1327
1328 bool IsWrite = Access.getInt();
1329 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1330 NeedsFreeze);
1331 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1332 }
1333
1334 return true;
1335}
1336
1337bool AccessAnalysis::canCheckPtrAtRT(
1338 RuntimePointerChecking &RtCheck, Loop *TheLoop,
1339 const DenseMap<Value *, const SCEV *> &StridesMap, Value *&UncomputablePtr,
1340 bool AllowPartial) {
1341 // Find pointers with computable bounds. We are going to use this information
1342 // to place a runtime bound check.
1343 bool CanDoRT = true;
1344
1345 bool MayNeedRTCheck = false;
1346 if (!IsRTCheckAnalysisNeeded) return true;
1347
1348 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1349
  // We assign a consecutive id to accesses from different alias sets.
  // Accesses in different alias sets don't need to be checked against each
  // other.
1352 unsigned ASId = 0;
1353 for (const auto &AS : AST) {
1354 int NumReadPtrChecks = 0;
1355 int NumWritePtrChecks = 0;
1356 bool CanDoAliasSetRT = true;
1357 ++ASId;
1358 auto ASPointers = AS.getPointers();
1359
    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 8> AccessInfos;
    for (const Value *ConstPtr : ASPointers) {
1371 Value *Ptr = const_cast<Value *>(ConstPtr);
1372 bool IsWrite = Accesses.contains(MemAccessInfo(Ptr, true));
1373 if (IsWrite)
1374 ++NumWritePtrChecks;
1375 else
1376 ++NumReadPtrChecks;
1377 AccessInfos.emplace_back(Ptr, IsWrite);
1378 }
1379
1380 // We do not need runtime checks for this alias set, if there are no writes
1381 // or a single write and no reads.
1382 if (NumWritePtrChecks == 0 ||
1383 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1384 assert((ASPointers.size() <= 1 ||
1385 all_of(ASPointers,
1386 [this](const Value *Ptr) {
1387 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1388 true);
1389 return !DepCands.contains(AccessWrite);
1390 })) &&
1391 "Can only skip updating CanDoRT below, if all entries in AS "
1392 "are reads or there is at most 1 entry");
1393 continue;
1394 }
1395
1396 for (auto &Access : AccessInfos) {
1397 for (const auto &AccessTy : Accesses[Access]) {
1398 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1399 DepSetId, TheLoop, RunningDepId, ASId,
1400 false)) {
1401 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1402 << *Access.getPointer() << '\n');
1403 Retries.emplace_back(Access, AccessTy);
1404 CanDoAliasSetRT = false;
1405 }
1406 }
1407 }
1408
1409 // Note that this function computes CanDoRT and MayNeedRTCheck
1410 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1411 // we have a pointer for which we couldn't find the bounds but we don't
1412 // actually need to emit any checks so it does not matter.
1413 //
1414 // We need runtime checks for this alias set, if there are at least 2
1415 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1416 // any bound checks (because in that case the number of dependence sets is
1417 // incomplete).
1418 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1419
1420 // We need to perform run-time alias checks, but some pointers had bounds
1421 // that couldn't be checked.
1422 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1423 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1424 // We know that we need these checks, so we can now be more aggressive
1425 // and add further checks if required (overflow checks).
1426 CanDoAliasSetRT = true;
1427 for (const auto &[Access, AccessTy] : Retries) {
1428 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1429 DepSetId, TheLoop, RunningDepId, ASId,
1430 /*Assume=*/true)) {
1431 CanDoAliasSetRT = false;
1432 UncomputablePtr = Access.getPointer();
1433 if (!AllowPartial)
1434 break;
1435 }
1436 }
1437 }
1438
1439 CanDoRT &= CanDoAliasSetRT;
1440 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1441 ++ASId;
1442 }
1443
1444 // If the pointers that we would use for the bounds comparison have different
1445 // address spaces, assume the values aren't directly comparable, so we can't
1446 // use them for the runtime check. We also have to assume they could
1447 // overlap. In the future there should be metadata for whether address spaces
1448 // are disjoint.
1449 unsigned NumPointers = RtCheck.Pointers.size();
1450 for (unsigned i = 0; i < NumPointers; ++i) {
1451 for (unsigned j = i + 1; j < NumPointers; ++j) {
1452 // Only need to check pointers between two different dependency sets.
1453 if (RtCheck.Pointers[i].DependencySetId ==
1454 RtCheck.Pointers[j].DependencySetId)
1455 continue;
1456 // Only need to check pointers in the same alias set.
1457 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1458 continue;
1459
1460 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1461 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1462
1463 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1464 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1465 if (ASi != ASj) {
1466 LLVM_DEBUG(
1467 dbgs() << "LAA: Runtime check would require comparison between"
1468 " different address spaces\n");
1469 return false;
1470 }
1471 }
1472 }
1473
1474 if (MayNeedRTCheck && (CanDoRT || AllowPartial))
1475 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1476
1477 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1478 << " pointer comparisons.\n");
1479
1480 // If we can do run-time checks, but there are no checks, no runtime checks
1481 // are needed. This can happen when all pointers point to the same underlying
1482 // object for example.
1483 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1484
1485 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1486 assert(CanDoRTIfNeeded == (CanDoRT || !MayNeedRTCheck) &&
1487 "CanDoRTIfNeeded depends on RtCheck.Need");
1488 if (!CanDoRTIfNeeded && !AllowPartial)
1489 RtCheck.reset();
1490 return CanDoRTIfNeeded;
1491}
1492
1493void AccessAnalysis::processMemAccesses() {
1494 // We process the set twice: first we process read-write pointers, last we
1495 // process read-only pointers. This allows us to skip dependence tests for
1496 // read-only pointers.
1497
1498 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1499 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1500 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1501 LLVM_DEBUG({
1502 for (const auto &[A, _] : Accesses)
1503 dbgs() << "\t" << *A.getPointer() << " ("
1504 << (A.getInt()
1505 ? "write"
1506 : (ReadOnlyPtr.contains(A.getPointer()) ? "read-only"
1507 : "read"))
1508 << ")\n";
1509 });
1510
1511 // The AliasSetTracker has nicely partitioned our pointers by metadata
1512 // compatibility and potential for underlying-object overlap. As a result, we
1513 // only need to check for potential pointer dependencies within each alias
1514 // set.
1515 for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // ordered collections internally and so the iteration order here is
    // deterministic.
1519 auto ASPointers = AS.getPointers();
1520
1521 bool SetHasWrite = false;
1522
1523 // Map of (pointer to underlying objects, accessed address space) to last
1524 // access encountered.
1525 typedef DenseMap<std::pair<const Value *, unsigned>, MemAccessInfo>
1526 UnderlyingObjToAccessMap;
1527 UnderlyingObjToAccessMap ObjToLastAccess;
1528
1529 // Set of access to check after all writes have been processed.
1530 PtrAccessMap DeferredAccesses;
1531
1532 // Iterate over each alias set twice, once to process read/write pointers,
1533 // and then to process read-only pointers.
1534 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1535 bool UseDeferred = SetIteration > 0;
1536 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1537
1538 for (const Value *ConstPtr : ASPointers) {
1539 Value *Ptr = const_cast<Value *>(ConstPtr);
1540
1541 // For a single memory access in AliasSetTracker, Accesses may contain
1542 // both read and write, and they both need to be handled for CheckDeps.
1543 for (const auto &[AC, _] : S) {
1544 if (AC.getPointer() != Ptr)
1545 continue;
1546
1547 bool IsWrite = AC.getInt();
1548
1549 // If we're using the deferred access set, then it contains only
1550 // reads.
1551 bool IsReadOnlyPtr = ReadOnlyPtr.contains(Ptr) && !IsWrite;
1552 if (UseDeferred && !IsReadOnlyPtr)
1553 continue;
1554 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1555 // read or a write.
1556 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1557 S.contains(MemAccessInfo(Ptr, false))) &&
1558 "Alias-set pointer not in the access set?");
1559
1560 MemAccessInfo Access(Ptr, IsWrite);
1561 DepCands.insert(Access);
1562
1563 // Memorize read-only pointers for later processing and skip them in
1564 // the first round (they need to be checked after we have seen all
1565 // write pointers). Note: we also mark pointers that are not
1566 // consecutive as "read-only" pointers (so that we check
1567 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1568 if (!UseDeferred && IsReadOnlyPtr) {
1569 // We only use the pointer keys, the types vector values don't
1570 // matter.
1571 DeferredAccesses.insert({Access, {}});
1572 continue;
1573 }
1574
1575 // If this is a write - check other reads and writes for conflicts. If
1576 // this is a read only check other writes for conflicts (but only if
1577 // there is no other write to the ptr - this is an optimization to
1578 // catch "a[i] = a[i] + " without having to do a dependence check).
1579 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1580 CheckDeps.push_back(Access);
1581 IsRTCheckAnalysisNeeded = true;
1582 }
1583
1584 if (IsWrite)
1585 SetHasWrite = true;
1586
1587 // Create sets of pointers connected by a shared alias set and
1588 // underlying object.
1589 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1590 UOs = {};
1591 ::getUnderlyingObjects(Ptr, UOs, LI);
1592 LLVM_DEBUG(dbgs()
1593 << "Underlying objects for pointer " << *Ptr << "\n");
1594 for (const Value *UnderlyingObj : UOs) {
1595 // nullptr never aliases; don't join sets for pointers that have "null"
1596 // in their UnderlyingObjects list.
1597 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1598 !NullPointerIsDefined(
1599 TheLoop->getHeader()->getParent(),
1600 UnderlyingObj->getType()->getPointerAddressSpace()))
1601 continue;
1602
1603 auto [It, Inserted] = ObjToLastAccess.try_emplace(
1604 {UnderlyingObj,
1605 cast<PointerType>(Ptr->getType())->getAddressSpace()},
1606 Access);
1607 if (!Inserted) {
1608 DepCands.unionSets(Access, It->second);
1609 It->second = Access;
1610 }
1611
1612 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1613 }
1614 }
1615 }
1616 }
1617 }
1618}
1619
1620/// Check whether the access through \p Ptr has a constant stride.
1621std::optional<int64_t>
1622llvm::getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
1623 const Loop *Lp, const DominatorTree &DT,
1624 const DenseMap<Value *, const SCEV *> &StridesMap,
1625 bool Assume, bool ShouldCheckWrap) {
1626 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1627 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1628 return 0;
1629
1630 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
1631
1632 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1633 if (Assume && !AR)
1634 AR = PSE.getAsAddRec(Ptr);
1635
1636 if (!AR) {
1637 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1638 << " SCEV: " << *PtrScev << "\n");
1639 return std::nullopt;
1640 }
1641
1642 std::optional<int64_t> Stride =
1643 getStrideFromAddRec(AR, Lp, AccessTy, Ptr, PSE);
1644 if (!ShouldCheckWrap || !Stride)
1645 return Stride;
1646
1647 if (isNoWrap(PSE, AR, Ptr, AccessTy, Lp, Assume, DT, Stride))
1648 return Stride;
1649
1650 LLVM_DEBUG(
1651 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1652 << *Ptr << " SCEV: " << *AR << "\n");
1653 return std::nullopt;
1654}
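// Editor's illustration (not part of the LLVM source), assuming i32 elements
// and an induction variable i of the analyzed loop:
//   for (i = 0; i < n; ++i) A[i] = 0;   // pointer SCEV {%A,+,4} -> stride 1
//   for (i = 0; i < n; ++i) A[3*i] = 0;  // pointer SCEV {%A,+,12} -> stride 3
//   for (i = n; i > 0; --i) A[i] = 0;    // negative step -> stride -1
//   A[B[i]]                              // no affine AddRec -> std::nullopt
//   loop-invariant pointer               // returns 0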
1655
1656std::optional<int64_t> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1657 Type *ElemTyB, Value *PtrB,
1658 const DataLayout &DL,
1659 ScalarEvolution &SE,
1660 bool StrictCheck, bool CheckType) {
1661 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1662
1663 // Make sure that A and B are different pointers.
1664 if (PtrA == PtrB)
1665 return 0;
1666
1667 // Make sure that the element types are the same if required.
1668 if (CheckType && ElemTyA != ElemTyB)
1669 return std::nullopt;
1670
1671 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1672 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1673
1674 // Check that the address spaces match.
1675 if (ASA != ASB)
1676 return std::nullopt;
1677 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1678
1679 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1680 const Value *PtrA1 = PtrA->stripAndAccumulateConstantOffsets(
1681 DL, OffsetA, /*AllowNonInbounds=*/true);
1682 const Value *PtrB1 = PtrB->stripAndAccumulateConstantOffsets(
1683 DL, OffsetB, /*AllowNonInbounds=*/true);
1684
1685 std::optional<int64_t> Val;
1686 if (PtrA1 == PtrB1) {
1687 // Retrieve the address space again as pointer stripping now tracks through
1688 // `addrspacecast`.
1689 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1690 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1691 // Check that the address spaces match and that the pointers are valid.
1692 if (ASA != ASB)
1693 return std::nullopt;
1694
1695 IdxWidth = DL.getIndexSizeInBits(ASA);
1696 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1697 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1698
1699 OffsetB -= OffsetA;
1700 Val = OffsetB.trySExtValue();
1701 } else {
1702 // Otherwise compute the distance with SCEV between the base pointers.
1703 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1704 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1705 std::optional<APInt> Diff =
1706 SE.computeConstantDifference(PtrSCEVB, PtrSCEVA);
1707 if (!Diff)
1708 return std::nullopt;
1709 Val = Diff->trySExtValue();
1710 }
1711
1712 if (!Val)
1713 return std::nullopt;
1714
1715 int64_t Size = DL.getTypeStoreSize(ElemTyA);
1716 int64_t Dist = *Val / Size;
1717
1718 // Ensure that the calculated distance matches the type-based one after all
1719 // the bitcasts removal in the provided pointers.
1720 if (!StrictCheck || Dist * Size == Val)
1721 return Dist;
1722 return std::nullopt;
1723}
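// Editor's worked example (not part of the LLVM source): with i32 elements,
// PtrA == %A and PtrB == getelementptr i32, ptr %A, i64 2, the accumulated
// constant offsets are 0 and 8 bytes, so the returned element distance is
// 8 / 4 == 2. With StrictCheck, a 6-byte offset would be rejected because
// (6 / 4) * 4 != 6.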
1724
1725bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1726 const DataLayout &DL, ScalarEvolution &SE,
1727 SmallVectorImpl<unsigned> &SortedIndices) {
1728 assert(llvm::all_of(
1729 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1730 "Expected list of pointer operands.");
1731 // Walk over the pointers, and map each of them to an offset relative to
1732 // the first pointer in the array.
1733 Value *Ptr0 = VL[0];
1734
1735 using DistOrdPair = std::pair<int64_t, unsigned>;
1736 auto Compare = llvm::less_first();
1737 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1738 Offsets.emplace(0, 0);
1739 bool IsConsecutive = true;
1740 for (auto [Idx, Ptr] : drop_begin(enumerate(VL))) {
1741 std::optional<int64_t> Diff =
1742 getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1743 /*StrictCheck=*/true);
1744 if (!Diff)
1745 return false;
1746
1747 // Check if the pointer with the same offset is found.
1748 int64_t Offset = *Diff;
1749 auto [It, IsInserted] = Offsets.emplace(Offset, Idx);
1750 if (!IsInserted)
1751 return false;
1752 // Consecutive order if the inserted element is the last one.
1753 IsConsecutive &= std::next(It) == Offsets.end();
1754 }
1755 SortedIndices.clear();
1756 if (!IsConsecutive) {
1757 // Fill SortedIndices array only if it is non-consecutive.
1758 SortedIndices.resize(VL.size());
1759 for (auto [Idx, Off] : enumerate(Offsets))
1760 SortedIndices[Idx] = Off.second;
1761 }
1762 return true;
1763}
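// Editor's worked example (not part of the LLVM source): for i32 pointers
// VL = {A+2, A, A+1}, the offsets relative to VL[0] are {0, -2, -1}. The new
// offsets are not inserted at the end of the ordered set, so IsConsecutive
// becomes false and SortedIndices is filled with {1, 2, 0}, i.e. VL[1],
// VL[2], VL[0] is the increasing-address order A, A+1, A+2.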
1764
1765/// Returns true if the memory operations \p A and \p B are consecutive.
1766bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1767 ScalarEvolution &SE, bool CheckType) {
1768 Value *PtrA = getLoadStorePointerOperand(A);
1769 Value *PtrB = getLoadStorePointerOperand(B);
1770 if (!PtrA || !PtrB)
1771 return false;
1772 Type *ElemTyA = getLoadStoreType(A);
1773 Type *ElemTyB = getLoadStoreType(B);
1774 std::optional<int64_t> Diff =
1775 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1776 /*StrictCheck=*/true, CheckType);
1777 return Diff == 1;
1778}
1779
1780void MemoryDepChecker::addAccess(StoreInst *SI) {
1781 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1782 [this, SI](Value *Ptr) {
1783 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1784 InstMap.push_back(SI);
1785 ++AccessIdx;
1786 });
1787}
1788
1789void MemoryDepChecker::addAccess(LoadInst *LI) {
1790 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1791 [this, LI](Value *Ptr) {
1792 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1793 InstMap.push_back(LI);
1794 ++AccessIdx;
1795 });
1796}
1797
1798MemoryDepChecker::VectorizationSafetyStatus
1799MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1800 switch (Type) {
1801 case NoDep:
1802 case Forward:
1803 case BackwardVectorizable:
1804 return VectorizationSafetyStatus::Safe;
1805
1806 case Unknown:
1807 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1808 case ForwardButPreventsForwarding:
1809 case Backward:
1810 case BackwardVectorizableButPreventsForwarding:
1811 case IndirectUnsafe:
1812 return VectorizationSafetyStatus::Unsafe;
1813 }
1814 llvm_unreachable("unexpected DepType!");
1815}
1816
1817bool MemoryDepChecker::Dependence::isBackward() const {
1818 switch (Type) {
1819 case NoDep:
1820 case Forward:
1821 case ForwardButPreventsForwarding:
1822 case Unknown:
1823 case IndirectUnsafe:
1824 return false;
1825
1826 case BackwardVectorizable:
1827 case Backward:
1828 case BackwardVectorizableButPreventsForwarding:
1829 return true;
1830 }
1831 llvm_unreachable("unexpected DepType!");
1832}
1833
1834bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1835 return isBackward() || Type == Unknown || Type == IndirectUnsafe;
1836}
1837
1838bool MemoryDepChecker::Dependence::isForward() const {
1839 switch (Type) {
1840 case Forward:
1841 case ForwardButPreventsForwarding:
1842 return true;
1843
1844 case NoDep:
1845 case Unknown:
1846 case BackwardVectorizable:
1847 case Backward:
1848 case BackwardVectorizableButPreventsForwarding:
1849 case IndirectUnsafe:
1850 return false;
1851 }
1852 llvm_unreachable("unexpected DepType!");
1853}
1854
1855bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1856 uint64_t TypeByteSize,
1857 unsigned CommonStride) {
1858 // If loads occur at a distance that is not a multiple of a feasible vector
1859 // factor, store-load forwarding does not take place.
1860 // Positive dependences might cause trouble because vectorizing them might
1861 // prevent store-load forwarding, making vectorized code run a lot slower.
1862 // a[i] = a[i-3] ^ a[i-8];
1863 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
1864 // hence on your typical architecture store-load forwarding does not take
1865 // place. Vectorizing in such cases does not make sense.
1866 // Store-load forwarding distance.
1867
1868 // After this many iterations store-to-load forwarding conflicts should not
1869 // cause any slowdowns.
1870 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1871 // Maximum vector factor.
1872 uint64_t MaxVFWithoutSLForwardIssuesPowerOf2 =
1873 std::min(VectorizerParams::MaxVectorWidth * TypeByteSize,
1874 MaxStoreLoadForwardSafeDistanceInBits);
1875
1876 // Compute the smallest VF at which the store and load would be misaligned.
1877 for (uint64_t VF = 2 * TypeByteSize;
1878 VF <= MaxVFWithoutSLForwardIssuesPowerOf2; VF *= 2) {
1879 // If the number of vector iterations between the store and the load is
1880 // small, we could incur conflicts.
1881 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1882 MaxVFWithoutSLForwardIssuesPowerOf2 = (VF >> 1);
1883 break;
1884 }
1885 }
1886
1887 if (MaxVFWithoutSLForwardIssuesPowerOf2 < 2 * TypeByteSize) {
1888 LLVM_DEBUG(
1889 dbgs() << "LAA: Distance " << Distance
1890 << " that could cause a store-load forwarding conflict\n");
1891 return true;
1892 }
1893
1894 if (CommonStride &&
1895 MaxVFWithoutSLForwardIssuesPowerOf2 <
1896 MaxStoreLoadForwardSafeDistanceInBits &&
1897 MaxVFWithoutSLForwardIssuesPowerOf2 !=
1898 VectorizerParams::MaxVectorWidth * TypeByteSize) {
1899 uint64_t MaxVF =
1900 bit_floor(MaxVFWithoutSLForwardIssuesPowerOf2 / CommonStride);
1901 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1902 MaxStoreLoadForwardSafeDistanceInBits =
1903 std::min(MaxStoreLoadForwardSafeDistanceInBits, MaxVFInBits);
1904 }
1905 return false;
1906}
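// Editor's worked example (not part of the LLVM source): for the loop in the
// comment above, a[i] = a[i-3] ^ a[i-8] with 4-byte elements, the smaller
// dependence distance is 12 bytes and NumItersForStoreLoadThroughMemory is
// 32. The first candidate VF is 8 bytes: 12 % 8 != 0 and 12 / 8 == 1 < 32,
// so the maximum safe VF is capped at 4 bytes. That is below
// 2 * TypeByteSize == 8, so the function reports a store-load forwarding
// conflict and returns true.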
1907
1908void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1909 if (Status < S)
1910 Status = S;
1911}
1912
1913/// Given a dependence-distance \p Dist between two memory accesses, that have
1914/// strides in the same direction whose absolute value of the maximum stride is
1915/// given in \p MaxStride, in a loop whose maximum backedge taken count is \p
1916/// MaxBTC, check if it is possible to prove statically that the dependence
1917/// distance is larger than the range that the accesses will travel through the
1918/// execution of the loop. If so, return true; false otherwise. This is useful
1919/// for example in loops such as the following (PR31098):
1920///
1921/// for (i = 0; i < D; ++i) {
1922/// = out[i];
1923/// out[i+D] =
1924/// }
1925static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1926 const SCEV &MaxBTC, const SCEV &Dist,
1927 uint64_t MaxStride) {
1928
1929 // If we can prove that
1930 // (**) |Dist| > MaxBTC * Step
1931 // where Step is the absolute stride of the memory accesses in bytes,
1932 // then there is no dependence.
1933 //
1934 // Rationale:
1935 // We basically want to check if the absolute distance (|Dist/Step|)
1936 // is >= the loop iteration count (or > MaxBTC).
1937 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1938 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1939 // that the dependence distance is >= VF; This is checked elsewhere.
1940 // But in some cases we can prune dependence distances early, and
1941 // even before selecting the VF, and without a runtime test, by comparing
1942 // the distance against the loop iteration count. Since the vectorized code
1943 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1944 // also guarantees that distance >= VF.
1945 //
1946 const SCEV *Step = SE.getConstant(MaxBTC.getType(), MaxStride);
1947 const SCEV *Product = SE.getMulExpr(&MaxBTC, Step);
1948
1949 const SCEV *CastedDist = &Dist;
1950 const SCEV *CastedProduct = Product;
1951 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1952 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1953
1954 // The dependence distance can be positive/negative, so we sign extend Dist;
1955 // The multiplication of the absolute stride in bytes and the
1956 // backedgeTakenCount is non-negative, so we zero extend Product.
1957 if (DistTypeSizeBits > ProductTypeSizeBits)
1958 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1959 else
1960 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1961
1962 // Is Dist - (MaxBTC * Step) > 0 ?
1963 // (If so, then we have proven (**) because |Dist| >= Dist)
1964 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1965 if (SE.isKnownPositive(Minus))
1966 return true;
1967
1968 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1969 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1970 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1971 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1972 return SE.isKnownPositive(Minus);
1973}
1974
1975/// Check the dependence for two accesses with the same stride \p Stride.
1976/// \p Distance is the positive distance in bytes, and \p TypeByteSize is type
1977/// size in bytes.
1978///
1979/// \returns true if they are independent.
1980static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1981 uint64_t TypeByteSize) {
1982 assert(Stride > 1 && "The stride must be greater than 1");
1983 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1984 assert(Distance > 0 && "The distance must be non-zero");
1985
1986 // Skip if the distance is not a multiple of the type byte size.
1987 if (Distance % TypeByteSize)
1988 return false;
1989
1990 // No dependence if the distance is not a multiple of the stride.
1991 // E.g.
1992 // for (i = 0; i < 1024 ; i += 4)
1993 // A[i+2] = A[i] + 1;
1994 //
1995 // Two accesses in memory (distance is 2, stride is 4):
1996 // | A[0] | | | | A[4] | | | |
1997 // | | | A[2] | | | | A[6] | |
1998 //
1999 // E.g.
2000 // for (i = 0; i < 1024 ; i += 3)
2001 // A[i+4] = A[i] + 1;
2002 //
2003 // Two accesses in memory (distance is 4, stride is 3):
2004 // | A[0] | | | A[3] | | | A[6] | | |
2005 // | | | | | A[4] | | | A[7] | |
2006 return Distance % Stride;
2007}
2008
2009bool MemoryDepChecker::areAccessesCompletelyBeforeOrAfter(const SCEV *Src,
2010 Type *SrcTy,
2011 const SCEV *Sink,
2012 Type *SinkTy) {
2013 const SCEV *BTC = PSE.getBackedgeTakenCount();
2014 const SCEV *SymbolicMaxBTC = PSE.getSymbolicMaxBackedgeTakenCount();
2015 ScalarEvolution &SE = *PSE.getSE();
2016 const auto &[SrcStart_, SrcEnd_] =
2017 getStartAndEndForAccess(InnermostLoop, Src, SrcTy, BTC, SymbolicMaxBTC,
2018 &SE, &PointerBounds, DT, AC, LoopGuards);
2019 if (isa<SCEVCouldNotCompute>(SrcStart_) || isa<SCEVCouldNotCompute>(SrcEnd_))
2020 return false;
2021
2022 const auto &[SinkStart_, SinkEnd_] =
2023 getStartAndEndForAccess(InnermostLoop, Sink, SinkTy, BTC, SymbolicMaxBTC,
2024 &SE, &PointerBounds, DT, AC, LoopGuards);
2025 if (isa<SCEVCouldNotCompute>(SinkStart_) ||
2026 isa<SCEVCouldNotCompute>(SinkEnd_))
2027 return false;
2028
2029 if (!LoopGuards)
2030 LoopGuards.emplace(ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2031
2032 auto SrcEnd = SE.applyLoopGuards(SrcEnd_, *LoopGuards);
2033 auto SinkStart = SE.applyLoopGuards(SinkStart_, *LoopGuards);
2034 if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
2035 return true;
2036
2037 auto SinkEnd = SE.applyLoopGuards(SinkEnd_, *LoopGuards);
2038 auto SrcStart = SE.applyLoopGuards(SrcStart_, *LoopGuards);
2039 return SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart);
2040}
2041
2042std::variant<MemoryDepChecker::Dependence::DepType,
2043 MemoryDepChecker::DepDistanceStrideAndSizeInfo>
2044MemoryDepChecker::getDependenceDistanceStrideAndSize(
2045 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
2046 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst) {
2047 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
2048 auto &SE = *PSE.getSE();
2049 const auto &[APtr, AIsWrite] = A;
2050 const auto &[BPtr, BIsWrite] = B;
2051
2052 // Two reads are independent.
2053 if (!AIsWrite && !BIsWrite)
2054 return MemoryDepChecker::Dependence::NoDep;
2055
2056 Type *ATy = getLoadStoreType(AInst);
2057 Type *BTy = getLoadStoreType(BInst);
2058
2059 // We cannot check pointers in different address spaces.
2060 if (APtr->getType()->getPointerAddressSpace() !=
2061 BPtr->getType()->getPointerAddressSpace())
2062 return MemoryDepChecker::Dependence::Unknown;
2063
2064 std::optional<int64_t> StrideAPtr = getPtrStride(
2065 PSE, ATy, APtr, InnermostLoop, *DT, SymbolicStrides, true, true);
2066 std::optional<int64_t> StrideBPtr = getPtrStride(
2067 PSE, BTy, BPtr, InnermostLoop, *DT, SymbolicStrides, true, true);
2068
2069 const SCEV *Src = PSE.getSCEV(APtr);
2070 const SCEV *Sink = PSE.getSCEV(BPtr);
2071
2072 // If the induction step is negative we have to invert source and sink of the
2073 // dependence when measuring the distance between them. We should not swap
2074 // AIsWrite with BIsWrite, as their uses expect them in program order.
2075 if (StrideAPtr && *StrideAPtr < 0) {
2076 std::swap(Src, Sink);
2077 std::swap(AInst, BInst);
2078 std::swap(ATy, BTy);
2079 std::swap(StrideAPtr, StrideBPtr);
2080 }
2081
2082 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
2083
2084 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
2085 << "\n");
2086 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
2087 << ": " << *Dist << "\n");
2088
2089 // Need accesses with constant strides and the same direction for further
2090 // dependence analysis. We don't want to vectorize "A[B[i]] += ..." and
2091 // similar code or pointer arithmetic that could wrap in the address space.
2092
2093 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
2094 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
2095 // dependence further and also cannot generate runtime checks.
2096 if (!StrideAPtr || !StrideBPtr) {
2097 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2098 return MemoryDepChecker::Dependence::IndirectUnsafe;
2099 }
2100
2101 int64_t StrideAPtrInt = *StrideAPtr;
2102 int64_t StrideBPtrInt = *StrideBPtr;
2103 LLVM_DEBUG(dbgs() << "LAA: Src induction step: " << StrideAPtrInt
2104 << " Sink induction step: " << StrideBPtrInt << "\n");
2105 // At least one of Src or Sink is loop invariant and the other is strided or
2106 // invariant. We can generate a runtime check to disambiguate the accesses.
2107 if (!StrideAPtrInt || !StrideBPtrInt)
2108 return MemoryDepChecker::Dependence::Unknown;
2109
2110 // Both Src and Sink have a constant stride, check if they are in the same
2111 // direction.
2112 if ((StrideAPtrInt > 0) != (StrideBPtrInt > 0)) {
2113 LLVM_DEBUG(
2114 dbgs() << "Pointer access with strides in different directions\n");
2115 return MemoryDepChecker::Dependence::Unknown;
2116 }
2117
2118 TypeSize AStoreSz = DL.getTypeStoreSize(ATy);
2119 TypeSize BStoreSz = DL.getTypeStoreSize(BTy);
2120
2121 // If store sizes are not the same, set TypeByteSize to zero, so we can check
2122 // it in the caller isDependent.
2123 uint64_t ASz = DL.getTypeAllocSize(ATy);
2124 uint64_t BSz = DL.getTypeAllocSize(BTy);
2125 uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0;
2126
2127 uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz;
2128 uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz;
2129
2130 uint64_t MaxStride = std::max(StrideAScaled, StrideBScaled);
2131
2132 std::optional<uint64_t> CommonStride;
2133 if (StrideAScaled == StrideBScaled)
2134 CommonStride = StrideAScaled;
2135
2136 // TODO: Historically, we didn't retry with runtime checks when (unscaled)
2137 // strides were different but there is no inherent reason to.
2138 if (!isa<SCEVConstant>(Dist))
2139 ShouldRetryWithRuntimeChecks |= StrideAPtrInt == StrideBPtrInt;
2140
2141 // If distance is a SCEVCouldNotCompute, return Unknown immediately.
2142 if (isa<SCEVCouldNotCompute>(Dist)) {
2143 LLVM_DEBUG(dbgs() << "LAA: Uncomputable distance.\n");
2144 return Dependence::Unknown;
2145 }
2146
2147 return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride,
2148 TypeByteSize, AIsWrite, BIsWrite);
2149}
2150
2151MemoryDepChecker::Dependence::DepType
2152MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
2153 const MemAccessInfo &B, unsigned BIdx) {
2154 assert(AIdx < BIdx && "Must pass arguments in program order");
2155
2156 // Check if we can prove that Sink only accesses memory after Src's end or
2157 // vice versa. The helper is used to perform the checks only on the exit paths
2158 // where it helps to improve the analysis result.
2159 auto CheckCompletelyBeforeOrAfter = [&]() {
2160 auto *APtr = A.getPointer();
2161 auto *BPtr = B.getPointer();
2162 Type *ATy = getLoadStoreType(InstMap[AIdx]);
2163 Type *BTy = getLoadStoreType(InstMap[BIdx]);
2164 const SCEV *Src = PSE.getSCEV(APtr);
2165 const SCEV *Sink = PSE.getSCEV(BPtr);
2166 return areAccessesCompletelyBeforeOrAfter(Src, ATy, Sink, BTy);
2167 };
2168
2169 // Get the dependence distance, stride, type size and what access writes for
2170 // the dependence between A and B.
2171 auto Res =
2172 getDependenceDistanceStrideAndSize(A, InstMap[AIdx], B, InstMap[BIdx]);
2173 if (std::holds_alternative<Dependence::DepType>(Res)) {
2174 if (std::get<Dependence::DepType>(Res) == Dependence::Unknown &&
2175 CheckCompletelyBeforeOrAfter())
2176 return Dependence::NoDep;
2177 return std::get<Dependence::DepType>(Res);
2178 }
2179
2180 auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] =
2181 std::get<DepDistanceStrideAndSizeInfo>(Res);
2182 bool HasSameSize = TypeByteSize > 0;
2183
2184 ScalarEvolution &SE = *PSE.getSE();
2185 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2186
2187 // If the distance between the accesses is larger than their maximum absolute
2188 // stride multiplied by the symbolic maximum backedge taken count (which is an
2189 // upper bound of the number of iterations), the accesses are independent, i.e.
2190 // they are far enough apart that they won't access the same location
2191 // across all loop iterations.
2192 if (HasSameSize &&
2193 isSafeDependenceDistance(
2194 DL, SE, *(PSE.getSymbolicMaxBackedgeTakenCount()), *Dist, MaxStride))
2195 return Dependence::NoDep;
2196
2197 // The rest of this function relies on ConstDist being at most 64-bits, which
2198 // is checked earlier. Will assert if the calling code changes.
2199 const APInt *APDist = nullptr;
2200 uint64_t ConstDist =
2201 match(Dist, m_scev_APInt(APDist)) ? APDist->abs().getZExtValue() : 0;
2202
2203 // Attempt to prove strided accesses independent.
2204 if (APDist) {
2205 // If the distance between accesses and their strides are known constants,
2206 // check whether the accesses interlace each other.
2207 if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize &&
2208 areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) {
2209 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2210 return Dependence::NoDep;
2211 }
2212 } else {
2213 if (!LoopGuards)
2214 LoopGuards.emplace(
2215 ScalarEvolution::LoopGuards::collect(InnermostLoop, SE));
2216 Dist = SE.applyLoopGuards(Dist, *LoopGuards);
2217 }
2218
2219 // Negative distances are not plausible dependencies.
2220 if (SE.isKnownNonPositive(Dist)) {
2221 if (SE.isKnownNonNegative(Dist)) {
2222 if (HasSameSize) {
2223 // Write to the same location with the same size.
2224 return Dependence::Forward;
2225 }
2226 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2227 "different type sizes\n");
2228 return Dependence::Unknown;
2229 }
2230
2231 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2232 // Check if the first access writes to a location that is read in a later
2233 // iteration, where the distance between them is not a multiple of a vector
2234 // factor and relatively small.
2235 //
2236 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2237 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2238 // forward dependency will allow vectorization using any width.
2239
2240 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2241 if (!ConstDist) {
2242 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2243 : Dependence::Unknown;
2244 }
2245 if (!HasSameSize ||
2246 couldPreventStoreLoadForward(ConstDist, TypeByteSize)) {
2247 LLVM_DEBUG(
2248 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2249 return Dependence::ForwardButPreventsForwarding;
2250 }
2251 }
2252
2253 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2254 return Dependence::Forward;
2255 }
2256
2257 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2258 // Below we only handle strictly positive distances.
2259 if (MinDistance <= 0) {
2260 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2261 : Dependence::Unknown;
2262 }
2263
2264 if (!HasSameSize) {
2265 if (CheckCompletelyBeforeOrAfter())
2266 return Dependence::NoDep;
2267 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2268 "different type sizes\n");
2269 return Dependence::Unknown;
2270 }
2271 // Bail out early if passed-in parameters make vectorization not feasible.
2272 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2273 VectorizerParams::VectorizationFactor : 1);
2274 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2275 VectorizerParams::VectorizationInterleave : 1);
2276 // The minimum number of iterations for a vectorized/unrolled version.
2277 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2278
2279 // It's not vectorizable if the distance is smaller than the minimum distance
2280 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2281 // front needs MaxStride. Vectorizing the last iteration needs TypeByteSize.
2282 // (No need to add the last gap distance.)
2283 //
2284 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2285 // foo(int *A) {
2286 // int *B = (int *)((char *)A + 14);
2287 // for (i = 0 ; i < 1024 ; i += 2)
2288 // B[i] = A[i] + 1;
2289 // }
2290 //
2291 // Two accesses in memory (stride is 4 * 2):
2292 // | A[0] | | A[2] | | A[4] | | A[6] | |
2293 // | B[0] | | B[2] | | B[4] |
2294 //
2295 // MinDistance needed for vectorizing iterations except the last iteration:
2296 // 4 * 2 * (MinNumIter - 1). MinDistance needed for the last iteration: 4.
2297 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2298 //
2299 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2300 // 12, which is less than distance.
2301 //
2302 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
2303 // the minimum distance needed is 28, which is greater than distance. It is
2304 // not safe to do vectorization.
2305 //
2306 // We use MaxStride (maximum of src and sink strides) to get a conservative
2307 // lower bound on the MinDistanceNeeded in case of different strides.
2308
2309 // We know that Dist is positive, but it may not be constant. Use the signed
2310 // minimum for computations below, as this ensures we compute the closest
2311 // possible dependence distance.
2312 uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize;
2313 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2314 if (!ConstDist) {
2315 // For non-constant distances, we checked the lower bound of the
2316 // dependence distance and the distance may be larger at runtime (and safe
2317 // for vectorization). Classify it as Unknown, so we re-try with runtime
2318 // checks, unless we can prove both accesses cannot overlap.
2319 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2320 : Dependence::Unknown;
2321 }
2322 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2323 << MinDistance << '\n');
2324 return Dependence::Backward;
2325 }
2326
2327 // Unsafe if the minimum distance needed is greater than the smallest
2328 // dependence distance.
2329 if (MinDistanceNeeded > MinDepDistBytes) {
2330 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2331 << MinDistanceNeeded << " size in bytes\n");
2332 return Dependence::Backward;
2333 }
2334
2335 MinDepDistBytes =
2336 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2337
2338 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2339 if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist &&
2340 couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride))
2341 return Dependence::BackwardVectorizableButPreventsForwarding;
2342
2343 uint64_t MaxVF = MinDepDistBytes / MaxStride;
2344 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2345 << " with max VF = " << MaxVF << '\n');
2346
2347 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2348 if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) {
2349 // For non-constant distances, we checked the lower bound of the dependence
2350 // distance and the distance may be larger at runtime (and safe for
2351 // vectorization). Classify it as Unknown, so we re-try with runtime checks,
2352 // unless we can prove both accesses cannot overlap.
2353 return CheckCompletelyBeforeOrAfter() ? Dependence::NoDep
2354 : Dependence::Unknown;
2355 }
2356
2357 if (CheckCompletelyBeforeOrAfter())
2358 return Dependence::NoDep;
2359
2360 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2361 return Dependence::BackwardVectorizable;
2362}
2363
2364bool MemoryDepChecker::areDepsSafe(const DepCandidates &DepCands,
2365 const MemAccessInfoList &CheckDeps) {
2366
2367 MinDepDistBytes = -1;
2368 SmallPtrSet<MemAccessInfo, 8> Visited;
2369 for (MemAccessInfo CurAccess : CheckDeps) {
2370 if (Visited.contains(CurAccess))
2371 continue;
2372
2373 // Check accesses within this set.
2374 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2375 DepCands.findLeader(CurAccess);
2376 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2377 DepCands.member_end();
2378
2379 // Check every access pair.
2380 while (AI != AE) {
2381 Visited.insert(*AI);
2382 bool AIIsWrite = AI->getInt();
2383 // Check loads only against next equivalent class, but stores also against
2384 // other stores in the same equivalence class - to the same address.
2385 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2386 (AIIsWrite ? AI : std::next(AI));
2387 while (OI != AE) {
2388 // Check every accessing instruction pair in program order.
2389 auto &Acc = Accesses[*AI];
2390 for (std::vector<unsigned>::iterator I1 = Acc.begin(), I1E = Acc.end();
2391 I1 != I1E; ++I1)
2392 // Scan all accesses of another equivalence class, but only the next
2393 // accesses of the same equivalent class.
2394 for (std::vector<unsigned>::iterator
2395 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2396 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2397 I2 != I2E; ++I2) {
2398 auto A = std::make_pair(&*AI, *I1);
2399 auto B = std::make_pair(&*OI, *I2);
2400
2401 assert(*I1 != *I2);
2402 if (*I1 > *I2)
2403 std::swap(A, B);
2404
2405 Dependence::DepType Type =
2406 isDependent(*A.first, A.second, *B.first, B.second);
2407 mergeInStatus(Dependence::isSafeForVectorization(Type));
2408
2409 // Gather dependences unless we accumulated MaxDependences
2410 // dependences. In that case return as soon as we find the first
2411 // unsafe dependence. This puts a limit on this quadratic
2412 // algorithm.
2413 if (RecordDependences) {
2414 if (Type != Dependence::NoDep)
2415 Dependences.emplace_back(A.second, B.second, Type);
2416
2417 if (Dependences.size() >= MaxDependences) {
2418 RecordDependences = false;
2419 Dependences.clear();
2421 << "Too many dependences, stopped recording\n");
2422 }
2423 }
2424 if (!RecordDependences && !isSafeForVectorization())
2425 return false;
2426 }
2427 ++OI;
2428 }
2429 ++AI;
2430 }
2431 }
2432
2433 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2434 return isSafeForVectorization();
2435}
2436
2437SmallVector<Instruction *, 4>
2438MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool IsWrite) const {
2439 MemAccessInfo Access(Ptr, IsWrite);
2440 auto I = Accesses.find(Access);
2441 SmallVector<Instruction *, 4> Insts;
2442 if (I != Accesses.end()) {
2443 transform(I->second, std::back_inserter(Insts),
2444 [&](unsigned Idx) { return this->InstMap[Idx]; });
2445 }
2446
2447 return Insts;
2448}
2449
2451 "NoDep",
2452 "Unknown",
2453 "IndirectUnsafe",
2454 "Forward",
2455 "ForwardButPreventsForwarding",
2456 "Backward",
2457 "BackwardVectorizable",
2458 "BackwardVectorizableButPreventsForwarding"};
2459
2460void MemoryDepChecker::Dependence::print(
2461 raw_ostream &OS, unsigned Depth,
2462 const SmallVectorImpl<Instruction *> &Instrs) const {
2463 OS.indent(Depth) << DepName[Type] << ":\n";
2464 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2465 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2466}
2467
2468bool LoopAccessInfo::canAnalyzeLoop() {
2469 // We need to have a loop header.
2470 LLVM_DEBUG(dbgs() << "\nLAA: Checking a loop in '"
2471 << TheLoop->getHeader()->getParent()->getName() << "' from "
2472 << TheLoop->getLocStr() << "\n");
2473
2474 // We can only analyze innermost loops.
2475 if (!TheLoop->isInnermost()) {
2476 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2477 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2478 return false;
2479 }
2480
2481 // We must have a single backedge.
2482 if (TheLoop->getNumBackEdges() != 1) {
2483 LLVM_DEBUG(
2484 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2485 recordAnalysis("CFGNotUnderstood")
2486 << "loop control flow is not understood by analyzer";
2487 return false;
2488 }
2489
2490 // ScalarEvolution needs to be able to find the symbolic max backedge taken
2491 // count, which is an upper bound on the number of loop iterations. The loop
2492 // may execute fewer iterations, if it exits via an uncountable exit.
2493 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2494 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2495 recordAnalysis("CantComputeNumberOfIterations")
2496 << "could not determine number of loop iterations";
2497 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2498 return false;
2499 }
2500
2501 LLVM_DEBUG(dbgs() << "LAA: Found an analyzable loop: "
2502 << TheLoop->getHeader()->getName() << "\n");
2503 return true;
2504}
2505
2506bool LoopAccessInfo::analyzeLoop(AAResults *AA, const LoopInfo *LI,
2507 const TargetLibraryInfo *TLI,
2508 DominatorTree *DT) {
2509 // Holds the Load and Store instructions.
2510 SmallVector<LoadInst *, 16> Loads;
2511 SmallVector<StoreInst *, 16> Stores;
2512 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2513
2514 // Holds all the different accesses in the loop.
2515 unsigned NumReads = 0;
2516 unsigned NumReadWrites = 0;
2517
2518 bool HasComplexMemInst = false;
2519
2520 // A runtime check is only legal to insert if there are no convergent calls.
2521 HasConvergentOp = false;
2522
2523 PtrRtChecking->Pointers.clear();
2524 PtrRtChecking->Need = false;
2525
2526 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2527
2528 const bool EnableMemAccessVersioningOfLoop =
2529 EnableMemAccessVersioning &&
2530 !TheLoop->getHeader()->getParent()->hasOptSize();
2531
2532 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2533 // loop info, as it may be arbitrary.
2534 LoopBlocksRPO RPOT(TheLoop);
2535 RPOT.perform(LI);
2536 for (BasicBlock *BB : RPOT) {
2537 // Scan the BB and collect legal loads and stores. Also detect any
2538 // convergent instructions.
2539 for (Instruction &I : *BB) {
2540 if (auto *Call = dyn_cast<CallBase>(&I)) {
2541 if (Call->isConvergent())
2542 HasConvergentOp = true;
2543 }
2544
2545 // With both a non-vectorizable memory instruction and a convergent
2546 // operation found in this loop, there is no reason to continue the search.
2547 if (HasComplexMemInst && HasConvergentOp)
2548 return false;
2549
2550 // Avoid hitting recordAnalysis multiple times.
2551 if (HasComplexMemInst)
2552 continue;
2553
2554 // Record alias scopes defined inside the loop.
2555 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2556 for (Metadata *Op : Decl->getScopeList()->operands())
2557 LoopAliasScopes.insert(cast<MDNode>(Op));
2558
2559 // Many math library functions read the rounding mode. We will only
2560 // vectorize a loop if it contains known function calls that don't set
2561 // the flag. Therefore, it is safe to ignore this read from memory.
2562 auto *Call = dyn_cast<CallInst>(&I);
2563 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2564 continue;
2565
2566 // If this is a load, save it. If this instruction can read from memory
2567 // but is not a load, we only allow it if it's a call to a function with a
2568 // vector mapping and no pointer arguments.
2569 if (I.mayReadFromMemory()) {
2570 auto hasPointerArgs = [](CallBase *CB) {
2571 return any_of(CB->args(), [](Value const *Arg) {
2572 return Arg->getType()->isPointerTy();
2573 });
2574 };
2575
2576 // If the function has an explicit vectorized counterpart, and does not
2577 // take output/input pointers, we can safely assume that it can be
2578 // vectorized.
2579 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2580 !hasPointerArgs(Call) && !VFDatabase::getMappings(*Call).empty())
2581 continue;
2582
2583 auto *Ld = dyn_cast<LoadInst>(&I);
2584 if (!Ld) {
2585 recordAnalysis("CantVectorizeInstruction", Ld)
2586 << "instruction cannot be vectorized";
2587 HasComplexMemInst = true;
2588 continue;
2589 }
2590 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2591 recordAnalysis("NonSimpleLoad", Ld)
2592 << "read with atomic ordering or volatile read";
2593 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2594 HasComplexMemInst = true;
2595 continue;
2596 }
2597 NumLoads++;
2598 Loads.push_back(Ld);
2599 DepChecker->addAccess(Ld);
2600 if (EnableMemAccessVersioningOfLoop)
2601 collectStridedAccess(Ld);
2602 continue;
2603 }
2604
2605 // Save 'store' instructions. Abort if other instructions write to memory.
2606 if (I.mayWriteToMemory()) {
2607 auto *St = dyn_cast<StoreInst>(&I);
2608 if (!St) {
2609 recordAnalysis("CantVectorizeInstruction", St)
2610 << "instruction cannot be vectorized";
2611 HasComplexMemInst = true;
2612 continue;
2613 }
2614 if (!St->isSimple() && !IsAnnotatedParallel) {
2615 recordAnalysis("NonSimpleStore", St)
2616 << "write with atomic ordering or volatile write";
2617 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2618 HasComplexMemInst = true;
2619 continue;
2620 }
2621 NumStores++;
2622 Stores.push_back(St);
2623 DepChecker->addAccess(St);
2624 if (EnableMemAccessVersioningOfLoop)
2625 collectStridedAccess(St);
2626 }
2627 } // Next instr.
2628 } // Next block.
2629
2630 if (HasComplexMemInst)
2631 return false;
2632
2633 // Now we have two lists that hold the loads and the stores.
2634 // Next, we find the pointers that they use.
2635
2636 // Check if we see any stores. If there are no stores, then we don't
2637 // care if the pointers are *restrict*.
2638 if (!Stores.size()) {
2639 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2640 return true;
2641 }
2642
2643 MemoryDepChecker::DepCandidates DepCands;
2644 AccessAnalysis Accesses(TheLoop, AA, LI, *DT, DepCands, *PSE,
2645 LoopAliasScopes);
2646
2647 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2648 // multiple times on the same object. If the ptr is accessed twice, once
2649 // for read and once for write, it will only appear once (on the write
2650 // list). This is okay, since we are going to check for conflicts between
2651 // writes and between reads and writes, but not between reads and reads.
2652 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2653
2654 // Record uniform store addresses to identify if we have multiple stores
2655 // to the same address.
2656 SmallPtrSet<Value *, 16> UniformStores;
2657
2658 for (StoreInst *ST : Stores) {
2659 Value *Ptr = ST->getPointerOperand();
2660
2661 if (isInvariant(Ptr)) {
2662 // Record store instructions to loop invariant addresses
2663 StoresToInvariantAddresses.push_back(ST);
2664 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2665 !UniformStores.insert(Ptr).second;
2666 }
2667
2668 // If we did *not* see this pointer before, insert it to the read-write
2669 // list. At this phase it is only a 'write' list.
2670 Type *AccessTy = getLoadStoreType(ST);
2671 if (Seen.insert({Ptr, AccessTy}).second) {
2672 ++NumReadWrites;
2673
2674 MemoryLocation Loc = MemoryLocation::get(ST);
2675 // The TBAA metadata could have a control dependency on the predication
2676 // condition, so we cannot rely on it when determining whether or not we
2677 // need runtime pointer checks.
2678 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2679 Loc.AATags.TBAA = nullptr;
2680
2681 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2682 [&Accesses, AccessTy, Loc](Value *Ptr) {
2683 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2684 Accesses.addStore(NewLoc, AccessTy);
2685 });
2686 }
2687 }
2688
2689 if (IsAnnotatedParallel) {
2690 LLVM_DEBUG(
2691 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2692 << "checks.\n");
2693 return true;
2694 }
2695
2696 for (LoadInst *LD : Loads) {
2697 Value *Ptr = LD->getPointerOperand();
2698 // If we did *not* see this pointer before, insert it to the
2699 // read list. If we *did* see it before, then it is already in
2700 // the read-write list. This allows us to vectorize expressions
2701 // such as A[i] += x; Because the address of A[i] is a read-write
2702 // pointer. This only works if the index of A[i] is consecutive.
2703 // If the address of i is unknown (for example A[B[i]]) then we may
2704 // read a few words, modify, and write a few words, and some of the
2705 // words may be written to the same address.
2706 bool IsReadOnlyPtr = false;
2707 Type *AccessTy = getLoadStoreType(LD);
2708 if (Seen.insert({Ptr, AccessTy}).second ||
2709 !getPtrStride(*PSE, AccessTy, Ptr, TheLoop, *DT, SymbolicStrides, false,
2710 true)) {
2711 ++NumReads;
2712 IsReadOnlyPtr = true;
2713 }
2714
2715 // See if there is an unsafe dependency between a load from a uniform address
2716 // and a store to the same uniform address.
2717 if (UniformStores.contains(Ptr)) {
2718 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2719 "load and uniform store to the same address!\n");
2720 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2721 }
2722
2723 MemoryLocation Loc = MemoryLocation::get(LD);
2724 // The TBAA metadata could have a control dependency on the predication
2725 // condition, so we cannot rely on it when determining whether or not we
2726 // need runtime pointer checks.
2727 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2728 Loc.AATags.TBAA = nullptr;
2729
2730 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2731 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2732 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2733 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2734 });
2735 }
2736
2737 // If we write (or read-write) to a single destination and there are no
2738 // other reads in this loop then it is safe to vectorize.
2739 if (NumReadWrites == 1 && NumReads == 0) {
2740 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2741 return true;
2742 }
2743
2744 // Build dependence sets and check whether we need a runtime pointer bounds
2745 // check.
2746 Accesses.buildDependenceSets();
2747
2748 // Find pointers with computable bounds. We are going to use this information
2749 // to place a runtime bound check.
2750 Value *UncomputablePtr = nullptr;
2751 HasCompletePtrRtChecking = Accesses.canCheckPtrAtRT(
2752 *PtrRtChecking, TheLoop, SymbolicStrides, UncomputablePtr, AllowPartial);
2753 if (!HasCompletePtrRtChecking) {
2754 const auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2755 recordAnalysis("CantIdentifyArrayBounds", I)
2756 << "cannot identify array bounds";
2757 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2758 << "the array bounds.\n");
2759 return false;
2760 }
2761
2762 LLVM_DEBUG(
2763 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2764
2765 bool DepsAreSafe = true;
2766 if (Accesses.isDependencyCheckNeeded()) {
2767 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2768 DepsAreSafe =
2769 DepChecker->areDepsSafe(DepCands, Accesses.getDependenciesToCheck());
2770
2771 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeChecks()) {
2772 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2773
2774 // Clear the dependency checks. We assume they are not needed.
2775 Accesses.resetDepChecks(*DepChecker);
2776
2777 PtrRtChecking->reset();
2778 PtrRtChecking->Need = true;
2779
2780 UncomputablePtr = nullptr;
2781 HasCompletePtrRtChecking =
2782 Accesses.canCheckPtrAtRT(*PtrRtChecking, TheLoop, SymbolicStrides,
2783 UncomputablePtr, AllowPartial);
2784
2785 // Check that we found the bounds for the pointer.
2786 if (!HasCompletePtrRtChecking) {
2787 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2788 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2789 << "cannot check memory dependencies at runtime";
2790 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2791 return false;
2792 }
2793 DepsAreSafe = true;
2794 }
2795 }
2796
2797 if (HasConvergentOp) {
2798 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2799 << "cannot add control dependency to convergent operation";
2800 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2801 "would be needed with a convergent operation\n");
2802 return false;
2803 }
2804
2805 if (DepsAreSafe) {
2806 LLVM_DEBUG(
2807 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2808 << (PtrRtChecking->Need ? "" : " don't")
2809 << " need runtime memory checks.\n");
2810 return true;
2811 }
2812
2813 emitUnsafeDependenceRemark();
2814 return false;
2815}
2816
2817void LoopAccessInfo::emitUnsafeDependenceRemark() {
2818 const auto *Deps = getDepChecker().getDependences();
2819 if (!Deps)
2820 return;
2821 const auto *Found =
2822 llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2823 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2824 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2825 });
2826 if (Found == Deps->end())
2827 return;
2828 MemoryDepChecker::Dependence Dep = *Found;
2829
2830 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2831
2832 // Emit remark for first unsafe dependence
2833 bool HasForcedDistribution = false;
2834 std::optional<const MDOperand *> Value =
2835 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2836 if (Value) {
2837 const MDOperand *Op = *Value;
2838 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2839 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2840 }
2841
2842 const std::string Info =
2843 HasForcedDistribution
2844 ? "unsafe dependent memory operations in loop."
2845 : "unsafe dependent memory operations in loop. Use "
2846 "#pragma clang loop distribute(enable) to allow loop distribution "
2847 "to attempt to isolate the offending operations into a separate "
2848 "loop";
2849 OptimizationRemarkAnalysis &R =
2850 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2851
2852 switch (Dep.Type) {
2856 llvm_unreachable("Unexpected dependence");
2858 R << "\nBackward loop carried data dependence.";
2859 break;
2861 R << "\nForward loop carried data dependence that prevents "
2862 "store-to-load forwarding.";
2863 break;
2865 R << "\nBackward loop carried data dependence that prevents "
2866 "store-to-load forwarding.";
2867 break;
2869 R << "\nUnsafe indirect dependence.";
2870 break;
2872 R << "\nUnknown data dependence.";
2873 break;
2874 }
2875
2876 if (Instruction *I = Dep.getSource(getDepChecker())) {
2877 DebugLoc SourceLoc = I->getDebugLoc();
2878 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2879 SourceLoc = DD->getDebugLoc();
2880 if (SourceLoc)
2881 R << " Memory location is the same as accessed at "
2882 << ore::NV("Location", SourceLoc);
2883 }
2884}
2885
2886bool LoopAccessInfo::blockNeedsPredication(const BasicBlock *BB,
2887 const Loop *TheLoop,
2888 const DominatorTree *DT) {
2889 assert(TheLoop->contains(BB) && "Unknown block used");
2890
2891 // Blocks that do not dominate the latch need predication.
2892 const BasicBlock *Latch = TheLoop->getLoopLatch();
2893 return !DT->dominates(BB, Latch);
2894}
2895
2896OptimizationRemarkAnalysis &
2897LoopAccessInfo::recordAnalysis(StringRef RemarkName, const Instruction *I) {
2898 assert(!Report && "Multiple reports generated");
2899
2900 const BasicBlock *CodeRegion = TheLoop->getHeader();
2901 DebugLoc DL = TheLoop->getStartLoc();
2902
2903 if (I) {
2904 CodeRegion = I->getParent();
2905 // If there is no debug location attached to the instruction, revert back to
2906 // using the loop's.
2907 if (I->getDebugLoc())
2908 DL = I->getDebugLoc();
2909 }
2910
2911 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
2912 DL, CodeRegion);
2913 return *Report;
2914}
2915
2916bool LoopAccessInfo::isInvariant(Value *V) const {
2917 auto *SE = PSE->getSE();
2918 if (TheLoop->isLoopInvariant(V))
2919 return true;
2920 if (!SE->isSCEVable(V->getType()))
2921 return false;
2922 const SCEV *S = SE->getSCEV(V);
2923 return SE->isLoopInvariant(S, TheLoop);
2924}
2925
2926/// If \p Ptr is a GEP, which has a loop-variant operand, return that operand.
2927/// Otherwise, return \p Ptr.
2928static Value *getLoopVariantGEPOperand(Value *Ptr, ScalarEvolution *SE,
2929 Loop *Lp) {
2930 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2931 if (!GEP)
2932 return Ptr;
2933
2934 Value *V = Ptr;
2935 for (const Use &U : GEP->operands()) {
2936 if (!SE->isLoopInvariant(SE->getSCEV(U), Lp)) {
2937 if (V == Ptr)
2938 V = U;
2939 else
2940 // There must be exactly one loop-variant operand.
2941 return Ptr;
2942 }
2943 }
2944 return V;
2945}
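// Editor's illustration (not part of the LLVM source): for
//   %gep = getelementptr i32, ptr %A, i64 %iv
// with %A loop-invariant, the single loop-variant operand %iv is returned and
// analyzed instead of the pointer. If two operands were loop-variant, the
// original pointer would be returned unchanged.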
2946
2947/// Get the stride of a pointer access in a loop. Looks for symbolic
2948/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2949static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2950 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2951 if (!PtrTy)
2952 return nullptr;
2953
2954 // Try to remove a gep instruction to make the pointer (actually index at this
2955 // point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing the
2956 // pointer; otherwise, we are analyzing the index.
2957 Value *OrigPtr = Ptr;
2958
2959 Ptr = getLoopVariantGEPOperand(Ptr, SE, Lp);
2960 const SCEV *V = SE->getSCEV(Ptr);
2961
2962 if (Ptr != OrigPtr)
2963 // Strip off casts.
2964 while (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2965 V = C->getOperand();
2966
2967 if (!match(V, m_scev_AffineAddRec(m_SCEV(), m_SCEV(V), m_SpecificLoop(Lp))))
2968 return nullptr;
2969
2970 // Note that the restrictions after this loop invariant check are only
2971 // profitability restrictions.
2972 if (!SE->isLoopInvariant(V, Lp))
2973 return nullptr;
2974
2975 // Look for the loop invariant symbolic value.
2976 if (isa<SCEVUnknown>(V))
2977 return V;
2978
2979 if (auto *C = dyn_cast<SCEVIntegralCastExpr>(V))
2980 if (isa<SCEVUnknown>(C->getOperand()))
2981 return V;
2982
2983 return nullptr;
2984}
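// Editor's illustration (not part of the LLVM source): for an access like
// A[i * Stride] with a loop-invariant i64 %Stride, the stripped index SCEV is
// the AddRec {0,+,%Stride}, and the SCEVUnknown for %Stride is the value that
// collectStridedAccess later records in SymbolicStrides so the loop can be
// versioned on the predicate Stride == 1.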
2985
2986void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2987 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2988 if (!Ptr)
2989 return;
2990
2991 // Note: getStrideFromPointer is a *profitability* heuristic. We
2992 // could broaden the scope of values returned here - to anything
2993 // which happens to be loop invariant and contributes to the
2994 // computation of an interesting IV - but we chose not to as we
2995 // don't have a cost model here, and broadening the scope exposes
2996 // far too many unprofitable cases.
2997 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2998 if (!StrideExpr)
2999 return;
3000
3001 if (auto *Unknown = dyn_cast<SCEVUnknown>(StrideExpr))
3002 if (isa<UndefValue>(Unknown->getValue()))
3003 return;
3004
3005 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
3006 "versioning:");
3007 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
3008
3009 if (!SpeculateUnitStride) {
3010 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
3011 return;
3012 }
3013
3014 // Avoid adding the "Stride == 1" predicate when we know that
3015 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
3016 // or zero iteration loop, as Trip-Count <= Stride == 1.
3017 //
3018 // TODO: We are currently not making a very informed decision on when it is
3019 // beneficial to apply stride versioning. It might make more sense that the
3020 // users of this analysis (such as the vectorizer) will trigger it, based on
3021 // their specific cost considerations; For example, in cases where stride
3022 // versioning does not help resolving memory accesses/dependences, the
3023 // vectorizer should evaluate the cost of the runtime test, and the benefit
3024 // of various possible stride specializations, considering the alternatives
3025 // of using gather/scatters (if available).
3026
3027 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
3028
3029 // Match the types so we can compare the stride and the MaxBTC.
3030 // The Stride can be positive/negative, so we sign extend Stride;
3031 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
3032 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
3033 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3034 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
3035 const SCEV *CastedStride = StrideExpr;
3036 const SCEV *CastedBECount = MaxBTC;
3037 ScalarEvolution *SE = PSE->getSE();
3038 if (BETypeSizeBits >= StrideTypeSizeBits)
3039 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
3040 else
3041 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
3042 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3043 // Since TripCount == BackEdgeTakenCount + 1, checking:
3044 // "Stride >= TripCount" is equivalent to checking:
3045 // Stride - MaxBTC > 0
3046 if (SE->isKnownPositive(StrideMinusBETaken)) {
3047 LLVM_DEBUG(
3048 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3049 "Stride==1 predicate will imply that the loop executes "
3050 "at most once.\n");
3051 return;
3052 }
3053 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3054
3055 // Strip back off the integer cast, and check that our result is a
3056 // SCEVUnknown as we expect.
3057 const SCEV *StrideBase = StrideExpr;
3058 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3059 StrideBase = C->getOperand();
3060 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3061}
3062
3063LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
3064 const TargetTransformInfo *TTI,
3065 const TargetLibraryInfo *TLI, AAResults *AA,
3066 DominatorTree *DT, LoopInfo *LI,
3067 AssumptionCache *AC, bool AllowPartial)
3068 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3069 PtrRtChecking(nullptr), TheLoop(L), AllowPartial(AllowPartial) {
3070 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3071 if (TTI && !TTI->enableScalableVectorization())
3072 // Scale the vector width by 2 as a rough estimate to also consider
3073 // interleaving.
3074 MaxTargetVectorWidthInBits =
3075 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) * 2;
3076
3077 DepChecker = std::make_unique<MemoryDepChecker>(
3078 *PSE, AC, DT, L, SymbolicStrides, MaxTargetVectorWidthInBits, LoopGuards);
3079 PtrRtChecking =
3080 std::make_unique<RuntimePointerChecking>(*DepChecker, SE, LoopGuards);
3081 if (canAnalyzeLoop())
3082 CanVecMem = analyzeLoop(AA, LI, TLI, DT);
3083}
3084
3085void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
3086 if (CanVecMem) {
3087 OS.indent(Depth) << "Memory dependences are safe";
3088 const MemoryDepChecker &DC = getDepChecker();
3089 if (!DC.isSafeForAnyVectorWidth())
3090 OS << " with a maximum safe vector width of "
3091 << DC.getMaxSafeVectorWidthInBits() << " bits";
3094 OS << ", with a maximum safe store-load forward width of " << SLDist
3095 << " bits";
3096 }
3097 if (PtrRtChecking->Need)
3098 OS << " with run-time checks";
3099 OS << "\n";
3100 }
3101
3102 if (HasConvergentOp)
3103 OS.indent(Depth) << "Has convergent operation in loop\n";
3104
3105 if (Report)
3106 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3107
3108 if (auto *Dependences = DepChecker->getDependences()) {
3109 OS.indent(Depth) << "Dependences:\n";
3110 for (const auto &Dep : *Dependences) {
3111 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3112 OS << "\n";
3113 }
3114 } else
3115 OS.indent(Depth) << "Too many dependences, not recorded\n";
3116
3117 // List the pairs of accesses that need run-time checks to prove independence.
3118 PtrRtChecking->print(OS, Depth);
3119 if (PtrRtChecking->Need && !HasCompletePtrRtChecking)
3120 OS.indent(Depth) << "Generated run-time checks are incomplete\n";
3121 OS << "\n";
3122
3123 OS.indent(Depth)
3124 << "Non vectorizable stores to invariant address were "
3125 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3126 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3127 ? ""
3128 : "not ")
3129 << "found in loop.\n";
3130
3131 OS.indent(Depth) << "SCEV assumptions:\n";
3132 PSE->getPredicate().print(OS, Depth);
3133
3134 OS << "\n";
3135
3136 OS.indent(Depth) << "Expressions re-written:\n";
3137 PSE->print(OS, Depth);
3138}
3139
3140const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L,
3141 bool AllowPartial) {
3142 const auto &[It, Inserted] = LoopAccessInfoMap.try_emplace(&L);
3143
3144 // We need to create the LoopAccessInfo if either we don't already have one,
3145 // or if it was created with a different value of AllowPartial.
3146 if (Inserted || It->second->hasAllowPartial() != AllowPartial)
3147 It->second = std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT,
3148 &LI, AC, AllowPartial);
3149
3150 return *It->second;
3151}
3152void LoopAccessInfoManager::clear() {
3153 // Collect LoopAccessInfo entries that may keep references to IR outside the
3154 // analyzed loop or SCEVs that may have been modified or invalidated. At the
3155 // moment, that is loops requiring memory or SCEV runtime checks, as those cache
3156 // SCEVs, e.g. for pointer expressions.
3157 for (const auto &[L, LAI] : LoopAccessInfoMap) {
3158 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3159 LAI->getPSE().getPredicate().isAlwaysTrue())
3160 continue;
3161 LoopAccessInfoMap.erase(L);
3162 }
3163}
3164
3165bool LoopAccessInfoManager::invalidate(
3166 Function &F, const PreservedAnalyses &PA,
3167 FunctionAnalysisManager::Invalidator &Inv) {
3168 // Check whether our analysis is preserved.
3169 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3170 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3171 // If not, give up now.
3172 return true;
3173
3174 // Check whether the analyses we depend on became invalid for any reason.
3175 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3176 // invalid.
3177 return Inv.invalidate<AAManager>(F, PA) ||
3178 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
3179 Inv.invalidate<LoopAnalysis>(F, PA) ||
3180 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
3181}
3182
3183LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
3184 FunctionAnalysisManager &FAM) {
3185 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
3186 auto &AA = FAM.getResult<AAManager>(F);
3187 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3188 auto &LI = FAM.getResult<LoopAnalysis>(F);
3189 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
3190 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3191 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
3192 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI, &AC);
3193}
3194
3195AnalysisKey LoopAccessAnalysis::Key;
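
A minimal consumer sketch, not part of this file: it assumes a hypothetical PrintLoopAccessPass and uses only interfaces defined above and in LoopAccessAnalysis.h (LoopAccessAnalysis, LoopAccessInfoManager::getInfo, LoopAccessInfo::print) to dump per-loop access information from a new-pass-manager function pass.

// Hypothetical illustration pass; the name and printing choices are assumptions.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

namespace {
struct PrintLoopAccessPass : llvm::PassInfoMixin<PrintLoopAccessPass> {
  llvm::PreservedAnalyses run(llvm::Function &F,
                              llvm::FunctionAnalysisManager &FAM) {
    // LoopAccessAnalysis::run builds the manager from the function-level
    // analyses; getInfo lazily creates and caches a LoopAccessInfo per loop.
    llvm::LoopAccessInfoManager &LAIs =
        FAM.getResult<llvm::LoopAccessAnalysis>(F);
    llvm::LoopInfo &LI = FAM.getResult<llvm::LoopAnalysis>(F);
    for (llvm::Loop *L : LI.getLoopsInPreorder()) {
      const llvm::LoopAccessInfo &LAI = LAIs.getInfo(*L);
      llvm::errs() << "Loop " << L->getHeader()->getName() << ":\n";
      // Prints dependence safety, run-time checks, and SCEV assumptions.
      LAI.print(llvm::errs(), /*Depth=*/2);
    }
    return llvm::PreservedAnalyses::all();
  }
};
} // namespace

The analysis itself is only meaningful for innermost loops that pass canAnalyzeLoop; for other loops the printed report will simply state why the accesses could not be analyzed.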