//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/CycleInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));

/// SearchLimitReached / SearchTimes show how often the limit on the number of
/// GEP decomposition steps is reached. Hitting the limit reduces the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      Inv.invalidate<TargetLibraryAnalysis>(Fn, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V, or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (std::optional<TypeSize> Size = getBaseObjectSize(V, DL, &TLI, Opts)) {
    // FIXME: Remove this check, only exists to preserve previous behavior.
    if (Size->isScalable())
      return std::nullopt;
    return Size;
  }
  return std::nullopt;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size. Bails out early unless the root object is passed as the first
/// parameter.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // In the context of c3, the "object" refers to the chunk of memory being
  // allocated. So, the "object" has 100 bytes, and q points into the middle of
  // the "object". However, unless p, the root object, is passed as the first
  // parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
  // bail out early.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that size is actually
  // accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}
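
// Illustrative example (not part of the original file): for an argument
// declared "ptr dereferenceable(64) %p" in a function where null is not a
// valid location, a query with a precise 16-byte access size yields
// max(64, 16) == 64, while a precise 128-byte access yields 128: the access
// itself guarantees the larger extent.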

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

/// Return true if both V1 and V2 are VScale.
static bool areBothVScale(const Value *V1, const Value *V2) {
  return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
         PatternMatch::match(V2, PatternMatch::m_VScale());
}

//===----------------------------------------------------------------------===//
// CaptureAnalysis implementations
//===----------------------------------------------------------------------===//

CaptureAnalysis::~CaptureAnalysis() = default;

CaptureComponents SimpleCaptureAnalysis::getCapturesBefore(
    const Value *Object, const Instruction *I, bool OrAt, bool ReturnCaptures) {
  if (!isIdentifiedFunctionLocal(Object))
    return CaptureComponents::Provenance;

  auto [CacheIt, Inserted] = IsCapturedCache.try_emplace(Object);
  if (Inserted)
    CacheIt->second = PointerMayBeCaptured(
        Object, CaptureComponents::Provenance,
        [](CaptureComponents CC) { return capturesFullProvenance(CC); });

  return ReturnCaptures ? CacheIt->second.WithRet : CacheIt->second.WithoutRet;
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI, const CycleInfo *CI) {
  if (CI)
    return !CI->getCycle(I->getParent());

  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

CaptureComponents EarliestEscapeAnalysis::getCapturesBefore(
    const Value *Object, const Instruction *I, bool OrAt, bool ReturnCaptures) {
  if (!isIdentifiedFunctionLocal(Object))
    return CaptureComponents::Provenance;

  auto Iter = EarliestEscapes.try_emplace(Object);
  if (Iter.second) {
    auto [EarliestInst, Res] = FindEarliestCapture(
        Object, *DT.getRoot()->getParent(), DT, CaptureComponents::Provenance);
    if (EarliestInst)
      Inst2Obj[EarliestInst].push_back(Object);
    Iter.first->second = {EarliestInst, Res};
  }

  if (ReturnCaptures) {
    assert(!I && "Context instruction not supported if ReturnCaptures");
    return Iter.first->second.second.WithRet;
  }

  auto IsNotCapturedBefore = [&]() {
    // No capturing instruction.
    Instruction *CaptureInst = Iter.first->second.first;
    if (!CaptureInst)
      return true;

    // No context instruction means any use is capturing.
    if (!I)
      return false;

    if (I == CaptureInst) {
      if (OrAt)
        return false;
      return isNotInCycle(I, &DT, LI, CI);
    }

    return !isPotentiallyReachable(CaptureInst, I, nullptr, &DT, LI, CI);
  };
  if (IsNotCapturedBefore())
    return CaptureComponents::None;
  return Iter.first->second.second.WithoutRet;
}

void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}
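
// Illustrative example (not part of the original file), assuming a client
// pass that erases instructions: given
//   %a = alloca i64
//   store i64 0, ptr %a        ; (1)
//   call void @escape(ptr %a)  ; (2) earliest capture of %a
// a query at (1) still sees %a as uncaptured, while queries at or after (2)
// must assume its provenance has escaped. A client that erases (2) must call
// removeInstruction() above so the cached earliest-capture entry is dropped.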

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;
  /// Whether trunc(V) is non-negative.
  bool IsNonNegative = false;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits, bool IsNonNegative)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
        IsNonNegative(IsNonNegative) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
                       IsNonNegative && PreserveNonNeg);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(zext(NewV)) == zext(NewV)
    // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
    // The nneg can be preserved from the inner zext here but must be dropped
    // from the outer.
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
                       ZExtNonNegative);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
      // The nneg can be preserved on the outer zext here.
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
                         IsNonNegative);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
    // The nneg can be preserved on the outer zext here.
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (IsNonNegative && !N.isAllNonNegative())
      N = N.intersectWith(
          ConstantRange(APInt::getZero(N.getBitWidth()),
                        APInt::getSignedMinValue(N.getBitWidth())));
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    if (V->getType() != Other.V->getType())
      return false;

    if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
        TruncBits == Other.TruncBits)
      return true;
    // If either CastedValue has a nneg zext then the sext/zext bits are
    // interchangeable for that value.
    if (IsNonNegative || Other.IsNonNegative)
      return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
              TruncBits == Other.TruncBits);
    return false;
  }
};
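
// Illustrative example (not part of the original file): a CastedValue over an
// i32 value %v with TruncBits=8, SExtBits=8 and ZExtBits=16 denotes
//   zext (sext (trunc %v to i24) to i32) to i48
// so getBitWidth() == 48, and evaluateWith(APInt(32, -1)) produces the 48-bit
// constant 0xFFFFFFFF: the trunc keeps 24 one-bits, the sext smears the sign
// bit back up to 32 bits, and the zext pads the top 16 bits with zeros.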

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NUW.
  bool IsNUW;
  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNUW, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val)
      : Val(Val), IsNUW(true), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
    return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
  }
};
} // end anonymous namespace

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true, true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NUW, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNUW &= NUW;
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
        Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}
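
// Illustrative example (not part of the original file): for
//   %a = shl nuw nsw i64 %x, 2   ; %x * 4
//   %b = add nuw nsw i64 %a, 24
// GetLinearExpression(%b) yields Val = %x, Scale = 4, Offset = 24 with
// IsNUW = IsNSW = true, i.e. %b == 4*%x + 24 without wrapping in either
// sense. A trunc anywhere in the chain would force both flags to false.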

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
} // end anonymous namespace

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct DecomposedGEP {
  // Base pointer of the GEP.
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Nowrap flags common to all GEP operations involved in expression.
  GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
       << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
       << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned IndexSize = DL.getIndexTypeSizeInBits(V->getType());
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(IndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      Value *NewV = Op->getOperand(0);
      auto *NewVTy = NewV->getType();
      // Don't look through casts to non-scalar-pointer types or address spaces
      // with differing index widths.
      if (!isa<PointerType>(NewVTy) ||
          DL.getIndexTypeSizeInBits(NewVTy) != IndexSize) {
        Decomposed.Base = V;
        return Decomposed;
      }
      V = NewV;
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // the attributes, but that have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with a returns attribute), it is crucial to
        // use this function, which stays in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing pointers
        // are assumed to be noalias.
        // Pass MustPreserveOffset=true so we exclude llvm.ptrmask, which can
        // change the byte offset by clearing low bits and would otherwise
        // corrupt the symbolic offset we are accumulating in `Decomposed`.
        if (const Value *RP = getArgumentAliasingToReturnedPointer(
                Call, /*MustPreserveOffset=*/true)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track the common nowrap flags for all GEPs we see.
    Decomposed.NWFlags &= GEPOp->getNoWrapFlags();

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(IndexSize);
        continue;
      }

      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
      bool NUW = GEPOp->hasNoUnsignedWrap();
      bool NonNeg = NUSW && NUW;
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), NUW, NUSW);
      Decomposed.Offset += LE.Offset;
      APInt Scale = LE.Scale;
      if (!LE.IsNUW)
        Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
             areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          // We cannot guarantee no-wrap for the merge.
          LE.IsNSW = LE.IsNUW = false;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
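
// Illustrative example (not part of the original file): on a target with
// 64-bit pointers, decomposing
//   %g = getelementptr inbounds [10 x i32], ptr %p, i64 %i, i64 2
// yields Base = %p, Offset = 8 and VarIndices = [(V=%i, scale=40, nsw=1)]:
// the outer index strides over whole [10 x i32] arrays (40 bytes each), and
// the constant inner index contributes 2 * 4 == 8 bytes.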

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  llvm::scope_exit _([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
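
// Illustrative example (not part of the original file): given
//   @c = constant i32 7
//   define void @f(ptr noalias readonly %a) { ... }
// a location rooted at @c is masked down to NoModRef (constant globals can
// never be mutated), while one rooted at %a is masked to Ref: it may be read,
// but cannot be modified while @f executes.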

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    if (Call->isVolatile()) {
      // Volatile operations also access inaccessible memory.
      FuncME |= MemoryEffects::inaccessibleMemOnly();
    }
    Min &= FuncME;
  }

  return Min;
}
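
// Illustrative example (not part of the original file): for
//   call void @g() [ "deopt"() ]
// where @g is declared memory(none), the deopt bundle makes
// hasReadingOperandBundles() return true, so FuncME is raised to readOnly()
// and the overall result is readOnly() rather than none(): bundle operands
// may be read at a deoptimization point even though @g touches no memory.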

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->doesNotAccessMemory(ArgIdx))
    return ModRefInfo::NoModRef;

  if (Call->onlyWritesMemory(ArgIdx))
    return ModRefInfo::Mod;

  if (Call->onlyReadsMemory(ArgIdx))
    return ModRefInfo::Ref;

  return ModRefInfo::ModRef;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // We can completely ignore inaccessible memory here, because MemoryLocations
  // can only reference accessible memory.
  auto ME = AAQI.AAR.getMemoryEffects(Call, AAQI)
                .getWithoutLoc(IRMemLocation::InaccessibleMem);
  if (ME.doesNotAccessMemory())
    return ModRefInfo::NoModRef;

  ModRefInfo ArgMR = ME.getModRef(IRMemLocation::ArgMem);
  ModRefInfo ErrnoMR = ME.getModRef(IRMemLocation::ErrnoMem);
  ModRefInfo OtherMR = ME.getModRef(IRMemLocation::Other);

  // Take into account potential synchronization effects of the call.
  // We assume synchronization cannot occur if the call does not read/write
  // other memory (this in particular ensures that readonly/argmemonly continue
  // to work as expected for frontends that do not emit nosync).
  // FIXME: This should apply to all calls, but is limited to inline asm to
  // limit impact. This ensures that inline asm memory barriers work correctly.
  ModRefInfo SyncMR = ModRefInfo::NoModRef;
  if (isModAndRefSet(OtherMR) && Call->maySynchronize() &&
      Call->isInlineAsm()) {
    SyncMR = getSyncEffects(&AAQI.AAR, Loc, AAQI);
    if (isModAndRefSet(SyncMR))
      return SyncMR;
  }

  // An identified function-local object that does not escape can only be
  // accessed via call arguments. Reduce OtherMR (which includes accesses to
  // escaped memory) based on that.
  //
  // We model calls that can return twice (setjmp) as clobbering non-escaping
  // objects, to model any accesses that may occur prior to the second return.
  // As an exception, ignore allocas, as setjmp is not required to preserve
  // non-volatile stores for them.
  if (isModOrRefSet(OtherMR) && !isa<Constant>(Object) && Call != Object &&
      (isa<AllocaInst>(Object) || !Call->hasFnAttr(Attribute::ReturnsTwice))) {
    CaptureComponents CC = AAQI.CA->getCapturesBefore(
        Object, Call, /*OrAt=*/false, /*ReturnCaptures=*/false);
    if (capturesNothing(CC))
      OtherMR = ModRefInfo::NoModRef;
    else if (capturesReadProvenanceOnly(CC))
      OtherMR = ModRefInfo::Ref;
  }

  // Refine the modref info for argument memory. We only bother to do this
  // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
  // on the final result.
  if ((ArgMR | OtherMR) != OtherMR) {
    ModRefInfo NewArgMR = ModRefInfo::NoModRef;
    for (const Use &U : Call->data_ops()) {
      const Value *Arg = U;
      if (!Arg->getType()->isPointerTy())
        continue;
      unsigned ArgIdx = Call->getDataOperandNo(&U);
      MemoryLocation ArgLoc =
          Call->isArgOperand(&U)
              ? MemoryLocation::getForArgument(Call, ArgIdx, TLI)
              : MemoryLocation::getBeforeOrAfter(Arg);
      AliasResult ArgAlias = AAQI.AAR.alias(ArgLoc, Loc, AAQI, Call);
      if (ArgAlias != AliasResult::NoAlias)
        NewArgMR |= ArgMR & AAQI.AAR.getArgModRefInfo(Call, ArgIdx);

      // Exit early if we cannot improve over the original ArgMR.
      if (NewArgMR == ArgMR)
        break;
    }
    ArgMR = NewArgMR;
  }

  ModRefInfo Result = ArgMR | OtherMR | SyncMR;

  // Refine accesses to errno memory.
  if ((ErrnoMR | Result) != Result) {
    if (AAQI.AAR.aliasErrno(Loc, Call->getModule()) != AliasResult::NoAlias) {
      // Exclusion conditions do not hold; this memory location may alias
      // errno.
      Result |= ErrnoMR;
    }
  }

  if (!isModAndRefSet(Result))
    return Result;

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation;
    // fall back to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}
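
// Illustrative example (not part of the original file): if Call1 is
//   call void @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
// and Call2 may write memory, the result above is Ref: the guard only reads
// heap state that Call2 may clobber. With the roles reversed, a writing
// Call1 gets Mod against the guard, since it can clobber state the guard's
// "deopt" continuation may read.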

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  auto BaseObjectsAlias = [&]() {
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  };

  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    return BaseObjectsAlias();
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Fall back to base objects if pointers have different index widths.
  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
    return BaseObjectsAlias();

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(DecompGEP1, DecompGEP2);
    std::swap(V1Size, V2Size);
    std::swap(UnderlyingV1, UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(-V1Size.getValue()) &&
      isBaseOfObject(DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;

    // Check if the offset is known to not overflow; if it might, attempt to
    // prove it with the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    if (Overflows) {
      ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
      (void)CR.getSignedMax().smul_ov(Scale, Overflows);
    }

    if (!Overflows) {
      // Note that we do not check that the typesize is scalable, as vscale >= 1
      // so noalias still holds so long as the dependency distance is at least
      // as big as the typesize.
      if (VLeftSize.hasValue() &&
          Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
        return AliasResult::NoAlias;
    }
  }
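
  // Illustrative example (not part of the original file): for
  //   %vs = call i64 @llvm.vscale.i64()
  //   %g  = getelementptr inbounds i32, ptr %p, i64 %vs
  // checked against a 4-byte access at %p, the decomposition leaves the
  // single scalable index {V=%vs, Scale=4} and a zero constant offset.
  // Dividing both sides by vscale, the distance (4*vscale bytes) is at least
  // the access size (4 bytes), so the result is NoAlias, provided the scaled
  // offset is known not to overflow (NSW, or proven via vscale_range).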

  // If the difference between pointers is Offset +<nuw> Indices then we know
  // that the addition does not wrap the pointer index type (add nuw) and the
  // constant Offset is a lower bound on the distance between the pointers. We
  // can then prove NoAlias via Offset u>= VLeftSize.
  //   +                +                     +
  //   | BaseOffset     |   +<nuw> Indices    |
  //   ---------------->|-------------------->|
  //   |-->V2Size       |                     |-------> V1Size
  //   LHS              RHS
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.uge(V2Size.getValue()))
    return AliasResult::NoAlias;

  // Bail on analysing scalable LocationSizes.
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // We need to know both access sizes for all the following heuristics. Don't
  // try to reason about sizes larger than the index space.
  unsigned BW = DecompGEP1.Offset.getBitWidth();
  if (!V1Size.hasValue() || !V2Size.hasValue() ||
      !isUIntN(BW, V1Size.getValue()) || !isUIntN(BW, V2Size.getValue()))
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;

    SimplifyQuery SQ(DL, DT, &AC, Index.CxtI, /*UseInstrInfo=*/true);
    KnownBits Known = computeKnownBits(Index.Val.V, SQ);

    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    // If V has known trailing zeros, V is a multiple of 2^VarTZ, so
    // V*Scale is a multiple of ScaleForGCD * 2^VarTZ. Shift ScaleForGCD
    // left to account for this (trailing zeros compose additively through
    // multiplication, even in Z/2^n).
    unsigned VarTZ = Known.countMinTrailingZeros();
    if (VarTZ > 0) {
      unsigned MaxShift =
          Scale.getBitWidth() - ScaleForGCD.getSignificantBits();
      ScaleForGCD <<= std::min(VarTZ, MaxShift);
    }

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR =
        computeConstantRange(Index.Val.V, /*ForSigned=*/false, SQ);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //   1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //   2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;
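
  // Illustrative example (not part of the original file): if GEP1 is at
  // Base + 4*i + 2 with V1Size = 2 and GEP2 is at Base with V2Size = 2, then
  // GCD = 4 and ModOffset = 2. Modulo 4, the first access covers [2, 4) and
  // the second [0, 2), so both conditions above hold (2 u>= 2 and
  // 4 - 2 u>= 2) and the accesses cannot overlap.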

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
  // potentially wrapping math.
  auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
    if (Var.IsNSW)
      return true;

    int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
    // If Scale is small enough, abs(V*Scale) >= abs(Scale) holds.
    // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
    // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
    int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
    if (MaxScaleValueBW <= 0)
      return false;
    return Var.Scale.ule(
        APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
  };

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        MultiplyByScaleNoWrap(Var0) && MultiplyByScaleNoWrap(Var1) &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V,
                        SimplifyQuery(DL, DT, &AC, /*CxtI=*/Var0.CxtI
                                                       ? Var0.CxtI
                                                       : Var1.CxtI)))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi.
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

// If the results agree, take it.
static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}
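
// Illustrative example (not part of the original file): for
//   %p = select i1 %c, ptr %a, ptr %b
//   %q = select i1 %c, ptr %x, ptr %y
// only the pairs (%a, %x) and (%b, %y) are compared, because the shared
// condition %c means the mixed pairs can never be chosen together. If both
// pair queries return NoAlias, the selects themselves cannot alias.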
1493
1494/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1495/// another.
1496AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1497 const Value *V2, LocationSize V2Size,
1498 AAQueryInfo &AAQI) {
1499 if (!PN->getNumIncomingValues())
1500 return AliasResult::NoAlias;
1501 // If the values are PHIs in the same block, we can do a more precise
1502 // as well as efficient check: just check for aliases between the values
1503 // on corresponding edges. Don't do this if we are analyzing across
1504 // iterations, as we may pick a different phi entry in different iterations.
1505 if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1506 if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) {
1507 std::optional<AliasResult> Alias;
1508 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1509 AliasResult ThisAlias = AAQI.AAR.alias(
1510 MemoryLocation(PN->getIncomingValue(i), PNSize),
1511 MemoryLocation(
1512 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1513 AAQI);
1514 if (Alias)
1515 *Alias = MergeAliasResults(*Alias, ThisAlias);
1516 else
1517 Alias = ThisAlias;
1518 if (*Alias == AliasResult::MayAlias)
1519 break;
1520 }
1521 return *Alias;
1522 }
1523
1524 SmallVector<Value *, 4> V1Srcs;
1525 // If a phi operand recurses back to the phi, we can still determine NoAlias
1526 // if we don't alias the underlying objects of the other phi operands, as we
1527 // know that the recursive phi needs to be based on them in some way.
1528 bool isRecursive = false;
1529 auto CheckForRecPhi = [&](Value *PV) {
1531 return false;
1532 if (getUnderlyingObject(PV) == PN) {
1533 isRecursive = true;
1534 return true;
1535 }
1536 return false;
1537 };
1538
1539 SmallPtrSet<Value *, 4> UniqueSrc;
1540 Value *OnePhi = nullptr;
1541 for (Value *PV1 : PN->incoming_values()) {
1542 // Skip the phi itself being the incoming value.
1543 if (PV1 == PN)
1544 continue;
1545
1546 if (isa<PHINode>(PV1)) {
1547 if (OnePhi && OnePhi != PV1) {
1548 // To control potential compile time explosion, we choose to be
1549 // conserviate when we have more than one Phi input. It is important
1550 // that we handle the single phi case as that lets us handle LCSSA
1551 // phi nodes and (combined with the recursive phi handling) simple
1552 // pointer induction variable patterns.
1553 return AliasResult::MayAlias;
1554 }
1555 OnePhi = PV1;
1556 }
1557
1558 if (CheckForRecPhi(PV1))
1559 continue;
1560
1561 if (UniqueSrc.insert(PV1).second)
1562 V1Srcs.push_back(PV1);
1563 }
1564
1565 if (OnePhi && UniqueSrc.size() > 1)
1566 // Out of an abundance of caution, allow only the trivial lcssa and
1567 // recursive phi cases.
1568 return AliasResult::MayAlias;
1569
1570 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1571 // value. This should only be possible in blocks unreachable from the entry
1572 // block, but return MayAlias just in case.
1573 if (V1Srcs.empty())
1574 return AliasResult::MayAlias;
1575
1576 // If this PHI node is recursive, indicate that the pointer may be moved
1577 // across iterations. We can only prove NoAlias if different underlying
1578 // objects are involved.
1579 if (isRecursive)
1581
1582 // In the recursive alias queries below, we may compare values from two
1583 // different loop iterations.
1584 SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1585
1586 AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
1587 MemoryLocation(V2, V2Size), AAQI);
1588
1589 // Early exit if the check of the first PHI source against V2 is MayAlias.
1590 // Other results are not possible.
1591 if (Alias == AliasResult::MayAlias)
1592 return AliasResult::MayAlias;
1593 // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1594 // remain valid for all elements, so we conservatively return MayAlias.
1595 if (isRecursive && Alias != AliasResult::NoAlias)
1596 return AliasResult::MayAlias;
1597
1598 // If all sources of the PHI node NoAlias or MustAlias V2, then return
1599 // NoAlias / MustAlias. Otherwise, return MayAlias.
1600 for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1601 Value *V = V1Srcs[i];
1602
1603 AliasResult ThisAlias = AAQI.AAR.alias(
1604 MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
1605 Alias = MergeAliasResults(ThisAlias, Alias);
1606 if (Alias == AliasResult::MayAlias)
1607 break;
1608 }
1609
1610 return Alias;
1611}
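// Illustrative sketch of the recursive-phi case handled above (hypothetical
// IR; %base and %p are made-up names):
//   loop:
//     %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, ptr %p, i64 4
// getUnderlyingObject(%p.next) is %p itself, so CheckForRecPhi marks the phi
// recursive and V1Srcs reduces to {%base}. NoAlias can then only be concluded
// against pointers based on a different underlying object than %base, which
// is why non-NoAlias results are demoted to MayAlias above.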
1612
1613// Return true for an Argument or extractvalue(Argument). These are all known
1614// not to alias with function-local objects and can come up from coerced
1615// function arguments.
1616static bool isArgumentOrArgumentLike(const Value *V) {
1617 if (isa<Argument>(V))
1618 return true;
1619 auto *E = dyn_cast<ExtractValueInst>(V);
1620 return E && isa<Argument>(E->getOperand(0));
1621}
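// Sketch of the extractvalue(Argument) form this accepts (hypothetical IR,
// assuming a frontend coerced an aggregate parameter):
//   define void @f({ ptr, i64 } %agg) {
//     %p = extractvalue { ptr, i64 } %agg, 0
// Here %p is derived directly from the formal argument %agg, so it is treated
// like a plain pointer Argument by the checks below.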
1622
1623/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1624/// array references.
1625AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1626 const Value *V2, LocationSize V2Size,
1627 AAQueryInfo &AAQI,
1628 const Instruction *CtxI) {
1629 // If either of the memory references is empty, it doesn't matter what the
1630 // pointer values are.
1631 if (V1Size.isZero() || V2Size.isZero())
1632 return AliasResult::NoAlias;
1633
1634 // Strip off any casts if they exist.
1635 V1 = V1->stripPointerCastsForAliasAnalysis();
1636 V2 = V2->stripPointerCastsForAliasAnalysis();
1637
1638 // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1639 // value for undef that aliases nothing in the program.
1640 if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1641 return AliasResult::NoAlias;
1642
1643 // Are we checking for alias of the same value?
1644 // Because we look 'through' phi nodes, we could look at "Value" pointers from
1645 // different iterations. We must therefore make sure that this is not the
1646 // case. The function isValueEqualInPotentialCycles ensures that this cannot
1647 // happen by looking at the visited phi nodes and making sure they cannot
1648 // reach the value.
1649 if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1650 return AliasResult::MustAlias;
1651
1652 // Figure out what objects these things are pointing to if we can.
1653 const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1654 const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1655
1656 // Null values in the default address space don't point to any object, so they
1657 // don't alias any other pointer.
1658 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1659 if (!NullPointerIsDefined(&F, CPN->getPointerType()->getAddressSpace()))
1660 return AliasResult::NoAlias;
1661 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1662 if (!NullPointerIsDefined(&F, CPN->getPointerType()->getAddressSpace()))
1663 return AliasResult::NoAlias;
1664
1665 if (O1 != O2) {
1666 // If V1/V2 point to two different objects, we know that we have no alias.
1667 if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1668 return AliasResult::NoAlias;
1669
1670 // Function arguments can't alias with things that are known to be
1671 // unambiguously identified at the function level.
1672 if ((isArgumentOrArgumentLike(O1) && isIdentifiedFunctionLocal(O2)) ||
1673 (isArgumentOrArgumentLike(O2) && isIdentifiedFunctionLocal(O1)))
1674 return AliasResult::NoAlias;
1675
1676 // If one pointer is the result of a call/invoke or load and the other is a
1677 // non-escaping local object within the same function, then we know the
1678 // object couldn't escape to a point where the call could return it.
1679 //
1680 // Note that if the pointers are in different functions, there are a
1681 // variety of complications. A call with a nocapture argument may still
1682 // temporarily store the nocapture argument's value in a temporary memory
1683 // location if that memory location doesn't escape. Or it may pass a
1684 // nocapture value to other functions as long as they don't capture it.
1685 if (isEscapeSource(O1) && capturesNothing(AAQI.CA->getCapturesBefore(
1686 O2, dyn_cast<Instruction>(O1), /*OrAt=*/true,
1687 /*ReturnCaptures=*/false)))
1688 return AliasResult::NoAlias;
1689 if (isEscapeSource(O2) && capturesNothing(AAQI.CA->getCapturesBefore(
1690 O1, dyn_cast<Instruction>(O2), /*OrAt=*/true,
1691 /*ReturnCaptures=*/false)))
1692 return AliasResult::NoAlias;
1693 }
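  // Sketch of the escape-source rule above (hypothetical IR; @get is a
  // made-up external function):
  //   %a = alloca i32
  //   %p = call ptr @get()
  // %p is an escape source, and if %a has not been captured before the call,
  // @get() cannot have returned a pointer into %a, so the two locations do
  // not alias even though %p's underlying object is unknown.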
1694
1695 // If the size of one access is larger than the entire object on the other
1696 // side, then we know such behavior is undefined and can assume no alias.
1697 bool NullIsValidLocation = NullPointerIsDefined(&F);
1698 if ((isObjectSmallerThan(
1699 O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1700 TLI, NullIsValidLocation)) ||
1701 (isObjectSmallerThan(
1702 O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1703 TLI, NullIsValidLocation)))
1704 return AliasResult::NoAlias;
1705
1706 if (EnableSeparateStorageAnalysis) {
1707 for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
1708 if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
1709 continue;
1710
1711 AssumeInst *Assume = cast<AssumeInst>(Elem);
1712 OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
1713 if (OBU.getTagName() == "separate_storage") {
1714 assert(OBU.Inputs.size() == 2);
1715 const Value *Hint1 = OBU.Inputs[0].get();
1716 const Value *Hint2 = OBU.Inputs[1].get();
1717 // This is often a no-op; instcombine rewrites this for us. No-op
1718 // getUnderlyingObject calls are fast, though.
1719 const Value *HintO1 = getUnderlyingObject(Hint1);
1720 const Value *HintO2 = getUnderlyingObject(Hint2);
1721
1722 DominatorTree *DT = getDT(AAQI);
1723 auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1724 if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1725 return isValidAssumeForContext(Assume, PtrI, DT,
1726 /* AllowEphemerals */ true);
1727 }
1728 if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
1729 const Instruction *FirstI =
1730 &*PtrA->getParent()->getEntryBlock().begin();
1731 return isValidAssumeForContext(Assume, FirstI, DT,
1732 /* AllowEphemerals */ true);
1733 }
1734 return false;
1735 };
1736
1737 if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1738 // Note that we go back to V1 and V2 for the
1739 // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1740 // so strictly more assumptions are valid for them.
1741 if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
1742 /* AllowEphemerals */ true)) ||
1743 ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1744 return AliasResult::NoAlias;
1745 }
1746 }
1747 }
1748 }
1749 }
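  // The operand bundle recognized above looks like this in IR (sketch; %p
  // and %q are placeholder names):
  //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
  // When the hints' underlying objects match O1 and O2 (in either order) and
  // the assume is valid at the query context, the two objects are disjoint
  // by assumption and the query returns NoAlias.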
1750
1751 // If one of the accesses may be before the accessed pointer, canonicalize
1752 // this by using unknown after-pointer sizes for both accesses. This is
1753 // equivalent, because regardless of which pointer is lower, one of them
1754 // will always come after the other, as long as the underlying objects aren't
1755 // disjoint. We do this so that the rest of BasicAA does not have to deal
1756 // with accesses before the base pointer, and to improve cache utilization by
1757 // merging equivalent states.
1758 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1759 V1Size = LocationSize::afterPointer();
1760 V2Size = LocationSize::afterPointer();
1761 }
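  // For instance, a location built with MemoryLocation::getBeforeOrAfter(Ptr)
  // carries LocationSize::beforeOrAfterPointer(); after the canonicalization
  // above, both sides use afterPointer(), so later offset reasoning may
  // assume each access starts at or after its base pointer.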
1762
1763 // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1764 // for recursive queries. For this reason, this limit is chosen to be large
1765 // enough to be very rarely hit, while still being small enough to avoid
1766 // stack overflows.
1767 if (AAQI.Depth >= 512)
1768 return AliasResult::MayAlias;
1769
1770 // Check the cache before climbing up use-def chains. This also terminates
1771 // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1772 // cache key, because some cases where MayBeCrossIteration==false returns
1773 // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
1774 AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1775 {V2, V2Size, AAQI.MayBeCrossIteration});
1776 const bool Swapped = V1 > V2;
1777 if (Swapped)
1778 std::swap(Locs.first, Locs.second);
1779 const auto &Pair = AAQI.AliasCache.try_emplace(
1780 Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1781 if (!Pair.second) {
1782 auto &Entry = Pair.first->second;
1783 if (!Entry.isDefinitive()) {
1784 // Remember that we used an assumption. This may either be a direct use
1785 // of an assumption, or a use of an entry that may itself be based on an
1786 // assumption.
1787 ++AAQI.NumAssumptionUses;
1788 if (Entry.isAssumption())
1789 ++Entry.NumAssumptionUses;
1790 }
1791 // Cache contains sorted {V1,V2} pairs but we should return original order.
1792 auto Result = Entry.Result;
1793 Result.swap(Swapped);
1794 return Result;
1795 }
1796
1797 int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1798 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1799 AliasResult Result =
1800 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1801
1802 auto It = AAQI.AliasCache.find(Locs);
1803 assert(It != AAQI.AliasCache.end() && "Must be in cache");
1804 auto &Entry = It->second;
1805
1806 // Check whether a NoAlias assumption has been used, but disproven.
1807 bool AssumptionDisproven =
1808 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1809 if (AssumptionDisproven)
1810 Result = AliasResult::MayAlias;
1811
1812 // This is a definitive result now, when considered as a root query.
1813 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1814 Entry.Result = Result;
1815 // Cache contains sorted {V1,V2} pairs.
1816 Entry.Result.swap(Swapped);
1817
1818 // If the assumption has been disproven, remove any results that may have
1819 // been based on this assumption. Do this after the Entry updates above to
1820 // avoid iterator invalidation.
1821 if (AssumptionDisproven)
1822 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1823 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1824
1825 // The result may still be based on assumptions higher up in the chain.
1826 // Remember it, so it can be purged from the cache later.
1827 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1828 Result != AliasResult::MayAlias) {
1829 AAQI.AssumptionBasedResults.push_back(Locs);
1830 Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
1831 } else {
1832 Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1833 }
1834
1835 // Depth is incremented before this function is called, so Depth==1 indicates
1836 // a root query.
1837 if (AAQI.Depth == 1) {
1838 // Any remaining assumption based results must be based on proven
1839 // assumptions, so convert them to definitive results.
1840 for (const auto &Loc : AAQI.AssumptionBasedResults) {
1841 auto It = AAQI.AliasCache.find(Loc);
1842 if (It != AAQI.AliasCache.end())
1843 It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
1844 }
1845 AAQI.AssumptionBasedResults.clear();
1846 AAQI.NumAssumptionUses = 0;
1847 }
1848 return Result;
1849}
1850
1851AliasResult BasicAAResult::aliasCheckRecursive(
1852 const Value *V1, LocationSize V1Size,
1853 const Value *V2, LocationSize V2Size,
1854 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1855 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1856 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1857 if (Result != AliasResult::MayAlias)
1858 return Result;
1859 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1860 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1861 Result.swap();
1862 if (Result != AliasResult::MayAlias)
1863 return Result;
1864 }
1865
1866 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1867 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1868 if (Result != AliasResult::MayAlias)
1869 return Result;
1870 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1871 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1872 Result.swap();
1873 if (Result != AliasResult::MayAlias)
1874 return Result;
1875 }
1876
1877 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1878 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1879 if (Result != AliasResult::MayAlias)
1880 return Result;
1881 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1882 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1883 Result.swap();
1884 if (Result != AliasResult::MayAlias)
1885 return Result;
1886 }
1887
1888 // If both pointers are pointing into the same object and one of them
1889 // accesses the entire object, then the accesses must overlap in some way.
1890 if (O1 == O2) {
1891 bool NullIsValidLocation = NullPointerIsDefined(&F);
1892 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1893 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1894 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1895 return AliasResult::PartialAlias;
1896 }
1897
1898 return AliasResult::MayAlias;
1899}
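// Sketch of the O1 == O2 whole-object case above (hypothetical IR):
//   %a = alloca [16 x i8]
//   %g = getelementptr i8, ptr %a, i64 %i
// A precise 16-byte access through %a covers the entire alloca, so any access
// through %g into the same object must overlap it somewhere: the result is
// PartialAlias rather than MayAlias.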
1900
1901AliasResult BasicAAResult::aliasErrno(const MemoryLocation &Loc,
1902 const Module *M) {
1903 // There cannot be any alias with errno if the given memory location is an
1904 // identified function-local object, or the size of the memory access is
1905 // larger than the integer size.
1906 if (Loc.Size.hasValue() &&
1907 Loc.Size.getValue().getKnownMinValue() * 8 > TLI.getIntSize())
1908 return AliasResult::NoAlias;
1909
1910 if (isIdentifiedFunctionLocal(getUnderlyingObject(Loc.Ptr)))
1911 return AliasResult::NoAlias;
1912 return AliasResult::MayAlias;
1913}
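// Worked example: on a target where int is 32 bits, an 8-byte access spans
// 64 bits > TLI.getIntSize(), so it cannot be an access to errno (a larger
// access overlapping errno would be undefined behavior) and the first check
// yields NoAlias; likewise a pointer into an alloca is an identified
// function-local object and can never alias errno.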
1914
1915/// Check whether two Values can be considered equivalent.
1916///
1917/// If the values may come from different cycle iterations, this will also
1918/// check that the values are not part of a cycle. We have to do this because we
1919/// are looking through phi nodes, that is we say
1920/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1921bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1922 const Value *V2,
1923 const AAQueryInfo &AAQI) {
1924 if (V != V2)
1925 return false;
1926
1927 if (!AAQI.MayBeCrossIteration)
1928 return true;
1929
1930 // Non-instructions and instructions in the entry block cannot be part of
1931 // a loop.
1932 const Instruction *Inst = dyn_cast<Instruction>(V);
1933 if (!Inst || Inst->getParent()->isEntryBlock())
1934 return true;
1935
1936 return isNotInCycle(Inst, getDT(AAQI), /*LI=*/nullptr, /*CI=*/nullptr);
1937}
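// Sketch of why the cycle check matters (hypothetical IR):
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add i64 %i, 1
// %i.next is equal to itself as a Value, but when MayBeCrossIteration is set
// the two occurrences may denote different iterations holding different
// numbers; since %i.next sits inside a cycle, this returns false for it.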
1938
1939/// Computes the symbolic difference between two decomposed GEPs.
1940void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1941 const DecomposedGEP &SrcGEP,
1942 const AAQueryInfo &AAQI) {
1943 // Drop nuw flag from GEP if subtraction of constant offsets overflows in an
1944 // unsigned sense.
1945 if (DestGEP.Offset.ult(SrcGEP.Offset))
1946 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1947
1948 DestGEP.Offset -= SrcGEP.Offset;
1949 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1950 // Find V in Dest. This is N^2, but decomposed GEPs almost never have
1951 // more than a few variable indices.
1952 bool Found = false;
1953 for (auto I : enumerate(DestGEP.VarIndices)) {
1954 VariableGEPIndex &Dest = I.value();
1955 if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
1956 !areBothVScale(Dest.Val.V, Src.Val.V)) ||
1957 !Dest.Val.hasSameCastsAs(Src.Val))
1958 continue;
1959
1960 // Normalize IsNegated if we're going to lose the NSW flag anyway.
1961 if (Dest.IsNegated) {
1962 Dest.Scale = -Dest.Scale;
1963 Dest.IsNegated = false;
1964 Dest.IsNSW = false;
1965 }
1966
1967 // If we found it, subtract Src's scale from the matching entry in Dest.
1968 // If the scale goes to zero, remove the entry entirely.
1969 if (Dest.Scale != Src.Scale) {
1970 // Drop nuw flag from GEP if subtraction of V's Scale overflows in an
1971 // unsigned sense.
1972 if (Dest.Scale.ult(Src.Scale))
1973 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1974
1975 Dest.Scale -= Src.Scale;
1976 Dest.IsNSW = false;
1977 } else {
1978 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1979 }
1980 Found = true;
1981 break;
1982 }
1983
1984 // If we didn't consume this entry, add it to the end of the Dest list.
1985 if (!Found) {
1986 VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1987 /* IsNegated */ true};
1988 DestGEP.VarIndices.push_back(Entry);
1989
1990 // Drop nuw flag when we have unconsumed variable indices from SrcGEP.
1991 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1992 }
1993 }
1994}
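// Worked example: subtracting SrcGEP = %base + 4*%i from
// DestGEP = %base + 4*%i + 8 finds the matching {%i, scale 4} entry and
// erases it (equal scales cancel), leaving DestGEP with Offset == 8 and no
// variable indices: the two pointers are a constant 8 bytes apart.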
1995
1996bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1997 LocationSize MaybeV1Size,
1998 LocationSize MaybeV2Size,
1999 AssumptionCache *AC,
2000 DominatorTree *DT,
2001 const AAQueryInfo &AAQI) {
2002 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
2003 !MaybeV2Size.hasValue())
2004 return false;
2005
2006 const uint64_t V1Size = MaybeV1Size.getValue();
2007 const uint64_t V2Size = MaybeV2Size.getValue();
2008
2009 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
2010
2011 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
2012 !Var0.hasNegatedScaleOf(Var1) ||
2013 Var0.Val.V->getType() != Var1.Val.V->getType())
2014 return false;
2015
2016 // We'll strip off the extensions of Var0 and Var1 and do another round
2017 // of GetLinearExpression decomposition. For example, if Var0 is
2018 // zext(%x + 1) we should get V1 == %x and V1Offset == 1.
2019
2020 LinearExpression E0 =
2021 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
2022 LinearExpression E1 =
2023 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
2024 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
2025 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
2026 return false;
2027
2028 // We have a hit - Var0 and Var1 only differ by a constant offset!
2029
2030 // If we've been sext'ed then zext'd, the maximum difference between Var0
2031 // and Var1 can still be computed, but we're just interested in the absolute
2032 // minimum difference between the two. The minimum distance may occur due to
2033 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
2034 // the minimum distance between %i and %i + 5 is 3.
2035 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
2036 MinDiff = APIntOps::umin(MinDiff, Wrapped);
2037 APInt MinDiffBytes =
2038 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
2039
2040 // We can't definitely say whether GEP1 is before or after V2 due to wrapping
2041 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
2042 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
2043 // V2Size can fit in the MinDiffBytes gap.
2044 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
2045 MinDiffBytes.uge(V2Size + GEP.Offset.abs());
2046}
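// Worked example: for indices {zext(%x + 1), scale 4} and {zext(%x),
// scale -4}, both linear expressions strip to %x with offsets 1 and 0, so
// MinDiff == 1 and MinDiffBytes == 4. With GEP.Offset == 0 and two 4-byte
// accesses, 4 >= 4 + 0 holds on both sides and the heuristic returns true,
// letting the caller conclude NoAlias.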
2047
2048//===----------------------------------------------------------------------===//
2049// BasicAliasAnalysis Pass
2050//===----------------------------------------------------------------------===//
2051
2052AnalysisKey BasicAA::Key;
2053
2054BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
2055 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2056 auto &AC = AM.getResult<AssumptionAnalysis>(F);
2057 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
2058 return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
2059}
2060
2061BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {}
2062
2063char BasicAAWrapperPass::ID = 0;
2064
2065void BasicAAWrapperPass::anchor() {}
2066
2068 "Basic Alias Analysis (stateless AA impl)", true, true)
2073 "Basic Alias Analysis (stateless AA impl)", true, true)
2074
2075FunctionPass *llvm::createBasicAAWrapperPass() {
2076 return new BasicAAWrapperPass();
2077}
2078
2079bool BasicAAWrapperPass::runOnFunction(Function &F) {
2080 auto &ACT = getAnalysis<AssumptionCacheTracker>();
2081 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2082 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2083
2084 Result.reset(new BasicAAResult(F.getDataLayout(), F,
2085 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
2086 &DTWP.getDomTree()));
2087
2088 return false;
2089}
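// Usage sketch (not part of this file): in the new pass manager, BasicAA is
// normally reached through AAManager rather than used directly, e.g.
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return BasicAA(); });
// or selected on the opt command line with -aa-pipeline=basic-aa.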
2090