1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
20#include "llvm/ADT/Statistic.h"
23#include "llvm/Analysis/CFG.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
39#include "llvm/IR/GlobalAlias.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/IR/Type.h"
48#include "llvm/IR/User.h"
49#include "llvm/IR/Value.h"
51#include "llvm/Pass.h"
57#include <cassert>
58#include <cstdint>
59#include <cstdlib>
60#include <optional>
61#include <utility>
62
63#define DEBUG_TYPE "basicaa"
64
65using namespace llvm;
66
67/// Enable analysis of recursive PHI nodes.
68static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69 cl::init(true));
70
71static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
72 cl::Hidden, cl::init(false));
73
74/// SearchLimitReached / SearchTimes shows how often the limit to
75/// decompose GEPs is reached. It will affect the precision
76/// of basic alias analysis.
77STATISTIC(SearchLimitReached, "Number of times the limit to "
78 "decompose GEPs is reached");
79STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
80
81// The max limit of the search depth in DecomposeGEPExpression() and
82// getUnderlyingObject().
83static const unsigned MaxLookupSearchDepth = 6;
84
87 // We don't care if this analysis itself is preserved, it has no state. But
88 // we need to check that the analyses it depends on have been. Note that we
89 // may be created without handles to some analyses and in that case don't
90 // depend on them.
91 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
92 (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
93 return true;
94
95 // Otherwise this analysis result remains valid.
96 return false;
97}
98
99//===----------------------------------------------------------------------===//
100// Useful predicates
101//===----------------------------------------------------------------------===//
102
103/// Returns the size of the object specified by V or UnknownSize if unknown.
104static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
105 const TargetLibraryInfo &TLI,
106 bool NullIsValidLoc,
107 bool RoundToAlign = false) {
108 uint64_t Size;
109 ObjectSizeOpts Opts;
110 Opts.RoundToAlign = RoundToAlign;
111 Opts.NullIsUnknownSize = NullIsValidLoc;
112 if (getObjectSize(V, Size, DL, &TLI, Opts))
113 return Size;
114 return MemoryLocation::UnknownSize;
115}
116
117/// Returns true if we can prove that the object specified by V is smaller than
118/// Size.
119static bool isObjectSmallerThan(const Value *V, uint64_t Size,
120 const DataLayout &DL,
121 const TargetLibraryInfo &TLI,
122 bool NullIsValidLoc) {
123 // Note that the meaning of "object" is slightly different in the
124 // following contexts:
125 // c1: llvm::getObjectSize()
126 // c2: llvm.objectsize() intrinsic
127 // c3: isObjectSmallerThan()
128 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
129 // refers to the "entire object".
130 //
131 // Consider this example:
132 // char *p = (char*)malloc(100)
133 // char *q = p+80;
134 //
135 // In the context of c1 and c2, the "object" pointed to by q refers to the
136 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
137 //
138 // However, in the context of c3, the "object" refers to the chunk of memory
139 // being allocated. So, the "object" has 100 bytes, and q points to the middle
140 // of the "object". If q is passed to isObjectSmallerThan() as the 1st
141 // parameter, then before llvm::getObjectSize() is called to get the size of
142 // the entire object, we should:
143 // - either rewind the pointer q to the base address of the object in
144 // question (in this case rewind to p), or
145 // - just give up. It is up to the caller to make sure the pointer points
146 // to the base address of the object.
147 //
148 // We go for the 2nd option for simplicity.
149 if (!isIdentifiedObject(V))
150 return false;
151
152 // This function needs to use the aligned object size because we allow
153 // reads a bit past the end given sufficient alignment.
154 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
155 /*RoundToAlign*/ true);
156
157 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
158}
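// As a rough illustration of the RoundToAlign behavior above (the global @g
// is a hypothetical example, not code from this file): a global such as
//   @g = global [10 x i8] zeroinitializer, align 16
// is treated as having 16 bytes here, so a 12-byte access based on @g is not
// considered provably out of bounds by this query.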
159
160/// Return the minimal extent from \p V to the end of the underlying object,
161/// assuming the result is used in an aliasing query. E.g., we do use the query
162/// location size and the fact that null pointers cannot alias here.
163static uint64_t getMinimalExtentFrom(const Value &V,
164 const LocationSize &LocSize,
165 const DataLayout &DL,
166 bool NullIsValidLoc) {
167 // If we have dereferenceability information we know a lower bound for the
168 // extent as accesses for a lower offset would be valid. We need to exclude
169 // the "or null" part if null is a valid pointer. We can ignore frees, as an
170 // access after free would be undefined behavior.
171 bool CanBeNull, CanBeFreed;
172 uint64_t DerefBytes =
173 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
174 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
175 // If queried with a precise location size, we assume that location size to be
176 // accessed, thus valid.
177 if (LocSize.isPrecise())
178 DerefBytes = std::max(DerefBytes, LocSize.getValue());
179 return DerefBytes;
180}
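// For illustration (the function @f and argument %p are hypothetical): given
//   define void @f(ptr dereferenceable(16) %p)
// a query on %p with a precise 4-byte location size yields an extent of
// max(16, 4) = 16 bytes, whereas a dereferenceable_or_null(16) attribute
// contributes nothing when null is a valid location in the function.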
181
182/// Returns true if we can prove that the object specified by V has size Size.
183static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
184 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
185 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
186 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
187}
188
189//===----------------------------------------------------------------------===//
190// CaptureInfo implementations
191//===----------------------------------------------------------------------===//
192
193CaptureInfo::~CaptureInfo() = default;
194
195bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
196 const Instruction *I) {
197 return isNonEscapingLocalObject(Object, &IsCapturedCache);
198}
199
200bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
201 const Instruction *I) {
202 if (!isIdentifiedFunctionLocal(Object))
203 return false;
204
205 auto Iter = EarliestEscapes.insert({Object, nullptr});
206 if (Iter.second) {
207 Instruction *EarliestCapture = FindEarliestCapture(
208 Object, *const_cast<Function *>(I->getFunction()),
209 /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
210 if (EarliestCapture) {
211 auto Ins = Inst2Obj.insert({EarliestCapture, {}});
212 Ins.first->second.push_back(Object);
213 }
214 Iter.first->second = EarliestCapture;
215 }
216
217 // No capturing instruction.
218 if (!Iter.first->second)
219 return true;
220
221 return I != Iter.first->second &&
222 !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
223}
224
225void EarliestEscapeInfo::removeInstruction(Instruction *I) {
226 auto Iter = Inst2Obj.find(I);
227 if (Iter != Inst2Obj.end()) {
228 for (const Value *Obj : Iter->second)
229 EarliestEscapes.erase(Obj);
230 Inst2Obj.erase(I);
231 }
232}
233
234//===----------------------------------------------------------------------===//
235// GetElementPtr Instruction Decomposition and Analysis
236//===----------------------------------------------------------------------===//
237
238namespace {
239/// Represents zext(sext(trunc(V))).
240struct CastedValue {
241 const Value *V;
242 unsigned ZExtBits = 0;
243 unsigned SExtBits = 0;
244 unsigned TruncBits = 0;
245
246 explicit CastedValue(const Value *V) : V(V) {}
247 explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
248 unsigned TruncBits)
249 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
250
251 unsigned getBitWidth() const {
252 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
253 SExtBits;
254 }
255
256 CastedValue withValue(const Value *NewV) const {
257 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
258 }
259
260 /// Replace V with zext(NewV)
261 CastedValue withZExtOfValue(const Value *NewV) const {
262 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
263 NewV->getType()->getPrimitiveSizeInBits();
264 if (ExtendBy <= TruncBits)
265 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
266
267 // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
268 ExtendBy -= TruncBits;
269 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
270 }
271
272 /// Replace V with sext(NewV)
273 CastedValue withSExtOfValue(const Value *NewV) const {
274 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
275 NewV->getType()->getPrimitiveSizeInBits();
276 if (ExtendBy <= TruncBits)
277 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
278
279 // zext(sext(sext(NewV)))
280 ExtendBy -= TruncBits;
281 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
282 }
283
284 APInt evaluateWith(APInt N) const {
285 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
286 "Incompatible bit width");
287 if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
288 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
289 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
290 return N;
291 }
292
293 ConstantRange evaluateWith(ConstantRange N) const {
294 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
295 "Incompatible bit width");
296 if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
297 if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
298 if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
299 return N;
300 }
301
302 bool canDistributeOver(bool NUW, bool NSW) const {
303 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
304 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
305 // trunc(x op y) == trunc(x) op trunc(y)
306 return (!ZExtBits || NUW) && (!SExtBits || NSW);
307 }
308
309 bool hasSameCastsAs(const CastedValue &Other) const {
310 return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
311 TruncBits == Other.TruncBits;
312 }
313};
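// For illustration: a CastedValue over an i32 value with TruncBits=16,
// SExtBits=8 and ZExtBits=8 models a trunc to i16, then a sext to i24, then
// a zext back to i32; evaluateWith() applies the casts in exactly that
// order, and getBitWidth() is 32 - 16 + 8 + 8 = 32.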
314
315/// Represents zext(sext(trunc(V))) * Scale + Offset.
316struct LinearExpression {
317 CastedValue Val;
318 APInt Scale;
319 APInt Offset;
320
321 /// True if all operations in this expression are NSW.
322 bool IsNSW;
323
324 LinearExpression(const CastedValue &Val, const APInt &Scale,
325 const APInt &Offset, bool IsNSW)
326 : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
327
328 LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
329 unsigned BitWidth = Val.getBitWidth();
330 Scale = APInt(BitWidth, 1);
331 Offset = APInt(BitWidth, 0);
332 }
333
334 LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
335 // The check for zero offset is necessary, because generally
336 // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
337 bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
338 return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
339 }
340};
341}
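// For illustration: a LinearExpression with Scale = 2 and Offset = 3 denotes
// 2*V + 3. Calling mul(4, /*MulIsNSW=*/true) on it yields 8*V + 12, but the
// result is not marked NSW, because with a non-zero offset the distributed
// multiplies are not known to be non-wrapping.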
342
343/// Analyzes the specified value as a linear expression: "A*V + B", where A and
344/// B are constant integers.
345static LinearExpression GetLinearExpression(
346 const CastedValue &Val, const DataLayout &DL, unsigned Depth,
347 AssumptionCache *AC, DominatorTree *DT) {
348 // Limit our recursion depth.
349 if (Depth == 6)
350 return Val;
351
352 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
353 return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
354 Val.evaluateWith(Const->getValue()), true);
355
356 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
357 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
358 APInt RHS = Val.evaluateWith(RHSC->getValue());
359 // The only non-OBO case we deal with is or, and only limited to the
360 // case where it is both nuw and nsw.
361 bool NUW = true, NSW = true;
362 if (isa<OverflowingBinaryOperator>(BOp)) {
363 NUW &= BOp->hasNoUnsignedWrap();
364 NSW &= BOp->hasNoSignedWrap();
365 }
366 if (!Val.canDistributeOver(NUW, NSW))
367 return Val;
368
369 // While we can distribute over trunc, we cannot preserve nowrap flags
370 // in that case.
371 if (Val.TruncBits)
372 NUW = NSW = false;
373
374 LinearExpression E(Val);
375 switch (BOp->getOpcode()) {
376 default:
377 // We don't understand this instruction, so we can't decompose it any
378 // further.
379 return Val;
380 case Instruction::Or:
381 // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
382 // analyze it.
383 if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
384 BOp, DT))
385 return Val;
386
387 [[fallthrough]];
388 case Instruction::Add: {
389 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
390 Depth + 1, AC, DT);
391 E.Offset += RHS;
392 E.IsNSW &= NSW;
393 break;
394 }
395 case Instruction::Sub: {
396 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
397 Depth + 1, AC, DT);
398 E.Offset -= RHS;
399 E.IsNSW &= NSW;
400 break;
401 }
402 case Instruction::Mul:
403 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
404 Depth + 1, AC, DT)
405 .mul(RHS, NSW);
406 break;
407 case Instruction::Shl:
408 // We're trying to linearize an expression of the kind:
409 // shl i8 -128, 36
410 // where the shift count exceeds the bitwidth of the type.
411 // We can't decompose this further (the expression would return
412 // a poison value).
413 if (RHS.getLimitedValue() > Val.getBitWidth())
414 return Val;
415
416 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
417 Depth + 1, AC, DT);
418 E.Offset <<= RHS.getLimitedValue();
419 E.Scale <<= RHS.getLimitedValue();
420 E.IsNSW &= NSW;
421 break;
422 }
423 return E;
424 }
425 }
426
427 if (isa<ZExtInst>(Val.V))
428 return GetLinearExpression(
429 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
430 DL, Depth + 1, AC, DT);
431
432 if (isa<SExtInst>(Val.V))
433 return GetLinearExpression(
434 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
435 DL, Depth + 1, AC, DT);
436
437 return Val;
438}
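// For illustration (hypothetical IR, not taken from a test):
//   %a = add nsw i64 %x, 1
//   %b = shl nsw i64 %a, 2
// GetLinearExpression on %b yields Scale = 4 and Offset = 4, i.e. %b is
// modeled as 4*%x + 4 with IsNSW set.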
439
440/// Ensure a pointer offset fits in an integer of size IndexSize
441/// (in bits) when that size is smaller than the maximum index size. This is
442/// an issue, in particular, for 32b pointers with negative indices
443/// that rely on two's complement wrap-arounds for precise alias information
444/// where the maximum index size is 64b.
445static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
446 assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
447 unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
448 return (Offset << ShiftBits).ashr(ShiftBits);
449}
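// For illustration: with a 64-bit Offset of 0x00000000ffffffff and a 32-bit
// IndexSize, the shift-left/ashr pair above sign-extends the low 32 bits,
// yielding -1, which matches the two's complement wrap-around of a 32-bit
// pointer index.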
450
451namespace {
452// A linear transformation of a Value; this class represents
453// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
454struct VariableGEPIndex {
455 CastedValue Val;
456 APInt Scale;
457
458 // Context instruction to use when querying information about this index.
459 const Instruction *CxtI;
460
461 /// True if all operations in this expression are NSW.
462 bool IsNSW;
463
464 /// True if the index should be subtracted rather than added. We don't simply
465 /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
466 /// non-wrapping, while X + INT_MIN*(-1) wraps.
467 bool IsNegated;
468
469 bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
470 if (IsNegated == Other.IsNegated)
471 return Scale == -Other.Scale;
472 return Scale == Other.Scale;
473 }
474
475 void dump() const {
476 print(dbgs());
477 dbgs() << "\n";
478 }
479 void print(raw_ostream &OS) const {
480 OS << "(V=" << Val.V->getName()
481 << ", zextbits=" << Val.ZExtBits
482 << ", sextbits=" << Val.SExtBits
483 << ", truncbits=" << Val.TruncBits
484 << ", scale=" << Scale
485 << ", nsw=" << IsNSW
486 << ", negated=" << IsNegated << ")";
487 }
488};
489}
490
491// Represents the internal structure of a GEP, decomposed into a base pointer,
492// constant offsets, and variable scaled indices.
493struct DecomposedGEP {
494 // Base pointer of the GEP
495 const Value *Base;
496 // Total constant offset from base.
497 APInt Offset;
498 // Scaled variable (non-constant) indices.
499 SmallVector<VariableGEPIndex, 4> VarIndices;
500 // Are all operations inbounds GEPs or non-indexing operations?
501 // (std::nullopt iff expression doesn't involve any geps)
502 std::optional<bool> InBounds;
503
504 void dump() const {
505 print(dbgs());
506 dbgs() << "\n";
507 }
508 void print(raw_ostream &OS) const {
509 OS << "(DecomposedGEP Base=" << Base->getName()
510 << ", Offset=" << Offset
511 << ", VarIndices=[";
512 for (size_t i = 0; i < VarIndices.size(); i++) {
513 if (i != 0)
514 OS << ", ";
515 VarIndices[i].print(OS);
516 }
517 OS << "])";
518 }
519};
520
521
522/// If V is a symbolic pointer expression, decompose it into a base pointer
523/// with a constant offset and a number of scaled symbolic offsets.
524///
525/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
526/// in the VarIndices vector) are Value*'s that are known to be scaled by the
527/// specified amount, but which may have other unrepresented high bits. As
528/// such, the gep cannot necessarily be reconstructed from its decomposed form.
529DecomposedGEP
530BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
531 AssumptionCache *AC, DominatorTree *DT) {
532 // Limit recursion depth to limit compile time in crazy cases.
533 unsigned MaxLookup = MaxLookupSearchDepth;
534 SearchTimes++;
535 const Instruction *CxtI = dyn_cast<Instruction>(V);
536
537 unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
538 DecomposedGEP Decomposed;
539 Decomposed.Offset = APInt(MaxIndexSize, 0);
540 do {
541 // See if this is a bitcast or GEP.
542 const Operator *Op = dyn_cast<Operator>(V);
543 if (!Op) {
544 // The only non-operator case we can handle are GlobalAliases.
545 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
546 if (!GA->isInterposable()) {
547 V = GA->getAliasee();
548 continue;
549 }
550 }
551 Decomposed.Base = V;
552 return Decomposed;
553 }
554
555 if (Op->getOpcode() == Instruction::BitCast ||
556 Op->getOpcode() == Instruction::AddrSpaceCast) {
557 V = Op->getOperand(0);
558 continue;
559 }
560
561 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
562 if (!GEPOp) {
563 if (const auto *PHI = dyn_cast<PHINode>(V)) {
564 // Look through single-arg phi nodes created by LCSSA.
565 if (PHI->getNumIncomingValues() == 1) {
566 V = PHI->getIncomingValue(0);
567 continue;
568 }
569 } else if (const auto *Call = dyn_cast<CallBase>(V)) {
570 // CaptureTracking can know about special capturing properties of some
571 // intrinsics like launder.invariant.group, that can't be expressed with
572 // the attributes, but have properties like returning an aliasing pointer.
573 // Because some analyses may assume that a nocapture pointer is not
574 // returned from such an intrinsic (because the function would have to
575 // be marked with a returned attribute), it is crucial to use this helper,
576 // which is kept in sync with CaptureTracking. Not using it may
577 // cause weird miscompilations where 2 aliasing pointers are assumed to
578 // not alias.
579 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
580 V = RP;
581 continue;
582 }
583 }
584
585 Decomposed.Base = V;
586 return Decomposed;
587 }
588
589 // Track whether we've seen at least one in bounds gep, and if so, whether
590 // all geps parsed were in bounds.
591 if (Decomposed.InBounds == std::nullopt)
592 Decomposed.InBounds = GEPOp->isInBounds();
593 else if (!GEPOp->isInBounds())
594 Decomposed.InBounds = false;
595
596 assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
597
598 unsigned AS = GEPOp->getPointerAddressSpace();
599 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
600 gep_type_iterator GTI = gep_type_begin(GEPOp);
601 unsigned IndexSize = DL.getIndexSizeInBits(AS);
602 // Assume all GEP operands are constants until proven otherwise.
603 bool GepHasConstantOffset = true;
604 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
605 I != E; ++I, ++GTI) {
606 const Value *Index = *I;
607 // Compute the (potentially symbolic) offset in bytes for this index.
608 if (StructType *STy = GTI.getStructTypeOrNull()) {
609 // For a struct, add the member offset.
610 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
611 if (FieldNo == 0)
612 continue;
613
614 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
615 continue;
616 }
617
618 // For an array/pointer, add the element offset, explicitly scaled.
619 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
620 if (CIdx->isZero())
621 continue;
622
623 // Don't attempt to analyze GEPs if the scalable index is not zero.
624 TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
625 if (AllocTypeSize.isScalable()) {
626 Decomposed.Base = V;
627 return Decomposed;
628 }
629
630 Decomposed.Offset += AllocTypeSize.getFixedValue() *
631 CIdx->getValue().sextOrTrunc(MaxIndexSize);
632 continue;
633 }
634
635 TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
636 if (AllocTypeSize.isScalable()) {
637 Decomposed.Base = V;
638 return Decomposed;
639 }
640
641 GepHasConstantOffset = false;
642
643 // If the integer type is smaller than the index size, it is implicitly
644 // sign extended or truncated to index size.
645 unsigned Width = Index->getType()->getIntegerBitWidth();
646 unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
647 unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
648 LinearExpression LE = GetLinearExpression(
649 CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
650
651 // Scale by the type size.
652 unsigned TypeSize = AllocTypeSize.getFixedValue();
653 LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
654 Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
655 APInt Scale = LE.Scale.sext(MaxIndexSize);
656
657 // If we already had an occurrence of this index variable, merge this
658 // scale into it. For example, we want to handle:
659 // A[x][x] -> x*16 + x*4 -> x*20
660 // This also ensures that 'x' only appears in the index list once.
661 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
662 if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
663 Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
664 Scale += Decomposed.VarIndices[i].Scale;
665 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
666 break;
667 }
668 }
669
670 // Make sure that we have a scale that makes sense for this target's
671 // index size.
672 Scale = adjustToIndexSize(Scale, IndexSize);
673
674 if (!!Scale) {
675 VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
676 /* IsNegated */ false};
677 Decomposed.VarIndices.push_back(Entry);
678 }
679 }
680
681 // Take care of wrap-arounds
682 if (GepHasConstantOffset)
683 Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);
684
685 // Analyze the base pointer next.
686 V = GEPOp->getOperand(0);
687 } while (--MaxLookup);
688
689 // If the chain of expressions is too deep, just return early.
690 Decomposed.Base = V;
691 SearchLimitReached++;
692 return Decomposed;
693}
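// For illustration (hypothetical IR): decomposing
//   %p = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
// yields Base = %base, Offset = 0 and a single VariableGEPIndex for %i with
// Scale = 4 (the alloc size of i32), and InBounds set to true.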
694
695ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
696 AAQueryInfo &AAQI,
697 bool IgnoreLocals) {
698 assert(Visited.empty() && "Visited must be cleared after use!");
699 auto _ = make_scope_exit([&] { Visited.clear(); });
700
701 unsigned MaxLookup = 8;
702 SmallVector<const Value *, 16> Worklist;
703 Worklist.push_back(Loc.Ptr);
704 ModRefInfo Result = ModRefInfo::NoModRef;
705
706 do {
707 const Value *V = getUnderlyingObject(Worklist.pop_back_val());
708 if (!Visited.insert(V).second)
709 continue;
710
711 // Ignore allocas if we were instructed to do so.
712 if (IgnoreLocals && isa<AllocaInst>(V))
713 continue;
714
715 // If the location points to memory that is known to be invariant for
716 // the life of the underlying SSA value, then we can exclude Mod from
717 // the set of valid memory effects.
718 //
719 // An argument that is marked readonly and noalias is known to be
720 // invariant while that function is executing.
721 if (const Argument *Arg = dyn_cast<Argument>(V)) {
722 if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
723 Result |= ModRefInfo::Ref;
724 continue;
725 }
726 }
727
728 // A global constant can't be mutated.
729 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
730 // Note: this doesn't require GV to be "ODR" because it isn't legal for a
731 // global to be marked constant in some modules and non-constant in
732 // others. GV may even be a declaration, not a definition.
733 if (!GV->isConstant())
734 return ModRefInfo::ModRef;
735 continue;
736 }
737
738 // If both select values point to local memory, then so does the select.
739 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
740 Worklist.push_back(SI->getTrueValue());
741 Worklist.push_back(SI->getFalseValue());
742 continue;
743 }
744
745 // If all values incoming to a phi node point to local memory, then so does
746 // the phi.
747 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
748 // Don't bother inspecting phi nodes with many operands.
749 if (PN->getNumIncomingValues() > MaxLookup)
750 return ModRefInfo::ModRef;
751 append_range(Worklist, PN->incoming_values());
752 continue;
753 }
754
755 // Otherwise be conservative.
756 return ModRefInfo::ModRef;
757 } while (!Worklist.empty() && --MaxLookup);
758
759 // If we hit the maximum number of instructions to examine, be conservative.
760 if (!Worklist.empty())
761 return ModRefInfo::ModRef;
762
763 return Result;
764}
765
766static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
767 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
768 return II && II->getIntrinsicID() == IID;
769}
770
771/// Returns the behavior when calling the given call site.
772MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
773 AAQueryInfo &AAQI) {
774 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
775
776 if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
777 MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
778 // Operand bundles on the call may also read or write memory, in addition
779 // to the behavior of the called function.
780 if (Call->hasReadingOperandBundles())
781 FuncME |= MemoryEffects::readOnly();
782 if (Call->hasClobberingOperandBundles())
783 FuncME |= MemoryEffects::writeOnly();
784 Min &= FuncME;
785 }
786
787 return Min;
788}
789
790/// Returns the behavior when calling the given function. For use when the call
791/// site is not known.
792MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
793 switch (F->getIntrinsicID()) {
794 case Intrinsic::experimental_guard:
795 case Intrinsic::experimental_deoptimize:
796 // These intrinsics can read arbitrary memory, and additionally modref
797 // inaccessible memory to model control dependence.
798 return MemoryEffects::readOnly() |
799 MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
800 }
801
802 return F->getMemoryEffects();
803}
804
805ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
806 unsigned ArgIdx) {
807 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
808 return ModRefInfo::Mod;
809
810 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
811 return ModRefInfo::Ref;
812
813 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
814 return ModRefInfo::NoModRef;
815
816 return ModRefInfo::ModRef;
817}
818
819#ifndef NDEBUG
820static const Function *getParent(const Value *V) {
821 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
822 if (!inst->getParent())
823 return nullptr;
824 return inst->getParent()->getParent();
825 }
826
827 if (const Argument *arg = dyn_cast<Argument>(V))
828 return arg->getParent();
829
830 return nullptr;
831}
832
833static bool notDifferentParent(const Value *O1, const Value *O2) {
834
835 const Function *F1 = getParent(O1);
836 const Function *F2 = getParent(O2);
837
838 return !F1 || !F2 || F1 == F2;
839}
840#endif
841
842AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
843 const MemoryLocation &LocB, AAQueryInfo &AAQI,
844 const Instruction *CtxI) {
845 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
846 "BasicAliasAnalysis doesn't support interprocedural queries.");
847 return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
848}
849
850/// Checks to see if the specified callsite can clobber the specified memory
851/// object.
852///
853/// Since we only look at local properties of this function, we really can't
854/// say much about this query. We do, however, use simple "address taken"
855/// analysis on local objects.
856ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
857 const MemoryLocation &Loc,
858 AAQueryInfo &AAQI) {
859 assert(notDifferentParent(Call, Loc.Ptr) &&
860 "AliasAnalysis query involving multiple functions!");
861
862 const Value *Object = getUnderlyingObject(Loc.Ptr);
863
864 // Calls marked 'tail' cannot read or write allocas from the current frame
865 // because the current frame might be destroyed by the time they run. However,
866 // a tail call may use an alloca with byval. Calling with byval copies the
867 // contents of the alloca into argument registers or stack slots, so there is
868 // no lifetime issue.
869 if (isa<AllocaInst>(Object))
870 if (const CallInst *CI = dyn_cast<CallInst>(Call))
871 if (CI->isTailCall() &&
872 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
873 return ModRefInfo::NoModRef;
874
875 // Stack restore is able to modify unescaped dynamic allocas. Assume it may
876 // modify them even though the alloca is not escaped.
877 if (auto *AI = dyn_cast<AllocaInst>(Object))
878 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
879 return ModRefInfo::Mod;
880
881 // A call can access a locally allocated object either because it is passed as
882 // an argument to the call, or because it has escaped prior to the call.
883 //
884 // Make sure the object has not escaped here, and then check that none of the
885 // call arguments alias the object below.
886 if (!isa<Constant>(Object) && Call != Object &&
887 AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
888
889 // Optimistically assume that call doesn't touch Object and check this
890 // assumption in the following loop.
891 ModRefInfo Result = ModRefInfo::NoModRef;
892
893 unsigned OperandNo = 0;
894 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
895 CI != CE; ++CI, ++OperandNo) {
896 if (!(*CI)->getType()->isPointerTy())
897 continue;
898
899 // Call doesn't access memory through this operand, so we don't care
900 // if it aliases with Object.
901 if (Call->doesNotAccessMemory(OperandNo))
902 continue;
903
904 // If this is a no-capture pointer argument, see if we can tell that it
905 // is impossible to alias the pointer we're checking.
906 AliasResult AR =
907 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
908 MemoryLocation::getBeforeOrAfter(Object), AAQI);
909 // Operand doesn't alias 'Object', continue looking for other aliases
910 if (AR == AliasResult::NoAlias)
911 continue;
912 // Operand aliases 'Object', but call doesn't modify it. Strengthen
913 // initial assumption and keep looking in case if there are more aliases.
914 if (Call->onlyReadsMemory(OperandNo)) {
915 Result |= ModRefInfo::Ref;
916 continue;
917 }
918 // Operand aliases 'Object' but call only writes into it.
919 if (Call->onlyWritesMemory(OperandNo)) {
920 Result |= ModRefInfo::Mod;
921 continue;
922 }
923 // This operand aliases 'Object' and call reads and writes into it.
924 // Setting ModRef will not yield an early return below, MustAlias is not
925 // used further.
926 Result = ModRefInfo::ModRef;
927 break;
928 }
929
930 // Early return if we improved mod ref information
931 if (!isModAndRefSet(Result))
932 return Result;
933 }
934
935 // If the call is malloc/calloc like, we can assume that it doesn't
936 // modify any IR visible value. This is only valid because we assume these
937 // routines do not read values visible in the IR. TODO: Consider special
938 // casing realloc and strdup routines which access only their arguments as
939 // well. Or alternatively, replace all of this with inaccessiblememonly once
940 // that's implemented fully.
941 if (isMallocOrCallocLikeFn(Call, &TLI)) {
942 // Be conservative if the accessed pointer may alias the allocation -
943 // fallback to the generic handling below.
944 if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
945 AliasResult::NoAlias)
946 return ModRefInfo::NoModRef;
947 }
948
949 // Like assumes, invariant.start intrinsics were also marked as arbitrarily
950 // writing so that proper control dependencies are maintained but they never
951 // mod any particular memory location visible to the IR.
952 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
953 // intrinsic is now modeled as reading memory. This prevents hoisting the
954 // invariant.start intrinsic over stores. Consider:
955 // *ptr = 40;
956 // *ptr = 50;
957 // invariant_start(ptr)
958 // int val = *ptr;
959 // print(val);
960 //
961 // This cannot be transformed to:
962 //
963 // *ptr = 40;
964 // invariant_start(ptr)
965 // *ptr = 50;
966 // int val = *ptr;
967 // print(val);
968 //
969 // The transformation will cause the second store to be ignored (based on
970 // rules of invariant.start) and print 40, while the first program always
971 // prints 50.
972 if (isIntrinsicCall(Call, Intrinsic::invariant_start))
973 return ModRefInfo::Ref;
974
975 // Be conservative.
976 return ModRefInfo::ModRef;
977}
978
979ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
980 const CallBase *Call2,
981 AAQueryInfo &AAQI) {
982 // Guard intrinsics are marked as arbitrarily writing so that proper control
984 // dependencies are maintained but they never modify any particular memory
984 // location.
985 //
986 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
987 // heap state at the point the guard is issued needs to be consistent in case
988 // the guard invokes the "deopt" continuation.
989
990 // NB! This function is *not* commutative, so we special case two
991 // possibilities for guard intrinsics.
992
993 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
994 return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
995 ? ModRefInfo::Ref
996 : ModRefInfo::NoModRef;
997
998 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
999 return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
1000 ? ModRefInfo::Mod
1001 : ModRefInfo::NoModRef;
1002
1003 // Be conservative.
1004 return ModRefInfo::ModRef;
1005}
1006
1007/// Return true if we know V to be the base address of the corresponding memory
1008/// object. This implies that any address less than V must be out of bounds
1009/// for the underlying object. Note that just being isIdentifiedObject() is
1010/// not enough - For example, a negative offset from a noalias argument or call
1011/// can be inbounds w.r.t the actual underlying object.
1012static bool isBaseOfObject(const Value *V) {
1013 // TODO: We can handle other cases here
1014 // 1) For GC languages, arguments to functions are often required to be
1015 // base pointers.
1016 // 2) Result of allocation routines are often base pointers. Leverage TLI.
1017 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1018}
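// For illustration (hypothetical IR): a noalias argument %p may point into
// the middle of its underlying allocation, so
//   %q = getelementptr inbounds i8, ptr %p, i64 -8
// can still be in bounds; this is why arguments are not treated as base
// addresses here.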
1019
1020/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1021/// another pointer.
1022///
1023/// We know that V1 is a GEP, but we don't know anything about V2.
1024/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1025/// V2.
1026AliasResult BasicAAResult::aliasGEP(
1027 const GEPOperator *GEP1, LocationSize V1Size,
1028 const Value *V2, LocationSize V2Size,
1029 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1030 if (!V1Size.hasValue() && !V2Size.hasValue()) {
1031 // TODO: This limitation exists for compile-time reasons. Relax it if we
1032 // can avoid exponential pathological cases.
1033 if (!isa<GEPOperator>(V2))
1034 return AliasResult::MayAlias;
1035
1036 // If both accesses have unknown size, we can only check whether the base
1037 // objects don't alias.
1038 AliasResult BaseAlias =
1039 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1040 MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1041 return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1042 : AliasResult::MayAlias;
1043 }
1044
1045 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1046 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1047
1048 // Bail if we were not able to decompose anything.
1049 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1050 return AliasResult::MayAlias;
1051
1052 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1053 // symbolic difference.
1054 subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
1055
1056 // If an inbounds GEP would have to start from an out of bounds address
1057 // for the two to alias, then we can assume noalias.
1058 if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1059 V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1060 isBaseOfObject(DecompGEP2.Base))
1061 return AliasResult::NoAlias;
1062
1063 if (isa<GEPOperator>(V2)) {
1064 // Symmetric case to above.
1065 if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1066 V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1067 isBaseOfObject(DecompGEP1.Base))
1068 return AliasResult::NoAlias;
1069 }
1070
1071 // For GEPs with identical offsets, we can preserve the size and AAInfo
1072 // when performing the alias check on the underlying objects.
1073 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1074 return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
1075 MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
1076
1077 // Do the base pointers alias?
1078 AliasResult BaseAlias =
1079 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1080 MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1081
1082 // If we get a No or May, then return it immediately, no amount of analysis
1083 // will improve this situation.
1084 if (BaseAlias != AliasResult::MustAlias) {
1085 assert(BaseAlias == AliasResult::NoAlias ||
1086 BaseAlias == AliasResult::MayAlias);
1087 return BaseAlias;
1088 }
1089
1090 // If there is a constant difference between the pointers, but the difference
1091 // is less than the size of the associated memory object, then we know
1092 // that the objects are partially overlapping. If the difference is
1093 // greater, we know they do not overlap.
1094 if (DecompGEP1.VarIndices.empty()) {
1095 APInt &Off = DecompGEP1.Offset;
1096
1097 // Initialize for Off >= 0 (V2 <= GEP1) case.
1098 const Value *LeftPtr = V2;
1099 const Value *RightPtr = GEP1;
1100 LocationSize VLeftSize = V2Size;
1101 LocationSize VRightSize = V1Size;
1102 const bool Swapped = Off.isNegative();
1103
1104 if (Swapped) {
1105 // Swap if we have the situation where:
1106 // + +
1107 // | BaseOffset |
1108 // ---------------->|
1109 // |-->V1Size |-------> V2Size
1110 // GEP1 V2
1111 std::swap(LeftPtr, RightPtr);
1112 std::swap(VLeftSize, VRightSize);
1113 Off = -Off;
1114 }
1115
1116 if (!VLeftSize.hasValue())
1117 return AliasResult::MayAlias;
1118
1119 const uint64_t LSize = VLeftSize.getValue();
1120 if (Off.ult(LSize)) {
1121 // Conservatively drop processing if a phi was visited and/or offset is
1122 // too big.
1123 AliasResult AR = AliasResult::PartialAlias;
1124 if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1125 (Off + VRightSize.getValue()).ule(LSize)) {
1126 // Memory referenced by right pointer is nested. Save the offset in
1127 // cache. Note that originally offset estimated as GEP1-V2, but
1128 // AliasResult contains the shift that represents GEP1+Offset=V2.
1129 AR.setOffset(-Off.getSExtValue());
1130 AR.swap(Swapped);
1131 }
1132 return AR;
1133 }
1134 return AliasResult::NoAlias;
1135 }
1136
1137 // We need to know both access sizes for all the following heuristics.
1138 if (!V1Size.hasValue() || !V2Size.hasValue())
1139 return AliasResult::MayAlias;
1140
1141 APInt GCD;
1142 ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1143 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1144 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1145 const APInt &Scale = Index.Scale;
1146 APInt ScaleForGCD = Scale;
1147 if (!Index.IsNSW)
1148 ScaleForGCD =
1149 APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
1150
1151 if (i == 0)
1152 GCD = ScaleForGCD.abs();
1153 else
1154 GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1155
1156 ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1157 true, &AC, Index.CxtI);
1158 KnownBits Known =
1159 computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1160 CR = CR.intersectWith(
1161 ConstantRange::fromKnownBits(Known, /* Signed */ true),
1162 ConstantRange::Signed);
1163 CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1164
1165 assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1166 "Bit widths are normalized to MaxIndexSize");
1167 if (Index.IsNSW)
1168 CR = CR.smul_sat(ConstantRange(Scale));
1169 else
1170 CR = CR.smul_fast(ConstantRange(Scale));
1171
1172 if (Index.IsNegated)
1173 OffsetRange = OffsetRange.sub(CR);
1174 else
1175 OffsetRange = OffsetRange.add(CR);
1176 }
1177
1178 // We now have accesses at two offsets from the same base:
1179 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1180 // 2. 0 with size V2Size
1181 // Using arithmetic modulo GCD, the accesses are at
1182 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1183 // into the range [V2Size..GCD), then we know they cannot overlap.
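  // For example: with GCD = 8, ModOffset = 4 and both sizes equal to 4, the
  // first access covers [4..8) and the second covers [0..4) modulo 8, so the
  // accesses cannot overlap.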
1184 APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1185 if (ModOffset.isNegative())
1186 ModOffset += GCD; // We want mod, not rem.
1187 if (ModOffset.uge(V2Size.getValue()) &&
1188 (GCD - ModOffset).uge(V1Size.getValue()))
1189 return AliasResult::NoAlias;
1190
1191 // Compute ranges of potentially accessed bytes for both accesses. If the
1192 // intersection is empty, there can be no overlap.
1193 unsigned BW = OffsetRange.getBitWidth();
1194 ConstantRange Range1 = OffsetRange.add(
1195 ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1196 ConstantRange Range2 =
1197 ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1198 if (Range1.intersectWith(Range2).isEmptySet())
1199 return AliasResult::NoAlias;
1200
1201 // Try to determine the range of values for VarIndex such that
1202 // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1203 std::optional<APInt> MinAbsVarIndex;
1204 if (DecompGEP1.VarIndices.size() == 1) {
1205 // VarIndex = Scale*V.
1206 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1207 if (Var.Val.TruncBits == 0 &&
1208 isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1209 // If V != 0, then abs(VarIndex) > 0.
1210 MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1);
1211
1212 // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1213 // potentially wrapping math.
1214 auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1215 if (Var.IsNSW)
1216 return true;
1217
1218 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1219 // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1220 // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1221 // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1222 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1223 if (MaxScaleValueBW <= 0)
1224 return false;
1225 return Var.Scale.ule(
1226 APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1227 };
1228 // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1229 // presence of potentially wrapping math.
1230 if (MultiplyByScaleNoWrap(Var)) {
1231 // If V != 0 then abs(VarIndex) >= abs(Scale).
1232 MinAbsVarIndex = Var.Scale.abs();
1233 }
1234 }
1235 } else if (DecompGEP1.VarIndices.size() == 2) {
1236 // VarIndex = Scale*V0 + (-Scale)*V1.
1237 // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1238 // Check that MayBeCrossIteration is false, to avoid reasoning about
1239 // inequality of values across loop iterations.
1240 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1241 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1242 if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
1243 Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
1244 isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1245 DT))
1246 MinAbsVarIndex = Var0.Scale.abs();
1247 }
1248
1249 if (MinAbsVarIndex) {
1250 // The constant offset will have added at least +/-MinAbsVarIndex to it.
1251 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1252 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1253 // We know that Offset <= OffsetLo || Offset >= OffsetHi
1254 if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1255 OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1256 return AliasResult::NoAlias;
1257 }
1258
1259 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1260 return AliasResult::NoAlias;
1261
1262 // Statically, we can see that the base objects are the same, but the
1263 // pointers have dynamic offsets which we can't resolve. And none of our
1264 // little tricks above worked.
1265 return AliasResult::MayAlias;
1266}
1267
1268static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1269 // If the results agree, take it.
1270 if (A == B)
1271 return A;
1272 // A mix of PartialAlias and MustAlias is PartialAlias.
1273 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1274 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1275 return AliasResult::PartialAlias;
1276 // Otherwise, we don't know anything.
1277 return AliasResult::MayAlias;
1278}
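// For illustration: merging PartialAlias with MustAlias gives PartialAlias,
// while merging NoAlias with MustAlias degrades to MayAlias because the two
// results contradict each other.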
1279
1280/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1281/// against another.
1282AliasResult
1283BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1284 const Value *V2, LocationSize V2Size,
1285 AAQueryInfo &AAQI) {
1286 // If the values are Selects with the same condition, we can do a more precise
1287 // check: just check for aliases between the values on corresponding arms.
1288 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1289 if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
1290 AAQI)) {
1291 AliasResult Alias =
1292 AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1293 MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1294 if (Alias == AliasResult::MayAlias)
1295 return AliasResult::MayAlias;
1296 AliasResult ThisAlias =
1297 AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1298 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1299 return MergeAliasResults(ThisAlias, Alias);
1300 }
1301
1302 // If both arms of the Select node NoAlias or MustAlias V2, then returns
1303 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1304 AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1305 MemoryLocation(V2, V2Size), AAQI);
1306 if (Alias == AliasResult::MayAlias)
1307 return AliasResult::MayAlias;
1308
1309 AliasResult ThisAlias =
1310 AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1311 MemoryLocation(V2, V2Size), AAQI);
1312 return MergeAliasResults(ThisAlias, Alias);
1313}
1314
1315/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1316/// another.
1317AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1318 const Value *V2, LocationSize V2Size,
1319 AAQueryInfo &AAQI) {
1320 if (!PN->getNumIncomingValues())
1321 return AliasResult::NoAlias;
1322 // If the values are PHIs in the same block, we can do a more precise
1323 // as well as efficient check: just check for aliases between the values
1324 // on corresponding edges.
1325 if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1326 if (PN2->getParent() == PN->getParent()) {
1327 std::optional<AliasResult> Alias;
1328 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1329 AliasResult ThisAlias = AAQI.AAR.alias(
1330 MemoryLocation(PN->getIncomingValue(i), PNSize),
1331 MemoryLocation(
1332 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1333 AAQI);
1334 if (Alias)
1335 *Alias = MergeAliasResults(*Alias, ThisAlias);
1336 else
1337 Alias = ThisAlias;
1338 if (*Alias == AliasResult::MayAlias)
1339 break;
1340 }
1341 return *Alias;
1342 }
1343
1344 SmallVector<Value *, 4> V1Srcs;
1345 // If a phi operand recurses back to the phi, we can still determine NoAlias
1346 // if we don't alias the underlying objects of the other phi operands, as we
1347 // know that the recursive phi needs to be based on them in some way.
1348 bool isRecursive = false;
1349 auto CheckForRecPhi = [&](Value *PV) {
1350 if (!EnableRecPhiAnalysis)
1351 return false;
1352 if (getUnderlyingObject(PV) == PN) {
1353 isRecursive = true;
1354 return true;
1355 }
1356 return false;
1357 };
1358
1359 SmallPtrSet<Value *, 4> UniqueSrc;
1360 Value *OnePhi = nullptr;
1361 for (Value *PV1 : PN->incoming_values()) {
1362 // Skip the phi itself being the incoming value.
1363 if (PV1 == PN)
1364 continue;
1365
1366 if (isa<PHINode>(PV1)) {
1367 if (OnePhi && OnePhi != PV1) {
1368 // To control potential compile time explosion, we choose to be
1369 // conservative when we have more than one Phi input. It is important
1370 // that we handle the single phi case as that lets us handle LCSSA
1371 // phi nodes and (combined with the recursive phi handling) simple
1372 // pointer induction variable patterns.
1373 return AliasResult::MayAlias;
1374 }
1375 OnePhi = PV1;
1376 }
1377
1378 if (CheckForRecPhi(PV1))
1379 continue;
1380
1381 if (UniqueSrc.insert(PV1).second)
1382 V1Srcs.push_back(PV1);
1383 }
1384
1385 if (OnePhi && UniqueSrc.size() > 1)
1386 // Out of an abundance of caution, allow only the trivial lcssa and
1387 // recursive phi cases.
1388 return AliasResult::MayAlias;
1389
1390 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1391 // value. This should only be possible in blocks unreachable from the entry
1392 // block, but return MayAlias just in case.
1393 if (V1Srcs.empty())
1394 return AliasResult::MayAlias;
1395
1396 // If this PHI node is recursive, indicate that the pointer may be moved
1397 // across iterations. We can only prove NoAlias if different underlying
1398 // objects are involved.
1399 if (isRecursive)
1400 PNSize = LocationSize::beforeOrAfterPointer();
1401
1402 // In the recursive alias queries below, we may compare values from two
1403 // different loop iterations.
1404 SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1405
1406 AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
1407 MemoryLocation(V2, V2Size), AAQI);
1408
1409 // Early exit if the check of the first PHI source against V2 is MayAlias.
1410 // Other results are not possible.
1411 if (Alias == AliasResult::MayAlias)
1412 return AliasResult::MayAlias;
1413 // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1414 // remain valid for all elements, so we conservatively return MayAlias.
1415 if (isRecursive && Alias != AliasResult::NoAlias)
1416 return AliasResult::MayAlias;
1417
1418 // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1419 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1420 for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1421 Value *V = V1Srcs[i];
1422
1423 AliasResult ThisAlias = AAQI.AAR.alias(
1424 MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
1425 Alias = MergeAliasResults(ThisAlias, Alias);
1426 if (Alias == AliasResult::MayAlias)
1427 break;
1428 }
1429
1430 return Alias;
1431}
1432
1433/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1434/// array references.
1435AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1436 const Value *V2, LocationSize V2Size,
1437 AAQueryInfo &AAQI,
1438 const Instruction *CtxI) {
1439 // If either of the memory references is empty, it doesn't matter what the
1440 // pointer values are.
1441 if (V1Size.isZero() || V2Size.isZero())
1442 return AliasResult::NoAlias;
1443
1444 // Strip off any casts if they exist.
1445 V1 = V1->stripPointerCastsForAliasAnalysis();
1446 V2 = V2->stripPointerCastsForAliasAnalysis();
1447
1448 // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1449 // value for undef that aliases nothing in the program.
1450 if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1451 return AliasResult::NoAlias;
1452
1453 // Are we checking for alias of the same value?
1454 // Because we look 'through' phi nodes, we could look at "Value" pointers from
1455 // different iterations. We must therefore make sure that this is not the
1456 // case. The function isValueEqualInPotentialCycles ensures that this cannot
1457 // happen by looking at the visited phi nodes and making sure they cannot
1458 // reach the value.
1459 if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1460 return AliasResult::MustAlias;
1461
1462 if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1463 return AliasResult::NoAlias; // Scalars cannot alias each other
1464
1465 // Figure out what objects these things are pointing to if we can.
1466 const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1467 const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1468
1469 // Null values in the default address space don't point to any object, so they
1470 // don't alias any other pointer.
1471 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1472 if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1473 return AliasResult::NoAlias;
1474 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1475 if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1476 return AliasResult::NoAlias;
1477
1478 if (O1 != O2) {
1479 // If V1/V2 point to two different objects, we know that we have no alias.
1480 if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1481 return AliasResult::NoAlias;
1482
1483 // Constant pointers can't alias with non-const isIdentifiedObject objects.
1484 if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1485 (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1486 return AliasResult::NoAlias;
1487
1488 // Function arguments can't alias with things that are known to be
1490 // unambiguously identified at the function level.
1490 if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1491 (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1492 return AliasResult::NoAlias;
1493
1494 // If one pointer is the result of a call/invoke or load and the other is a
1495 // non-escaping local object within the same function, then we know the
1496 // object couldn't escape to a point where the call could return it.
1497 //
1498 // Note that if the pointers are in different functions, there are a
1499 // variety of complications. A call with a nocapture argument may still
1500 // temporarily store the nocapture argument's value in a temporary memory
1501 // location if that memory location doesn't escape. Or it may pass a
1502 // nocapture value to other functions as long as they don't capture it.
1503 if (isEscapeSource(O1) &&
1504 AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1505 return AliasResult::NoAlias;
1506 if (isEscapeSource(O2) &&
1507 AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1508 return AliasResult::NoAlias;
1509 }
1510
1511 // If the size of one access is larger than the entire object on the other
1512 // side, then we know such behavior is undefined and can assume no alias.
1513 bool NullIsValidLocation = NullPointerIsDefined(&F);
1514 if ((isObjectSmallerThan(
1515 O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1516 TLI, NullIsValidLocation)) ||
1517 (isObjectSmallerThan(
1518 O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1519 TLI, NullIsValidLocation)))
1520 return AliasResult::NoAlias;
1521
1522 if (CtxI && EnableSeparateStorageAnalysis) {
1523 for (auto &AssumeVH : AC.assumptions()) {
1524 if (!AssumeVH)
1525 continue;
1526
1527 AssumeInst *Assume = cast<AssumeInst>(AssumeVH);
1528
1529 for (unsigned Idx = 0; Idx < Assume->getNumOperandBundles(); Idx++) {
1530 OperandBundleUse OBU = Assume->getOperandBundleAt(Idx);
1531 if (OBU.getTagName() == "separate_storage") {
1532 assert(OBU.Inputs.size() == 2);
1533 const Value *Hint1 = OBU.Inputs[0].get();
1534 const Value *Hint2 = OBU.Inputs[1].get();
1535 // This is often a no-op; instcombine rewrites this for us. No-op
1536 // getUnderlyingObject calls are fast, though.
1537 const Value *HintO1 = getUnderlyingObject(Hint1);
1538 const Value *HintO2 = getUnderlyingObject(Hint2);
1539
1540 if (((O1 == HintO1 && O2 == HintO2) ||
1541 (O1 == HintO2 && O2 == HintO1)) &&
1542 isValidAssumeForContext(Assume, CtxI, DT))
1543 return AliasResult::NoAlias;
1544 }
1545 }
1546 }
1547 }
1548
1549 // If one of the accesses may be before the accessed pointer, canonicalize this
1550 // by using unknown after-pointer sizes for both accesses. This is
1551 // equivalent, because regardless of which pointer is lower, one of them
1552 // will always come after the other, as long as the underlying objects aren't
1553 // disjoint. We do this so that the rest of BasicAA does not have to deal
1554 // with accesses before the base pointer, and to improve cache utilization by
1555 // merging equivalent states.
1556 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1557 V1Size = LocationSize::afterPointer();
1558 V2Size = LocationSize::afterPointer();
1559 }
1560
1561 // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1562 // for recursive queries. For this reason, this limit is chosen to be large
1563 // enough to be very rarely hit, while still being small enough to avoid
1564 // stack overflows.
1565 if (AAQI.Depth >= 512)
1566 return AliasResult::MayAlias;
1567
1568 // Check the cache before climbing up use-def chains. This also terminates
1569 // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1570 // cache key, because some cases where MayBeCrossIteration==false returns
1571 // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
1572 AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1573 {V2, V2Size, AAQI.MayBeCrossIteration});
1574 const bool Swapped = V1 > V2;
1575 if (Swapped)
1576 std::swap(Locs.first, Locs.second);
1577 const auto &Pair = AAQI.AliasCache.try_emplace(
1578 Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
1579 if (!Pair.second) {
1580 auto &Entry = Pair.first->second;
1581 if (!Entry.isDefinitive()) {
1582 // Remember that we used an assumption.
1583 ++Entry.NumAssumptionUses;
1584 ++AAQI.NumAssumptionUses;
1585 }
1586 // Cache contains sorted {V1,V2} pairs but we should return original order.
1587 auto Result = Entry.Result;
1588 Result.swap(Swapped);
1589 return Result;
1590 }
1591
1592 int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1593 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1594 AliasResult Result =
1595 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1596
1597 auto It = AAQI.AliasCache.find(Locs);
1598 assert(It != AAQI.AliasCache.end() && "Must be in cache");
1599 auto &Entry = It->second;
1600
1601 // Check whether a NoAlias assumption has been used, but disproven.
1602 bool AssumptionDisproven =
1603 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1604 if (AssumptionDisproven)
1605 Result = AliasResult::MayAlias;
1606
1607 // This is a definitive result now, when considered as a root query.
1608 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1609 Entry.Result = Result;
1610 // Cache contains sorted {V1,V2} pairs.
1611 Entry.Result.swap(Swapped);
1612 Entry.NumAssumptionUses = -1;
1613
1614 // If the assumption has been disproven, remove any results that may have
1615 // been based on this assumption. Do this after the Entry updates above to
1616 // avoid iterator invalidation.
1617 if (AssumptionDisproven)
1618 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1619 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1620
1621 // The result may still be based on assumptions higher up in the chain.
1622 // Remember it, so it can be purged from the cache later.
1623 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1624 Result != AliasResult::MayAlias)
1625 AAQI.AssumptionBasedResults.push_back(Locs);
1626 return Result;
1627}
1628
1629AliasResult BasicAAResult::aliasCheckRecursive(
1630 const Value *V1, LocationSize V1Size,
1631 const Value *V2, LocationSize V2Size,
1632 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1633 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1634 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1635 if (Result != AliasResult::MayAlias)
1636 return Result;
1637 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1638 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1639 Result.swap();
1640 if (Result != AliasResult::MayAlias)
1641 return Result;
1642 }
1643
1644 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1645 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1646 if (Result != AliasResult::MayAlias)
1647 return Result;
1648 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1649 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1650 Result.swap();
1651 if (Result != AliasResult::MayAlias)
1652 return Result;
1653 }
1654
1655 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1656 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1657 if (Result != AliasResult::MayAlias)
1658 return Result;
1659 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1660 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1661 Result.swap();
1662 if (Result != AliasResult::MayAlias)
1663 return Result;
1664 }
1665
1666 // If both pointers are pointing into the same object and one of them
1667 // accesses the entire object, then the accesses must overlap in some way.
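// Worked example: if both pointers address the same 8-byte alloca and one
// precise access is known to cover all 8 bytes, any other non-empty
// in-bounds access to that alloca necessarily overlaps it, hence the
// PartialAlias result below.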
1668 if (O1 == O2) {
1669 bool NullIsValidLocation = NullPointerIsDefined(&F);
1670 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1671 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1672 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1673 return AliasResult::PartialAlias;
1674 }
1675
1676 return AliasResult::MayAlias;
1677}
1678
1679/// Check whether two Values can be considered equivalent.
1680///
1681/// If the values may come from different cycle iterations, this will also
1682 /// check that the values are not part of a cycle. We have to do this because we
1683 /// are looking through phi nodes, that is, we say
1684/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
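/// For instance, if %v is defined inside a loop body, the %v flowing around
/// the back edge into phi(%v, ...) may belong to a different iteration than
/// the %v it is compared against, so pointer equality of the Value alone is
/// not sufficient once MayBeCrossIteration is set.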
1685bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1686 const Value *V2,
1687 const AAQueryInfo &AAQI) {
1688 if (V != V2)
1689 return false;
1690
1691 if (!AAQI.MayBeCrossIteration)
1692 return true;
1693
1694 // Non-instructions and instructions in the entry block cannot be part of
1695 // a loop.
1696 const Instruction *Inst = dyn_cast<Instruction>(V);
1697 if (!Inst || Inst->getParent()->isEntryBlock())
1698 return true;
1699
1700 // Check whether the instruction is part of a cycle, by checking whether the
1701 // block can (non-trivially) reach itself.
1702 BasicBlock *BB = const_cast<BasicBlock *>(Inst->getParent());
1703 SmallVector<BasicBlock *> Succs(successors(BB));
1704 return !Succs.empty() &&
1705 !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT);
1706}
1707
1708/// Computes the symbolic difference between two de-composed GEPs.
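/// Illustrative example: subtracting a source GEP decomposed as (%x * 4 + 3)
/// from a destination decomposed as (%x * 4 + %y * 2 + 8) cancels the
/// matching %x * 4 terms and leaves (%y * 2 + 5) in DestGEP.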
1709void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1710 const DecomposedGEP &SrcGEP,
1711 const AAQueryInfo &AAQI) {
1712 DestGEP.Offset -= SrcGEP.Offset;
1713 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1714 // Find V in Dest. This is N^2, but pointer indices almost never have more
1715 // than a few variable indexes.
1716 bool Found = false;
1717 for (auto I : enumerate(DestGEP.VarIndices)) {
1718 VariableGEPIndex &Dest = I.value();
1719 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
1720 !Dest.Val.hasSameCastsAs(Src.Val))
1721 continue;
1722
1723 // Normalize IsNegated if we're going to lose the NSW flag anyway.
1724 if (Dest.IsNegated) {
1725 Dest.Scale = -Dest.Scale;
1726 Dest.IsNegated = false;
1727 Dest.IsNSW = false;
1728 }
1729
1730 // If we found it, subtract off Scale V's from the entry in Dest. If it
1731 // goes to zero, remove the entry.
1732 if (Dest.Scale != Src.Scale) {
1733 Dest.Scale -= Src.Scale;
1734 Dest.IsNSW = false;
1735 } else {
1736 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1737 }
1738 Found = true;
1739 break;
1740 }
1741
1742 // If we didn't consume this entry, add it to the end of the Dest list.
1743 if (!Found) {
1744 VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1745 /* IsNegated */ true};
1746 DestGEP.VarIndices.push_back(Entry);
1747 }
1748 }
1749}
1750
1751bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1752 LocationSize MaybeV1Size,
1753 LocationSize MaybeV2Size,
1754 AssumptionCache *AC,
1755 DominatorTree *DT,
1756 const AAQueryInfo &AAQI) {
1757 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1758 !MaybeV2Size.hasValue())
1759 return false;
1760
1761 const uint64_t V1Size = MaybeV1Size.getValue();
1762 const uint64_t V2Size = MaybeV2Size.getValue();
1763
1764 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1765
1766 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1767 !Var0.hasNegatedScaleOf(Var1) ||
1768 Var0.Val.V->getType() != Var1.Val.V->getType())
1769 return false;
1770
1771 // We'll strip off the Extensions of Var0 and Var1 and do another round
1772 // of GetLinearExpression decomposition. For example, if Var0
1773 // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1774
1775 LinearExpression E0 =
1776 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1777 LinearExpression E1 =
1778 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1779 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1780 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1781 return false;
1782
1783 // We have a hit - Var0 and Var1 only differ by a constant offset!
1784
1785 // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1786 // Var1 can be calculated, but we're only interested in the absolute
1787 // minimum difference between the two. The minimum distance may occur due to
1788 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1789 // the minimum distance between %i and %i + 5 is 3.
1790 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1791 MinDiff = APIntOps::umin(MinDiff, Wrapped);
1792 APInt MinDiffBytes =
1793 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1794
1795 // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1796 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1797 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1798 // V2Size can fit in the MinDiffBytes gap.
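// Worked example (illustrative numbers): with MinDiffBytes == 12, access
// sizes of 4 and 8 bytes, and a constant GEP offset of 0, both checks below
// hold (12 >= 4 and 12 >= 8), so the caller may conclude NoAlias.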
1799 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1800 MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1801}
1802
1803//===----------------------------------------------------------------------===//
1804// BasicAliasAnalysis Pass
1805//===----------------------------------------------------------------------===//
1806
1807AnalysisKey BasicAA::Key;
1808
1809 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1810 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1811 auto &AC = AM.getResult<AssumptionAnalysis>(F);
1812 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1813 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
1814}
1815
1816 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1817 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1818 }
1819
1820char BasicAAWrapperPass::ID = 0;
1821
1822void BasicAAWrapperPass::anchor() {}
1823
1824 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1825 "Basic Alias Analysis (stateless AA impl)", true, true)
1826 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1827 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1828 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1829 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1830 "Basic Alias Analysis (stateless AA impl)", true, true)
1831
1832 FunctionPass *llvm::createBasicAAWrapperPass() {
1833 return new BasicAAWrapperPass();
1834}
1835
1836 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1837 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1838 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1839 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1840
1841 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1842 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1843 &DTWP.getDomTree()));
1844
1845 return false;
1846}
1847
1848 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1849 AU.setPreservesAll();
1850 AU.addRequiredTransitive<AssumptionCacheTracker>();
1851 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1852 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1853 }