1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69  cl::init(true));
70 
71 /// By default, even on 32-bit architectures we use 64-bit integers for
72 /// calculations. This will allow us to more-aggressively decompose indexing
73 /// expressions calculated using i64 values (e.g., long long in C) which is
74 /// common enough to worry about.
75 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76  cl::Hidden, cl::init(true));
77 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78  cl::Hidden, cl::init(false));
79 
80 /// SearchLimitReached / SearchTimes show how often the limit to
81 /// decompose GEPs is reached. Hitting it affects the precision
82 /// of basic alias analysis.
83 STATISTIC(SearchLimitReached, "Number of times the limit to "
84  "decompose GEPs is reached");
85 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
86 
87 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
88 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89 /// careful with value equivalence. We use reachability to make sure a value
90 /// cannot be involved in a cycle.
91 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
92 
93 // The maximum limit on the search depth in DecomposeGEPExpression() and
94 // getUnderlyingObject(); both functions need to use the same search
95 // depth, otherwise the algorithm in aliasGEP will assert.
96 static const unsigned MaxLookupSearchDepth = 6;
97 
98 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
99                                FunctionAnalysisManager::Invalidator &Inv) {
100   // We don't care if this analysis itself is preserved, it has no state. But
101  // we need to check that the analyses it depends on have been. Note that we
102  // may be created without handles to some analyses and in that case don't
103  // depend on them.
104  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105  (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106  (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
107  return true;
108 
109  // Otherwise this analysis result remains valid.
110  return false;
111 }
112 
113 //===----------------------------------------------------------------------===//
114 // Useful predicates
115 //===----------------------------------------------------------------------===//
116 
117 /// Returns true if the pointer is one which would have been considered an
118 /// escape by isNonEscapingLocalObject.
119 static bool isEscapeSource(const Value *V) {
120  if (isa<CallBase>(V))
121  return true;
122 
123  if (isa<Argument>(V))
124  return true;
125 
126  // The load case works because isNonEscapingLocalObject considers all
127  // stores to be escapes (it passes true for the StoreCaptures argument
128  // to PointerMayBeCaptured).
129  if (isa<LoadInst>(V))
130  return true;
131 
132  // The inttoptr case works because isNonEscapingLocalObject considers all
133  // means of converting or equating a pointer to an int (ptrtoint, ptr store
134  // which could be followed by an integer load, ptr<->int compare) as
135  // escaping, and objects located at well-known addresses via platform-specific
136  // means cannot be considered non-escaping local objects.
137  if (isa<IntToPtrInst>(V))
138  return true;
139 
140  return false;
141 }
142 
143 /// Returns the size of the object specified by V or UnknownSize if unknown.
144 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
145  const TargetLibraryInfo &TLI,
146  bool NullIsValidLoc,
147  bool RoundToAlign = false) {
148  uint64_t Size;
149  ObjectSizeOpts Opts;
150  Opts.RoundToAlign = RoundToAlign;
151  Opts.NullIsUnknownSize = NullIsValidLoc;
152  if (getObjectSize(V, Size, DL, &TLI, Opts))
153  return Size;
154   return MemoryLocation::UnknownSize;
155 }
156 
157 /// Returns true if we can prove that the object specified by V is smaller than
158 /// Size.
159 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
160  const DataLayout &DL,
161  const TargetLibraryInfo &TLI,
162  bool NullIsValidLoc) {
163  // Note that the meanings of the "object" are slightly different in the
164  // following contexts:
165  // c1: llvm::getObjectSize()
166  // c2: llvm.objectsize() intrinsic
167  // c3: isObjectSmallerThan()
168  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
169  // refers to the "entire object".
170  //
171  // Consider this example:
172  // char *p = (char*)malloc(100)
173  // char *q = p+80;
174  //
175  // In the context of c1 and c2, the "object" pointed by q refers to the
176  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
177  //
178  // However, in the context of c3, the "object" refers to the chunk of memory
179   // being allocated. So, the "object" has 100 bytes, and q points to the middle
180   // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
181  // parameter, before the llvm::getObjectSize() is called to get the size of
182  // entire object, we should:
183  // - either rewind the pointer q to the base-address of the object in
184  // question (in this case rewind to p), or
185   //  - either rewind the pointer q to the base-address of the object in
186   //    question (in this case rewind to p), or
187   //  - just give up. It is up to the caller to make sure the pointer is
188   //    pointing to the base address of the object.
189  if (!isIdentifiedObject(V))
190  return false;
191 
192  // This function needs to use the aligned object size because we allow
193  // reads a bit past the end given sufficient alignment.
194  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
195  /*RoundToAlign*/ true);
196 
197  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
198 }
199 
200 /// Return the minimal extent from \p V to the end of the underlying object,
201 /// assuming the result is used in an aliasing query. E.g., we do use the query
202 /// location size and the fact that null pointers cannot alias here.
203 static uint64_t getMinimalExtentFrom(const Value &V,
204  const LocationSize &LocSize,
205  const DataLayout &DL,
206  bool NullIsValidLoc) {
207  // If we have dereferenceability information we know a lower bound for the
208  // extent as accesses for a lower offset would be valid. We need to exclude
209  // the "or null" part if null is a valid pointer.
210  bool CanBeNull, CanBeFreed;
211  uint64_t DerefBytes =
212  V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
213  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
214  DerefBytes = CanBeFreed ? 0 : DerefBytes;
215   // If queried with a precise location size, we assume that location size is
216   // accessed and thus valid.
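  // For illustration (editor's example, not part of the original source): for a
  // dereferenceable(16) argument queried with a precise 4-byte location, the
  // minimal extent is max(16, 4) == 16 bytes, assuming null is not a valid
  // location here and the memory cannot be freed before the access.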
217  if (LocSize.isPrecise())
218  DerefBytes = std::max(DerefBytes, LocSize.getValue());
219  return DerefBytes;
220 }
221 
222 /// Returns true if we can prove that the object specified by V has size Size.
223 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
224  const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
225  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
226  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
227 }
228 
229 //===----------------------------------------------------------------------===//
230 // GetElementPtr Instruction Decomposition and Analysis
231 //===----------------------------------------------------------------------===//
232 
233 namespace {
234 /// Represents zext(sext(V)).
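/// For illustration (editor's note, not part of the original source): an i8
/// value V with SExtBits == 8 and ZExtBits == 16 denotes first sign-extending
/// V to i16 and then zero-extending the result to i32, so getBitWidth()
/// returns 8 + 8 + 16 == 32.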
235 struct ExtendedValue {
236  const Value *V;
237  unsigned ZExtBits;
238  unsigned SExtBits;
239 
240  explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
241  unsigned SExtBits = 0)
242  : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
243 
244  unsigned getBitWidth() const {
245  return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
246  }
247 
248  ExtendedValue withValue(const Value *NewV) const {
249  return ExtendedValue(NewV, ZExtBits, SExtBits);
250  }
251 
252  ExtendedValue withZExtOfValue(const Value *NewV) const {
253  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
254  NewV->getType()->getPrimitiveSizeInBits();
255  // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
256  return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
257  }
258 
259  ExtendedValue withSExtOfValue(const Value *NewV) const {
260  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
261  NewV->getType()->getPrimitiveSizeInBits();
262  // zext(sext(sext(NewV)))
263  return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
264  }
265 
266  APInt evaluateWith(APInt N) const {
267  assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
268  "Incompatible bit width");
269  if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
270  if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
271  return N;
272  }
273 
274  bool canDistributeOver(bool NUW, bool NSW) const {
275  // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
276  // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
277  return (!ZExtBits || NUW) && (!SExtBits || NSW);
278  }
279 };
280 
281 /// Represents zext(sext(V)) * Scale + Offset.
282 struct LinearExpression {
283  ExtendedValue Val;
284  APInt Scale;
285  APInt Offset;
286 
287  LinearExpression(const ExtendedValue &Val, const APInt &Scale,
288  const APInt &Offset)
289  : Val(Val), Scale(Scale), Offset(Offset) {}
290 
291  LinearExpression(const ExtendedValue &Val) : Val(Val) {
292  unsigned BitWidth = Val.getBitWidth();
293  Scale = APInt(BitWidth, 1);
294  Offset = APInt(BitWidth, 0);
295  }
296 };
297 }
298 
299 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
300 /// B are constant integers.
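/// For illustration (editor's sketch, not part of the original source): given
///   %a = add nuw nsw i32 %x, 4
///   %r = shl nuw nsw i32 %a, 1
/// analyzing %r yields the linear expression 2*%x + 8, i.e. Scale = 2 and
/// Offset = 8, with Val wrapping %x.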
301 static LinearExpression GetLinearExpression(
302  const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
303  AssumptionCache *AC, DominatorTree *DT) {
304  // Limit our recursion depth.
305  if (Depth == 6)
306  return Val;
307 
308  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
309  return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
310  Val.evaluateWith(Const->getValue()));
311 
312  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
313  if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
314  APInt RHS = Val.evaluateWith(RHSC->getValue());
315   // The only non-OBO case we deal with is or, and even then only in the
316   // case where it is both nuw and nsw.
317  bool NUW = true, NSW = true;
318  if (isa<OverflowingBinaryOperator>(BOp)) {
319  NUW &= BOp->hasNoUnsignedWrap();
320  NSW &= BOp->hasNoSignedWrap();
321  }
322  if (!Val.canDistributeOver(NUW, NSW))
323  return Val;
324 
325  switch (BOp->getOpcode()) {
326  default:
327  // We don't understand this instruction, so we can't decompose it any
328  // further.
329  return Val;
330  case Instruction::Or:
331  // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
332  // analyze it.
333  if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
334  BOp, DT))
335  return Val;
336 
337         LLVM_FALLTHROUGH;
338  case Instruction::Add: {
339  LinearExpression E = GetLinearExpression(
340  Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
341  E.Offset += RHS;
342  return E;
343  }
344  case Instruction::Sub: {
345  LinearExpression E = GetLinearExpression(
346  Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
347  E.Offset -= RHS;
348  return E;
349  }
350  case Instruction::Mul: {
351  LinearExpression E = GetLinearExpression(
352  Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
353  E.Offset *= RHS;
354  E.Scale *= RHS;
355  return E;
356  }
357  case Instruction::Shl:
358  // We're trying to linearize an expression of the kind:
359  // shl i8 -128, 36
360  // where the shift count exceeds the bitwidth of the type.
361  // We can't decompose this further (the expression would return
362  // a poison value).
363  if (RHS.getLimitedValue() > Val.getBitWidth())
364  return Val;
365 
366  LinearExpression E = GetLinearExpression(
367  Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
368  E.Offset <<= RHS.getLimitedValue();
369  E.Scale <<= RHS.getLimitedValue();
370  return E;
371  }
372  }
373  }
374 
375  if (isa<ZExtInst>(Val.V))
376  return GetLinearExpression(
377  Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
378  DL, Depth + 1, AC, DT);
379 
380  if (isa<SExtInst>(Val.V))
381  return GetLinearExpression(
382  Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
383  DL, Depth + 1, AC, DT);
384 
385  return Val;
386 }
387 
388 /// Ensure that a pointer offset fits in an integer of size PointerSize
389 /// (in bits) when that size is smaller than the maximum pointer size. This is
390 /// an issue, in particular, for 32b pointers with negative indices that
391 /// rely on two's complement wrap-arounds for precise alias information
392 /// where the maximum pointer size is 64b.
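/// For illustration (editor's example, not part of the original source): with
/// the offset held in a 64-bit APInt and PointerSize == 32, an offset of
/// 0xFFFFFFFF is shifted left by 32 and arithmetically shifted right by 32,
/// yielding -1, i.e. the value is re-interpreted as a signed 32-bit offset.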
393 static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
394   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
395  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
396  return (Offset << ShiftBits).ashr(ShiftBits);
397 }
398 
399 static unsigned getMaxPointerSize(const DataLayout &DL) {
400  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
401  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
402  if (DoubleCalcBits) MaxPointerSize *= 2;
403 
404  return MaxPointerSize;
405 }
406 
407 /// If V is a symbolic pointer expression, decompose it into a base pointer
408 /// with a constant offset and a number of scaled symbolic offsets.
409 ///
410 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
411 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
412 /// specified amount, but which may have other unrepresented high bits. As
413 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
414 ///
415 /// This function is capable of analyzing everything that getUnderlyingObject
416 /// can look through. To be able to do that getUnderlyingObject and
417 /// DecomposeGEPExpression must use the same search depth
418 /// (MaxLookupSearchDepth).
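/// For illustration (editor's sketch, not part of the original source):
///   %p = getelementptr inbounds [10 x i32], [10 x i32]* %arr, i64 0, i64 %i
/// decomposes into Base = %arr, a constant Offset of 0, and a single variable
/// index {V = %i, Scale = 4}, 4 being the allocation size of i32.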
419 BasicAAResult::DecomposedGEP
420 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
421  AssumptionCache *AC, DominatorTree *DT) {
422  // Limit recursion depth to limit compile time in crazy cases.
423  unsigned MaxLookup = MaxLookupSearchDepth;
424  SearchTimes++;
425  const Instruction *CxtI = dyn_cast<Instruction>(V);
426 
427  unsigned MaxPointerSize = getMaxPointerSize(DL);
428  DecomposedGEP Decomposed;
429  Decomposed.Offset = APInt(MaxPointerSize, 0);
430  Decomposed.HasCompileTimeConstantScale = true;
431  do {
432  // See if this is a bitcast or GEP.
433  const Operator *Op = dyn_cast<Operator>(V);
434  if (!Op) {
435       // The only non-operator cases we can handle are GlobalAliases.
436  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
437  if (!GA->isInterposable()) {
438  V = GA->getAliasee();
439  continue;
440  }
441  }
442  Decomposed.Base = V;
443  return Decomposed;
444  }
445 
446  if (Op->getOpcode() == Instruction::BitCast ||
447  Op->getOpcode() == Instruction::AddrSpaceCast) {
448  V = Op->getOperand(0);
449  continue;
450  }
451 
452  const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
453  if (!GEPOp) {
454  if (const auto *PHI = dyn_cast<PHINode>(V)) {
455  // Look through single-arg phi nodes created by LCSSA.
456  if (PHI->getNumIncomingValues() == 1) {
457  V = PHI->getIncomingValue(0);
458  continue;
459  }
460  } else if (const auto *Call = dyn_cast<CallBase>(V)) {
461         // CaptureTracking can know about special capturing properties of some
462         // intrinsics like launder.invariant.group that can't be expressed with
463         // the attributes but that still return an aliasing pointer. Because
464         // some analyses may assume that a nocapture pointer is not returned
465         // from a special intrinsic (since the function would otherwise have to
466         // be marked with the returned attribute), it is crucial to use this
467         // function so that we stay in sync with CaptureTracking. Not using it
468         // may cause weird miscompilations where two aliasing pointers are
469         // assumed to be noalias.
470  if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
471  V = RP;
472  continue;
473  }
474  }
475 
476  Decomposed.Base = V;
477  return Decomposed;
478  }
479 
480  // Track whether we've seen at least one in bounds gep, and if so, whether
481  // all geps parsed were in bounds.
482  if (Decomposed.InBounds == None)
483  Decomposed.InBounds = GEPOp->isInBounds();
484  else if (!GEPOp->isInBounds())
485  Decomposed.InBounds = false;
486 
487  // Don't attempt to analyze GEPs over unsized objects.
488  if (!GEPOp->getSourceElementType()->isSized()) {
489  Decomposed.Base = V;
490  return Decomposed;
491  }
492 
493  // Don't attempt to analyze GEPs if index scale is not a compile-time
494  // constant.
495  if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
496  Decomposed.Base = V;
497  Decomposed.HasCompileTimeConstantScale = false;
498  return Decomposed;
499  }
500 
501  unsigned AS = GEPOp->getPointerAddressSpace();
502  // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
503  gep_type_iterator GTI = gep_type_begin(GEPOp);
504  unsigned PointerSize = DL.getPointerSizeInBits(AS);
505  // Assume all GEP operands are constants until proven otherwise.
506  bool GepHasConstantOffset = true;
507  for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
508  I != E; ++I, ++GTI) {
509  const Value *Index = *I;
510  // Compute the (potentially symbolic) offset in bytes for this index.
511  if (StructType *STy = GTI.getStructTypeOrNull()) {
512  // For a struct, add the member offset.
513  unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
514  if (FieldNo == 0)
515  continue;
516 
517  Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
518  continue;
519  }
520 
521  // For an array/pointer, add the element offset, explicitly scaled.
522  if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
523  if (CIdx->isZero())
524  continue;
525  Decomposed.Offset +=
526  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
527  CIdx->getValue().sextOrTrunc(MaxPointerSize);
528  continue;
529  }
530 
531  GepHasConstantOffset = false;
532 
533  APInt Scale(MaxPointerSize,
534  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
535  // If the integer type is smaller than the pointer size, it is implicitly
536  // sign extended to pointer size.
537  unsigned Width = Index->getType()->getIntegerBitWidth();
538  unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
539  LinearExpression LE = GetLinearExpression(
540  ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
541 
542  // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
543  // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
544 
545       // It can be the case that, even though C1*V+C2 does not overflow for
546  // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
547  // decompose the expression in this way.
548  //
549  // FIXME: C1*Scale and the other operations in the decomposed
550  // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
551  // possibility.
552  bool Overflow;
553  APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
554  .smul_ov(Scale, Overflow);
555  if (Overflow) {
556  LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
557  } else {
558  Decomposed.Offset += ScaledOffset;
559  Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
560  }
561 
562  // If we already had an occurrence of this index variable, merge this
563  // scale into it. For example, we want to handle:
564  // A[x][x] -> x*16 + x*4 -> x*20
565  // This also ensures that 'x' only appears in the index list once.
566  for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
567  if (Decomposed.VarIndices[i].V == LE.Val.V &&
568  Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
569  Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
570  Scale += Decomposed.VarIndices[i].Scale;
571  Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
572  break;
573  }
574  }
575 
576  // Make sure that we have a scale that makes sense for this target's
577  // pointer size.
578  Scale = adjustToPointerSize(Scale, PointerSize);
579 
580  if (!!Scale) {
581  VariableGEPIndex Entry = {LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits,
582  Scale, CxtI};
583  Decomposed.VarIndices.push_back(Entry);
584  }
585  }
586 
587  // Take care of wrap-arounds
588  if (GepHasConstantOffset)
589  Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
590 
591  // Analyze the base pointer next.
592  V = GEPOp->getOperand(0);
593  } while (--MaxLookup);
594 
595  // If the chain of expressions is too deep, just return early.
596  Decomposed.Base = V;
597  SearchLimitReached++;
598  return Decomposed;
599 }
600 
601 /// Returns whether the given pointer value points to memory that is local to
602 /// the function, with global constants being considered local to all
603 /// functions.
604 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
605                                            AAQueryInfo &AAQI, bool OrLocal) {
606  assert(Visited.empty() && "Visited must be cleared after use!");
607 
608  unsigned MaxLookup = 8;
609   SmallVector<const Value *, 16> Worklist;
610   Worklist.push_back(Loc.Ptr);
611  do {
612  const Value *V = getUnderlyingObject(Worklist.pop_back_val());
613  if (!Visited.insert(V).second) {
614  Visited.clear();
615  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
616  }
617 
618  // An alloca instruction defines local memory.
619  if (OrLocal && isa<AllocaInst>(V))
620  continue;
621 
622  // A global constant counts as local memory for our purposes.
623  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
624  // Note: this doesn't require GV to be "ODR" because it isn't legal for a
625  // global to be marked constant in some modules and non-constant in
626  // others. GV may even be a declaration, not a definition.
627  if (!GV->isConstant()) {
628  Visited.clear();
629  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
630  }
631  continue;
632  }
633 
634  // If both select values point to local memory, then so does the select.
635  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
636  Worklist.push_back(SI->getTrueValue());
637  Worklist.push_back(SI->getFalseValue());
638  continue;
639  }
640 
641  // If all values incoming to a phi node point to local memory, then so does
642  // the phi.
643  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
644  // Don't bother inspecting phi nodes with many operands.
645  if (PN->getNumIncomingValues() > MaxLookup) {
646  Visited.clear();
647  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
648  }
649  append_range(Worklist, PN->incoming_values());
650  continue;
651  }
652 
653  // Otherwise be conservative.
654  Visited.clear();
655  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
656  } while (!Worklist.empty() && --MaxLookup);
657 
658  Visited.clear();
659  return Worklist.empty();
660 }
661 
662 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
663  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
664  return II && II->getIntrinsicID() == IID;
665 }
666 
667 /// Returns the behavior when calling the given call site.
668 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
669   if (Call->doesNotAccessMemory())
670     // Can't do better than this.
671     return FMRB_DoesNotAccessMemory;
672 
673   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
674 
675  // If the callsite knows it only reads memory, don't return worse
676  // than that.
677  if (Call->onlyReadsMemory())
678  Min = FMRB_OnlyReadsMemory;
679  else if (Call->doesNotReadMemory())
680  Min = FMRB_OnlyWritesMemory;
681 
682   if (Call->onlyAccessesArgMemory())
683     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
684   else if (Call->onlyAccessesInaccessibleMemory())
685     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
686   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
687     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
688 
689  // If the call has operand bundles then aliasing attributes from the function
690  // it calls do not directly apply to the call. This can be made more precise
691  // in the future.
692  if (!Call->hasOperandBundles())
693  if (const Function *F = Call->getCalledFunction())
694       Min =
695           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
696 
697  return Min;
698 }
699 
700 /// Returns the behavior when calling the given function. For use when the call
701 /// site is not known.
702 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
703   // If the function declares it doesn't access memory, we can't do better.
704  if (F->doesNotAccessMemory())
705     return FMRB_DoesNotAccessMemory;
706 
707   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
708 
709  // If the function declares it only reads memory, go with that.
710  if (F->onlyReadsMemory())
711  Min = FMRB_OnlyReadsMemory;
712  else if (F->doesNotReadMemory())
713  Min = FMRB_OnlyWritesMemory;
714 
715   if (F->onlyAccessesArgMemory())
716     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
717   else if (F->onlyAccessesInaccessibleMemory())
718     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
719   else if (F->onlyAccessesInaccessibleMemOrArgMem())
720     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
721 
722  return Min;
723 }
724 
725 /// Returns true if this is a writeonly (i.e Mod only) parameter.
726 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
727  const TargetLibraryInfo &TLI) {
728  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
729  return true;
730 
731  // We can bound the aliasing properties of memset_pattern16 just as we can
732  // for memcpy/memset. This is particularly important because the
733  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
734  // whenever possible.
735  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
736  // attributes.
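  // For illustration (editor's note, not part of the original source): for a
  // call memset_pattern16(dst, pattern, n), only the first argument (dst) is
  // written through and never read, so ArgIdx == 0 is treated as write-only
  // below.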
737  LibFunc F;
738  if (Call->getCalledFunction() &&
739  TLI.getLibFunc(*Call->getCalledFunction(), F) &&
740  F == LibFunc_memset_pattern16 && TLI.has(F))
741  if (ArgIdx == 0)
742  return true;
743 
744  // TODO: memset_pattern4, memset_pattern8
745  // TODO: _chk variants
746  // TODO: strcmp, strcpy
747 
748  return false;
749 }
750 
751 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
752                                            unsigned ArgIdx) {
753  // Checking for known builtin intrinsics and target library functions.
754  if (isWriteOnlyParam(Call, ArgIdx, TLI))
755  return ModRefInfo::Mod;
756 
757  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
758  return ModRefInfo::Ref;
759 
760  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
761  return ModRefInfo::NoModRef;
762 
763  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
764 }
765 
766 #ifndef NDEBUG
767 static const Function *getParent(const Value *V) {
768  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
769  if (!inst->getParent())
770  return nullptr;
771  return inst->getParent()->getParent();
772  }
773 
774  if (const Argument *arg = dyn_cast<Argument>(V))
775  return arg->getParent();
776 
777  return nullptr;
778 }
779 
780 static bool notDifferentParent(const Value *O1, const Value *O2) {
781 
782  const Function *F1 = getParent(O1);
783  const Function *F2 = getParent(O2);
784 
785  return !F1 || !F2 || F1 == F2;
786 }
787 #endif
788 
789 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
790                                  const MemoryLocation &LocB,
791  AAQueryInfo &AAQI) {
792  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
793  "BasicAliasAnalysis doesn't support interprocedural queries.");
794  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
795 }
796 
797 /// Checks to see if the specified callsite can clobber the specified memory
798 /// object.
799 ///
800 /// Since we only look at local properties of this function, we really can't
801 /// say much about this query. We do, however, use simple "address taken"
802 /// analysis on local objects.
803 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
804                                         const MemoryLocation &Loc,
805  AAQueryInfo &AAQI) {
806  assert(notDifferentParent(Call, Loc.Ptr) &&
807  "AliasAnalysis query involving multiple functions!");
808 
809  const Value *Object = getUnderlyingObject(Loc.Ptr);
810 
811  // Calls marked 'tail' cannot read or write allocas from the current frame
812  // because the current frame might be destroyed by the time they run. However,
813  // a tail call may use an alloca with byval. Calling with byval copies the
814  // contents of the alloca into argument registers or stack slots, so there is
815  // no lifetime issue.
816  if (isa<AllocaInst>(Object))
817  if (const CallInst *CI = dyn_cast<CallInst>(Call))
818  if (CI->isTailCall() &&
819  !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
820  return ModRefInfo::NoModRef;
821 
822  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
823  // modify them even though the alloca is not escaped.
824  if (auto *AI = dyn_cast<AllocaInst>(Object))
825  if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
826  return ModRefInfo::Mod;
827 
828  // If the pointer is to a locally allocated object that does not escape,
829  // then the call can not mod/ref the pointer unless the call takes the pointer
830  // as an argument, and itself doesn't capture it.
831  if (!isa<Constant>(Object) && Call != Object &&
832       isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
833 
834  // Optimistically assume that call doesn't touch Object and check this
835  // assumption in the following loop.
836     ModRefInfo Result = ModRefInfo::NoModRef;
837     bool IsMustAlias = true;
838 
839  unsigned OperandNo = 0;
840  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
841  CI != CE; ++CI, ++OperandNo) {
842  // Only look at the no-capture or byval pointer arguments. If this
843  // pointer were passed to arguments that were neither of these, then it
844  // couldn't be no-capture.
845  if (!(*CI)->getType()->isPointerTy() ||
846  (!Call->doesNotCapture(OperandNo) &&
847  OperandNo < Call->getNumArgOperands() &&
848  !Call->isByValArgument(OperandNo)))
849  continue;
850 
851  // Call doesn't access memory through this operand, so we don't care
852  // if it aliases with Object.
853  if (Call->doesNotAccessMemory(OperandNo))
854  continue;
855 
856  // If this is a no-capture pointer argument, see if we can tell that it
857  // is impossible to alias the pointer we're checking.
858       AliasResult AR = getBestAAResults().alias(
859           MemoryLocation::getBeforeOrAfter(*CI),
860           MemoryLocation::getBeforeOrAfter(Object), AAQI);
861       if (AR != AliasResult::MustAlias)
862  IsMustAlias = false;
863  // Operand doesn't alias 'Object', continue looking for other aliases
864  if (AR == AliasResult::NoAlias)
865  continue;
866  // Operand aliases 'Object', but call doesn't modify it. Strengthen
867       // initial assumption and keep looking in case there are more aliases.
868  if (Call->onlyReadsMemory(OperandNo)) {
869  Result = setRef(Result);
870  continue;
871  }
872  // Operand aliases 'Object' but call only writes into it.
873  if (Call->doesNotReadMemory(OperandNo)) {
874  Result = setMod(Result);
875  continue;
876  }
877  // This operand aliases 'Object' and call reads and writes into it.
878  // Setting ModRef will not yield an early return below, MustAlias is not
879  // used further.
880  Result = ModRefInfo::ModRef;
881  break;
882  }
883 
884  // No operand aliases, reset Must bit. Add below if at least one aliases
885  // and all aliases found are MustAlias.
886  if (isNoModRef(Result))
887  IsMustAlias = false;
888 
889  // Early return if we improved mod ref information
890  if (!isModAndRefSet(Result)) {
891  if (isNoModRef(Result))
892  return ModRefInfo::NoModRef;
893  return IsMustAlias ? setMust(Result) : clearMust(Result);
894  }
895  }
896 
897  // If the call is malloc/calloc like, we can assume that it doesn't
898  // modify any IR visible value. This is only valid because we assume these
899  // routines do not read values visible in the IR. TODO: Consider special
900  // casing realloc and strdup routines which access only their arguments as
901  // well. Or alternatively, replace all of this with inaccessiblememonly once
902  // that's implemented fully.
903  if (isMallocOrCallocLikeFn(Call, &TLI)) {
904  // Be conservative if the accessed pointer may alias the allocation -
905  // fallback to the generic handling below.
906     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
907                                  AAQI) == AliasResult::NoAlias)
908  return ModRefInfo::NoModRef;
909  }
910 
911  // The semantics of memcpy intrinsics either exactly overlap or do not
912  // overlap, i.e., source and destination of any given memcpy are either
913  // no-alias or must-alias.
914  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
915     AliasResult SrcAA =
916         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
917     AliasResult DestAA =
918         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
919     // It's also possible for Loc to alias both src and dest, or neither.
920     ModRefInfo rv = ModRefInfo::NoModRef;
921     if (SrcAA != AliasResult::NoAlias)
922  rv = setRef(rv);
923  if (DestAA != AliasResult::NoAlias)
924  rv = setMod(rv);
925  return rv;
926  }
927 
928  // Guard intrinsics are marked as arbitrarily writing so that proper control
929   // dependencies are maintained but they never mod any particular memory
930  // location.
931  //
932  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
933  // heap state at the point the guard is issued needs to be consistent in case
934  // the guard invokes the "deopt" continuation.
935  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
936  return ModRefInfo::Ref;
937  // The same applies to deoptimize which is essentially a guard(false).
938  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
939  return ModRefInfo::Ref;
940 
941  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
942  // writing so that proper control dependencies are maintained but they never
943  // mod any particular memory location visible to the IR.
944  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
945  // intrinsic is now modeled as reading memory. This prevents hoisting the
946  // invariant.start intrinsic over stores. Consider:
947  // *ptr = 40;
948  // *ptr = 50;
949  // invariant_start(ptr)
950  // int val = *ptr;
951  // print(val);
952  //
953  // This cannot be transformed to:
954  //
955  // *ptr = 40;
956  // invariant_start(ptr)
957  // *ptr = 50;
958  // int val = *ptr;
959  // print(val);
960  //
961  // The transformation will cause the second store to be ignored (based on
962  // rules of invariant.start) and print 40, while the first program always
963  // prints 50.
964  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
965  return ModRefInfo::Ref;
966 
967  // The AAResultBase base class has some smarts, lets use them.
968  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
969 }
970 
971 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
972                                         const CallBase *Call2,
973  AAQueryInfo &AAQI) {
974  // Guard intrinsics are marked as arbitrarily writing so that proper control
975   // dependencies are maintained but they never mod any particular memory
976  // location.
977  //
978  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
979  // heap state at the point the guard is issued needs to be consistent in case
980  // the guard invokes the "deopt" continuation.
981 
982  // NB! This function is *not* commutative, so we special case two
983  // possibilities for guard intrinsics.
984 
985  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
986     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
987                ? ModRefInfo::Ref
988                : ModRefInfo::NoModRef;
989 
990  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
991     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
992                ? ModRefInfo::Mod
993                : ModRefInfo::NoModRef;
994 
995  // The AAResultBase base class has some smarts, lets use them.
996  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
997 }
998 
999 /// Return true if we know V to be the base address of the corresponding memory
1000 /// object. This implies that any address less than V must be out of bounds
1001 /// for the underlying object. Note that just being isIdentifiedObject() is
1002 /// not enough - For example, a negative offset from a noalias argument or call
1003 /// can be inbounds w.r.t the actual underlying object.
1004 static bool isBaseOfObject(const Value *V) {
1005  // TODO: We can handle other cases here
1006  // 1) For GC languages, arguments to functions are often required to be
1007  // base pointers.
1008  // 2) Result of allocation routines are often base pointers. Leverage TLI.
1009  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1010 }
1011 
1012 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1013 /// another pointer.
1014 ///
1015 /// We know that V1 is a GEP, but we don't know anything about V2.
1016 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1017 /// V2.
1018 AliasResult BasicAAResult::aliasGEP(
1019  const GEPOperator *GEP1, LocationSize V1Size,
1020  const Value *V2, LocationSize V2Size,
1021  const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1022  if (!V1Size.hasValue() && !V2Size.hasValue()) {
1023  // TODO: This limitation exists for compile-time reasons. Relax it if we
1024  // can avoid exponential pathological cases.
1025  if (!isa<GEPOperator>(V2))
1026  return AliasResult::MayAlias;
1027 
1028  // If both accesses have unknown size, we can only check whether the base
1029  // objects don't alias.
1030  AliasResult BaseAlias = getBestAAResults().alias(
1031  MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1032  MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1033  return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1034                                              : AliasResult::MayAlias;
1035   }
1036 
1037  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1038  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1039 
1040  // Don't attempt to analyze the decomposed GEP if index scale is not a
1041  // compile-time constant.
1042  if (!DecompGEP1.HasCompileTimeConstantScale ||
1043  !DecompGEP2.HasCompileTimeConstantScale)
1044  return AliasResult::MayAlias;
1045 
1046  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1047  "DecomposeGEPExpression returned a result different from "
1048  "getUnderlyingObject");
1049 
1050  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1051  // symbolic difference.
1052  DecompGEP1.Offset -= DecompGEP2.Offset;
1053  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1054 
1055  // If an inbounds GEP would have to start from an out of bounds address
1056  // for the two to alias, then we can assume noalias.
1057  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1058  V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1059  isBaseOfObject(DecompGEP2.Base))
1060  return AliasResult::NoAlias;
1061 
1062  if (isa<GEPOperator>(V2)) {
1063  // Symmetric case to above.
1064  if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1065  V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1066  isBaseOfObject(DecompGEP1.Base))
1067  return AliasResult::NoAlias;
1068  }
1069 
1070  // For GEPs with identical offsets, we can preserve the size and AAInfo
1071  // when performing the alias check on the underlying objects.
1072  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1073  return getBestAAResults().alias(
1074  MemoryLocation(UnderlyingV1, V1Size),
1075  MemoryLocation(UnderlyingV2, V2Size), AAQI);
1076 
1077  // Do the base pointers alias?
1078  AliasResult BaseAlias = getBestAAResults().alias(
1079  MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1080  MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1081 
1082  // If we get a No or May, then return it immediately, no amount of analysis
1083  // will improve this situation.
1084  if (BaseAlias != AliasResult::MustAlias) {
1085  assert(BaseAlias == AliasResult::NoAlias ||
1086  BaseAlias == AliasResult::MayAlias);
1087  return BaseAlias;
1088  }
1089 
1090  // If there is a constant difference between the pointers, but the difference
1091  // is less than the size of the associated memory object, then we know
1092  // that the objects are partially overlapping. If the difference is
1093  // greater, we know they do not overlap.
1094  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1095  APInt &Off = DecompGEP1.Offset;
1096 
1097  // Initialize for Off >= 0 (V2 <= GEP1) case.
1098  const Value *LeftPtr = V2;
1099  const Value *RightPtr = GEP1;
1100  LocationSize VLeftSize = V2Size;
1101  LocationSize VRightSize = V1Size;
1102  const bool Swapped = Off.isNegative();
1103 
1104  if (Swapped) {
1105  // Swap if we have the situation where:
1106  // + +
1107  // | BaseOffset |
1108  // ---------------->|
1109  // |-->V1Size |-------> V2Size
1110  // GEP1 V2
1111  std::swap(LeftPtr, RightPtr);
1112  std::swap(VLeftSize, VRightSize);
1113  Off = -Off;
1114  }
1115 
1116  if (VLeftSize.hasValue()) {
1117  const uint64_t LSize = VLeftSize.getValue();
1118  if (Off.ult(LSize)) {
1119  // Conservatively drop processing if a phi was visited and/or offset is
1120  // too big.
1121         AliasResult AR = AliasResult::PartialAlias;
1122         if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1123  (Off + VRightSize.getValue()).ule(LSize)) {
1124  // Memory referenced by right pointer is nested. Save the offset in
1125  // cache. Note that originally offset estimated as GEP1-V2, but
1126  // AliasResult contains the shift that represents GEP1+Offset=V2.
1127  AR.setOffset(-Off.getSExtValue());
1128  AR.swap(Swapped);
1129  }
1130  return AR;
1131  }
1132  return AliasResult::NoAlias;
1133  }
1134  }
1135 
1136  if (!DecompGEP1.VarIndices.empty()) {
1137  APInt GCD;
1138  bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1139  bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1140  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1141  const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
1142  if (i == 0)
1143  GCD = Scale.abs();
1144  else
1145  GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());
1146 
1147  if (AllNonNegative || AllNonPositive) {
1148  // If the Value could change between cycles, then any reasoning about
1149  // the Value this cycle may not hold in the next cycle. We'll just
1150  // give up if we can't determine conditions that hold for every cycle:
1151  const Value *V = DecompGEP1.VarIndices[i].V;
1152  const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;
1153 
1154  KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
1155  bool SignKnownZero = Known.isNonNegative();
1156  bool SignKnownOne = Known.isNegative();
1157 
1158  // Zero-extension widens the variable, and so forces the sign
1159  // bit to zero.
1160  bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1161  SignKnownZero |= IsZExt;
1162  SignKnownOne &= !IsZExt;
1163 
1164  AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1165  (SignKnownOne && Scale.isNonPositive());
1166  AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1167  (SignKnownOne && Scale.isNonNegative());
1168  }
1169  }
1170 
1171  // We now have accesses at two offsets from the same base:
1172  // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1173  // 2. 0 with size V2Size
1174  // Using arithmetic modulo GCD, the accesses are at
1175  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1176  // into the range [V2Size..GCD), then we know they cannot overlap.
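    // For illustration (editor's example, not part of the original source):
    // with GCD == 8, DecompGEP1.Offset == 14 (so ModOffset == 6), V1Size == 2
    // and V2Size == 4, the accesses occupy [6, 8) and [0, 4) modulo 8, so the
    // checks below prove they cannot overlap.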
1177  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1178  if (ModOffset.isNegative())
1179  ModOffset += GCD; // We want mod, not rem.
1180  if (V1Size.hasValue() && V2Size.hasValue() &&
1181  ModOffset.uge(V2Size.getValue()) &&
1182  (GCD - ModOffset).uge(V1Size.getValue()))
1183  return AliasResult::NoAlias;
1184 
1185  // If we know all the variables are non-negative, then the total offset is
1186  // also non-negative and >= DecompGEP1.Offset. We have the following layout:
1187     // [0, V2Size) ... [TotalOffset, TotalOffset+V1Size)
1188  // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1189  if (AllNonNegative && V2Size.hasValue() &&
1190  DecompGEP1.Offset.uge(V2Size.getValue()))
1191  return AliasResult::NoAlias;
1192  // Similarly, if the variables are non-positive, then the total offset is
1193  // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1194  // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1195  // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1196  if (AllNonPositive && V1Size.hasValue() &&
1197  (-DecompGEP1.Offset).uge(V1Size.getValue()))
1198  return AliasResult::NoAlias;
1199 
1200  if (V1Size.hasValue() && V2Size.hasValue()) {
1201  // Try to determine whether abs(VarIndex) > 0.
1202  Optional<APInt> MinAbsVarIndex;
1203  if (DecompGEP1.VarIndices.size() == 1) {
1204  // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
1205  const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1206  if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
1207  MinAbsVarIndex = Var.Scale.abs();
1208  } else if (DecompGEP1.VarIndices.size() == 2) {
1209  // VarIndex = Scale*V0 + (-Scale)*V1.
1210  // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1211  // Check that VisitedPhiBBs is empty, to avoid reasoning about
1212  // inequality of values across loop iterations.
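      // For illustration (editor's example, not part of the original source):
      // comparing %p = gep i32, i32* %base, i64 %i against
      // %q = gep i32, i32* %base, i64 %j leaves indices {(%i, +4), (%j, -4)};
      // if %i and %j are known unequal, the two i32 accesses are at least 4
      // bytes apart and cannot overlap.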
1213  const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1214  const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1215  if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
1216  Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
1217  isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
1218  MinAbsVarIndex = Var0.Scale.abs();
1219  }
1220 
1221  if (MinAbsVarIndex) {
1222  // The constant offset will have added at least +/-MinAbsVarIndex to it.
1223  APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1224  APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1225  // Check that an access at OffsetLo or lower, and an access at OffsetHi
1226  // or higher both do not alias.
1227  if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1228  OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1229  return AliasResult::NoAlias;
1230  }
1231  }
1232 
1233  if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1234  DecompGEP1.Offset, &AC, DT))
1235  return AliasResult::NoAlias;
1236  }
1237 
1238  // Statically, we can see that the base objects are the same, but the
1239  // pointers have dynamic offsets which we can't resolve. And none of our
1240  // little tricks above worked.
1241  return AliasResult::MayAlias;
1242 }
1243 
1244 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1245   // If the results agree, take it.
1246  if (A == B)
1247  return A;
1248   // A mix of PartialAlias and MustAlias is PartialAlias.
1249   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1250       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1251     return AliasResult::PartialAlias;
1252   // Otherwise, we don't know anything.
1253  return AliasResult::MayAlias;
1254 }
1255 
1256 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1257 /// against another.
1258 AliasResult
1259 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1260  const Value *V2, LocationSize V2Size,
1261  AAQueryInfo &AAQI) {
1262  // If the values are Selects with the same condition, we can do a more precise
1263  // check: just check for aliases between the values on corresponding arms.
1264  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1265  if (SI->getCondition() == SI2->getCondition()) {
1266       AliasResult Alias = getBestAAResults().alias(
1267           MemoryLocation(SI->getTrueValue(), SISize),
1268  MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1269  if (Alias == AliasResult::MayAlias)
1270  return AliasResult::MayAlias;
1271  AliasResult ThisAlias = getBestAAResults().alias(
1272  MemoryLocation(SI->getFalseValue(), SISize),
1273  MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1274  return MergeAliasResults(ThisAlias, Alias);
1275  }
1276 
1277  // If both arms of the Select node NoAlias or MustAlias V2, then returns
1278  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1279   AliasResult Alias = getBestAAResults().alias(
1280       MemoryLocation(V2, V2Size),
1281  MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1282  if (Alias == AliasResult::MayAlias)
1283  return AliasResult::MayAlias;
1284 
1285  AliasResult ThisAlias = getBestAAResults().alias(
1286  MemoryLocation(V2, V2Size),
1287  MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1288  return MergeAliasResults(ThisAlias, Alias);
1289 }
1290 
1291 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1292 /// another.
1293 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1294  const Value *V2, LocationSize V2Size,
1295  AAQueryInfo &AAQI) {
1296  // If the values are PHIs in the same block, we can do a more precise
1297  // as well as efficient check: just check for aliases between the values
1298  // on corresponding edges.
1299  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1300  if (PN2->getParent() == PN->getParent()) {
1301  Optional<AliasResult> Alias;
1302  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1303  AliasResult ThisAlias = getBestAAResults().alias(
1304  MemoryLocation(PN->getIncomingValue(i), PNSize),
1305             MemoryLocation(
1306                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1307  AAQI);
1308  if (Alias)
1309  *Alias = MergeAliasResults(*Alias, ThisAlias);
1310  else
1311  Alias = ThisAlias;
1312  if (*Alias == AliasResult::MayAlias)
1313  break;
1314  }
1315  return *Alias;
1316  }
1317 
1318  SmallVector<Value *, 4> V1Srcs;
1319  // If a phi operand recurses back to the phi, we can still determine NoAlias
1320  // if we don't alias the underlying objects of the other phi operands, as we
1321  // know that the recursive phi needs to be based on them in some way.
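  // For illustration (editor's sketch, not part of the original source): for
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr i8, i8* %p, i64 4
  // the %p.next operand recurses back to %p, so only the other operand's
  // underlying object (%base) needs to be considered.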
1322  bool isRecursive = false;
1323  auto CheckForRecPhi = [&](Value *PV) {
1324  if (!EnableRecPhiAnalysis)
1325  return false;
1326  if (getUnderlyingObject(PV) == PN) {
1327  isRecursive = true;
1328  return true;
1329  }
1330  return false;
1331  };
1332 
1333  if (PV) {
1334  // If we have PhiValues then use it to get the underlying phi values.
1335  const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1336  // If we have more phi values than the search depth then return MayAlias
1337  // conservatively to avoid compile time explosion. The worst possible case
1338  // is if both sides are PHI nodes. In which case, this is O(m x n) time
1339  // where 'm' and 'n' are the number of PHI sources.
1340  if (PhiValueSet.size() > MaxLookupSearchDepth)
1341  return AliasResult::MayAlias;
1342  // Add the values to V1Srcs
1343  for (Value *PV1 : PhiValueSet) {
1344  if (CheckForRecPhi(PV1))
1345  continue;
1346  V1Srcs.push_back(PV1);
1347  }
1348  } else {
1349  // If we don't have PhiInfo then just look at the operands of the phi itself
1350  // FIXME: Remove this once we can guarantee that we have PhiInfo always
1351  SmallPtrSet<Value *, 4> UniqueSrc;
1352  Value *OnePhi = nullptr;
1353  for (Value *PV1 : PN->incoming_values()) {
1354  if (isa<PHINode>(PV1)) {
1355  if (OnePhi && OnePhi != PV1) {
1356  // To control potential compile time explosion, we choose to be
1357           // conservative when we have more than one Phi input. It is important
1358  // that we handle the single phi case as that lets us handle LCSSA
1359  // phi nodes and (combined with the recursive phi handling) simple
1360  // pointer induction variable patterns.
1361  return AliasResult::MayAlias;
1362  }
1363  OnePhi = PV1;
1364  }
1365 
1366  if (CheckForRecPhi(PV1))
1367  continue;
1368 
1369  if (UniqueSrc.insert(PV1).second)
1370  V1Srcs.push_back(PV1);
1371  }
1372 
1373  if (OnePhi && UniqueSrc.size() > 1)
1374  // Out of an abundance of caution, allow only the trivial lcssa and
1375  // recursive phi cases.
1376  return AliasResult::MayAlias;
1377  }
1378 
1379  // If V1Srcs is empty then that means that the phi has no underlying non-phi
1380  // value. This should only be possible in blocks unreachable from the entry
1381  // block, but return MayAlias just in case.
1382  if (V1Srcs.empty())
1383  return AliasResult::MayAlias;
1384 
1385  // If this PHI node is recursive, indicate that the pointer may be moved
1386  // across iterations. We can only prove NoAlias if different underlying
1387  // objects are involved.
1388  if (isRecursive)
1389     PNSize = LocationSize::beforeOrAfterPointer();
1390 
1391  // In the recursive alias queries below, we may compare values from two
1392  // different loop iterations. Keep track of visited phi blocks, which will
1393  // be used when determining value equivalence.
1394  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1395  auto _ = make_scope_exit([&]() {
1396  if (BlockInserted)
1397  VisitedPhiBBs.erase(PN->getParent());
1398  });
1399 
1400  // If we inserted a block into VisitedPhiBBs, alias analysis results that
1401  // have been cached earlier may no longer be valid. Perform recursive queries
1402  // with a new AAQueryInfo.
1403  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1404  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1405 
1406   AliasResult Alias = getBestAAResults().alias(
1407       MemoryLocation(V2, V2Size),
1408  MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1409 
1410  // Early exit if the check of the first PHI source against V2 is MayAlias.
1411  // Other results are not possible.
1412  if (Alias == AliasResult::MayAlias)
1413  return AliasResult::MayAlias;
1414   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1415   // remain valid for all elements, so we conservatively return MayAlias.
1416  if (isRecursive && Alias != AliasResult::NoAlias)
1417  return AliasResult::MayAlias;
1418 
1419  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1420  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1421  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1422  Value *V = V1Srcs[i];
1423 
1424  AliasResult ThisAlias = getBestAAResults().alias(
1425  MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1426  Alias = MergeAliasResults(ThisAlias, Alias);
1427  if (Alias == AliasResult::MayAlias)
1428  break;
1429  }
1430 
1431  return Alias;
1432 }
1433 
1434 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1435 /// array references.
1436 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1437  const Value *V2, LocationSize V2Size,
1438  AAQueryInfo &AAQI) {
1439  // If either of the memory references is empty, it doesn't matter what the
1440  // pointer values are.
1441  if (V1Size.isZero() || V2Size.isZero())
1442  return AliasResult::NoAlias;
1443 
1444  // Strip off any casts if they exist.
1445   V1 = V1->stripPointerCastsForAliasAnalysis();
1446   V2 = V2->stripPointerCastsForAliasAnalysis();
1447 
1448  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1449  // value for undef that aliases nothing in the program.
1450  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1451  return AliasResult::NoAlias;
1452 
1453  // Are we checking for alias of the same value?
1454  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1455  // different iterations. We must therefore make sure that this is not the
1456  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1457  // happen by looking at the visited phi nodes and making sure they cannot
1458  // reach the value.
1459  if (isValueEqualInPotentialCycles(V1, V2))
1460  return AliasResult::MustAlias;
1461 
1462  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1463  return AliasResult::NoAlias; // Scalars cannot alias each other
1464 
1465  // Figure out what objects these things are pointing to if we can.
1466   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1467   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1468 
1469  // Null values in the default address space don't point to any object, so they
1470  // don't alias any other pointer.
1471  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1472  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1473  return AliasResult::NoAlias;
1474  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1475  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1476  return AliasResult::NoAlias;
1477 
1478  if (O1 != O2) {
1479  // If V1/V2 point to two different objects, we know that we have no alias.
1480  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1481  return AliasResult::NoAlias;
1482 
1483  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1484  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1485  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1486  return AliasResult::NoAlias;
1487 
1488  // Function arguments can't alias with things that are known to be
 1489  // unambiguously identified at the function level.
1490  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1491  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1492  return AliasResult::NoAlias;
1493 
1494  // If one pointer is the result of a call/invoke or load and the other is a
1495  // non-escaping local object within the same function, then we know the
1496  // object couldn't escape to a point where the call could return it.
1497  //
1498  // Note that if the pointers are in different functions, there are a
1499  // variety of complications. A call with a nocapture argument may still
 1500  // temporarily store the nocapture argument's value in a temporary memory
1501  // location if that memory location doesn't escape. Or it may pass a
1502  // nocapture value to other functions as long as they don't capture it.
 1503  if (isEscapeSource(O1) &&
 1504  isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
 1505  return AliasResult::NoAlias;
 1506  if (isEscapeSource(O2) &&
 1507  isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
 1508  return AliasResult::NoAlias;
1509  }
1510 
1511  // If the size of one access is larger than the entire object on the other
1512  // side, then we know such behavior is undefined and can assume no alias.
1513  bool NullIsValidLocation = NullPointerIsDefined(&F);
1514  if ((isObjectSmallerThan(
1515  O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
 1516  TLI, NullIsValidLocation)) ||
 1517  (isObjectSmallerThan(
 1518  O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1519  TLI, NullIsValidLocation)))
1520  return AliasResult::NoAlias;
1521 
 1522  // If one of the accesses may be before the accessed pointer, canonicalize this
1523  // by using unknown after-pointer sizes for both accesses. This is
1524  // equivalent, because regardless of which pointer is lower, one of them
 1525  // will always come after the other, as long as the underlying objects aren't
1526  // disjoint. We do this so that the rest of BasicAA does not have to deal
1527  // with accesses before the base pointer, and to improve cache utilization by
1528  // merging equivalent states.
1529  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1530  V1Size = LocationSize::afterPointer();
1531  V2Size = LocationSize::afterPointer();
1532  }
1533 
1534  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1535  // for recursive queries. For this reason, this limit is chosen to be large
1536  // enough to be very rarely hit, while still being small enough to avoid
1537  // stack overflows.
1538  if (AAQI.Depth >= 512)
1539  return AliasResult::MayAlias;
1540 
1541  // Check the cache before climbing up use-def chains. This also terminates
1542  // otherwise infinitely recursive queries.
1543  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1544  const bool Swapped = V1 > V2;
1545  if (Swapped)
1546  std::swap(Locs.first, Locs.second);
 1547  const auto &Pair = AAQI.AliasCache.try_emplace(
 1548  Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
 1549  if (!Pair.second) {
1550  auto &Entry = Pair.first->second;
1551  if (!Entry.isDefinitive()) {
1552  // Remember that we used an assumption.
1553  ++Entry.NumAssumptionUses;
1554  ++AAQI.NumAssumptionUses;
1555  }
 1556  // Cache contains sorted {V1,V2} pairs but we should return the original order.
1557  auto Result = Entry.Result;
1558  Result.swap(Swapped);
1559  return Result;
1560  }
1561 
1562  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
 1563  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
 1564  AliasResult Result =
 1565  aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1566 
1567  auto It = AAQI.AliasCache.find(Locs);
1568  assert(It != AAQI.AliasCache.end() && "Must be in cache");
1569  auto &Entry = It->second;
1570 
1571  // Check whether a NoAlias assumption has been used, but disproven.
1572  bool AssumptionDisproven =
1573  Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
 1574  if (AssumptionDisproven)
 1575  Result = AliasResult::MayAlias;
 1576 
1577  // This is a definitive result now, when considered as a root query.
1578  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1579  Entry.Result = Result;
1580  // Cache contains sorted {V1,V2} pairs.
1581  Entry.Result.swap(Swapped);
1582  Entry.NumAssumptionUses = -1;
1583 
1584  // If the assumption has been disproven, remove any results that may have
1585  // been based on this assumption. Do this after the Entry updates above to
1586  // avoid iterator invalidation.
1587  if (AssumptionDisproven)
 1588  while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
 1589  AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
 1590 
1591  // The result may still be based on assumptions higher up in the chain.
1592  // Remember it, so it can be purged from the cache later.
1593  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1594  Result != AliasResult::MayAlias)
1595  AAQI.AssumptionBasedResults.push_back(Locs);
1596  return Result;
1597 }
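// A minimal sketch (not from the LLVM sources) of the cache canonicalization
// used above, with hypothetical pointers A < B by address: both alias(A, B)
// and alias(B, A) are stored under the single key {A, B}. Because results such
// as PartialAlias can carry a signed offset that depends on operand order, the
// cached value is passed through AliasResult::swap(Swapped) both when storing
// and when returning it, e.g. a PartialAlias at offset +4 for (A, B) would be
// reported with offset -4 for the query (B, A).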
1598 
1599 AliasResult BasicAAResult::aliasCheckRecursive(
1600  const Value *V1, LocationSize V1Size,
1601  const Value *V2, LocationSize V2Size,
1602  AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1603  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1604  AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1605  if (Result != AliasResult::MayAlias)
1606  return Result;
1607  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1608  AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1609  if (Result != AliasResult::MayAlias)
1610  return Result;
1611  }
1612 
1613  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1614  AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1615  if (Result != AliasResult::MayAlias)
1616  return Result;
1617  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1618  AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1619  if (Result != AliasResult::MayAlias)
1620  return Result;
1621  }
1622 
1623  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1624  AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1625  if (Result != AliasResult::MayAlias)
1626  return Result;
1627  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1628  AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1629  if (Result != AliasResult::MayAlias)
1630  return Result;
1631  }
1632 
1633  // If both pointers are pointing into the same object and one of them
1634  // accesses the entire object, then the accesses must overlap in some way.
1635  if (O1 == O2) {
1636  bool NullIsValidLocation = NullPointerIsDefined(&F);
1637  if (V1Size.isPrecise() && V2Size.isPrecise() &&
1638  (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
 1639  isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
 1640  return AliasResult::PartialAlias;
 1641  }
1642 
1643  return AliasResult::MayAlias;
1644 }
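// An illustrative case (not from the LLVM sources) of the dispatch above; the
// value names are hypothetical. For a query like alias(%s, %g) where %g is a
// GEP and %s is a select, the GEP branch fires first with the operands swapped
// (aliasGEP(GV2, ...)), and only if that result stays MayAlias do the PHI and
// select handlers get a chance, before falling back to the same-object check.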
1645 
1646 /// Check whether two Values can be considered equivalent.
1647 ///
1648 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
 1649  /// they cannot be part of a cycle in the value graph by looking at all
 1650  /// visited phi nodes and making sure that the phis cannot reach the value. We
 1651  /// have to do this because we are looking through phi nodes (that is, we say
 1652  /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1653 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1654  const Value *V2) {
1655  if (V != V2)
1656  return false;
1657 
1658  const Instruction *Inst = dyn_cast<Instruction>(V);
1659  if (!Inst)
1660  return true;
1661 
1662  if (VisitedPhiBBs.empty())
1663  return true;
1664 
1665  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1666  return false;
1667 
1668  // Make sure that the visited phis cannot reach the Value. This ensures that
1669  // the Values cannot come from different iterations of a potential cycle the
1670  // phi nodes could be involved in.
1671  for (auto *P : VisitedPhiBBs)
1672  if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1673  return false;
1674 
1675  return true;
1676 }
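// A minimal sketch (not from the LLVM sources) of the situation this guards
// against; the IR names are hypothetical:
//
//   loop:
//     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, i8* %p, i64 1
//
// Once aliasPHI has looked through %p, a recursive query can end up comparing
// %p.next with %p.next, yet the two occurrences may stand for different loop
// iterations. Because %p.next is reachable from the visited phi block, the
// reachability check above conservatively answers false.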
1677 
1678 /// Computes the symbolic difference between two de-composed GEPs.
1679 ///
1680 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1681 /// instructions GEP1 and GEP2 which have common base pointers.
 1682 void BasicAAResult::GetIndexDifference(
 1683  SmallVectorImpl<VariableGEPIndex> &Dest,
 1684  const SmallVectorImpl<VariableGEPIndex> &Src) {
1685  if (Src.empty())
1686  return;
1687 
1688  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1689  const Value *V = Src[i].V;
1690  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1691  APInt Scale = Src[i].Scale;
1692 
1693  // Find V in Dest. This is N^2, but pointer indices almost never have more
1694  // than a few variable indexes.
1695  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1696  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1697  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1698  continue;
1699 
1700  // If we found it, subtract off Scale V's from the entry in Dest. If it
1701  // goes to zero, remove the entry.
1702  if (Dest[j].Scale != Scale)
1703  Dest[j].Scale -= Scale;
1704  else
1705  Dest.erase(Dest.begin() + j);
1706  Scale = 0;
1707  break;
1708  }
1709 
1710  // If we didn't consume this entry, add it to the end of the Dest list.
1711  if (!!Scale) {
1712  VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale, Src[i].CxtI};
1713  Dest.push_back(Entry);
1714  }
1715  }
1716 }
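// A worked example (not from the LLVM sources) of the subtraction above, with
// hypothetical indices: given Dest = { %i * 4, %j * 8 } and Src = { %i * 4,
// %k * 2 }, the %i terms cancel and that entry is erased from Dest, %j * 8 is
// left untouched, and the unmatched %k * 2 is appended with a negated scale,
// leaving Dest = { %j * 8, %k * -2 }, i.e. the symbolic difference Dest - Src.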
1717 
1718 bool BasicAAResult::constantOffsetHeuristic(
1719  const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1720  LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
1721  AssumptionCache *AC, DominatorTree *DT) {
1722  if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1723  !MaybeV2Size.hasValue())
1724  return false;
1725 
1726  const uint64_t V1Size = MaybeV1Size.getValue();
1727  const uint64_t V2Size = MaybeV2Size.getValue();
1728 
1729  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1730 
1731  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1732  Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
1733  return false;
1734 
1735  // We'll strip off the Extensions of Var0 and Var1 and do another round
 1736  // of GetLinearExpression decomposition. For example, if Var0
 1737  // is zext(%x + 1) we should get %x back with a constant offset of 1.
1738 
1739  LinearExpression E0 =
1740  GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
1741  LinearExpression E1 =
1742  GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
1743  if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
1744  E0.Val.SExtBits != E1.Val.SExtBits ||
1745  !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1746  return false;
1747 
1748  // We have a hit - Var0 and Var1 only differ by a constant offset!
1749 
 1750  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1751  // Var1 is possible to calculate, but we're just interested in the absolute
1752  // minimum difference between the two. The minimum distance may occur due to
1753  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1754  // the minimum distance between %i and %i + 5 is 3.
1755  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1756  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1757  APInt MinDiffBytes =
1758  MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1759 
1760  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1761  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1762  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1763  // V2Size can fit in the MinDiffBytes gap.
1764  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
1765  MinDiffBytes.uge(V2Size + BaseOffset.abs());
1766 }
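// A worked instance (not from the LLVM sources) of the i3 example in the
// comment above, under the assumption that the stripped expressions are
// %x + 5 and %x, with |Scale| = 1 and BaseOffset = 0: MinDiff =
// umin(5, -5 mod 8) = 3, so MinDiffBytes = 3. Two 1-byte accesses then satisfy
// 3 >= 1 + 0 on both sides and may be reported NoAlias, whereas 4-byte
// accesses fail the check and the heuristic returns false.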
1767 
1768 //===----------------------------------------------------------------------===//
1769 // BasicAliasAnalysis Pass
1770 //===----------------------------------------------------------------------===//
1771 
1772 AnalysisKey BasicAA::Key;
 1773 
 1774 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
 1775  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1776  auto &AC = AM.getResult<AssumptionAnalysis>(F);
1777  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1778  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1779  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1780 }
 1781 
 1782 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
 1783  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
 1784 }
1785 
1786 char BasicAAWrapperPass::ID = 0;
1787 
1788 void BasicAAWrapperPass::anchor() {}
 1789 
 1790 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
 1791  "Basic Alias Analysis (stateless AA impl)", true, true)
 1792 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 1793 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
 1794 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 1795 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
 1796 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
 1797  "Basic Alias Analysis (stateless AA impl)", true, true)
 1798 
 1799 FunctionPass *llvm::createBasicAAWrapperPass() {
 1800  return new BasicAAWrapperPass();
1801 }
 1802 
 1803 bool BasicAAWrapperPass::runOnFunction(Function &F) {
 1804  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1805  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1806  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1807  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1808 
1809  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1810  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1811  &DTWP.getDomTree(),
1812  PVWP ? &PVWP->getResult() : nullptr));
1813 
1814  return false;
1815 }
 1816 
 1817 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
 1818  AU.setPreservesAll();
 1819  AU.addRequiredTransitive<AssumptionCacheTracker>();
 1820  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
 1821  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
 1822  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
 1823 }
 1824 
 1825 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
 1826  return BasicAAResult(
1827  F.getParent()->getDataLayout(), F,
1828  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1829  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1830 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
i
i
Definition: README.txt:29
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
llvm::ModRefInfo::NoModRef
@ NoModRef
The access neither references nor modifies the value stored in memory.
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
AssumptionCache.h
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:272
llvm::AliasResult::MayAlias
@ MayAlias
The two locations may or may not alias.
Definition: AliasAnalysis.h:102
llvm::isMallocOrCallocLikeFn
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory similar to malloc or...
Definition: MemoryBuiltins.cpp:298
llvm::AliasResult::PartialAlias
@ PartialAlias
The two locations alias, but only due to a partial overlap.
Definition: AliasAnalysis.h:104
llvm
Definition: AllocatorList.h:23
llvm::SmallVectorImpl::erase
iterator erase(const_iterator CI)
Definition: SmallVector.h:705
llvm::Operator
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:30
llvm::SmallPtrSetImpl::erase
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:378
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:360
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2658
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
ForceAtLeast64Bits
static cl::opt< bool > ForceAtLeast64Bits("basic-aa-force-at-least-64b", cl::Hidden, cl::init(true))
By default, even on 32-bit architectures we use 64-bit integers for calculations.
llvm::isModAndRefSet
LLVM_NODISCARD bool isModAndRefSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:192
llvm::BasicAA::run
BasicAAResult run(Function &F, FunctionAnalysisManager &AM)
Definition: BasicAliasAnalysis.cpp:1774
llvm::generic_gep_type_iterator
Definition: GetElementPtrTypeIterator.h:31
llvm::LocationSize::afterPointer
constexpr static LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:123
Metadata.h
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:229
llvm::isNonEscapingLocalObject
bool isNonEscapingLocalObject(const Value *V, SmallDenseMap< const Value *, bool, 8 > *IsCapturedCache=nullptr)
Returns true if the pointer is to a function-local object that never escapes from the function.
Definition: CaptureTracking.cpp:377
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:769
llvm::MemoryLocation::Ptr
const Value * Ptr
The address of the start of the location.
Definition: MemoryLocation.h:217
llvm::Function
Definition: Function.h:61
llvm::AnalysisManager::Invalidator::invalidate
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:674
getMinimalExtentFrom
static uint64_t getMinimalExtentFrom(const Value &V, const LocationSize &LocSize, const DataLayout &DL, bool NullIsValidLoc)
Return the minimal extent from V to the end of the underlying object, assuming the result is used in ...
Definition: BasicAliasAnalysis.cpp:203
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
Pass.h
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:52
llvm::SetVector::size
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:77
GetElementPtrTypeIterator.h
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
Statistic.h
CaptureTracking.h
llvm::initializeBasicAAWrapperPassPass
void initializeBasicAAWrapperPassPass(PassRegistry &)
llvm::GlobalVariable
Definition: GlobalVariable.h:40
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:948
llvm::createModRefInfo
LLVM_NODISCARD ModRefInfo createModRefInfo(const FunctionModRefBehavior FMRB)
Definition: AliasAnalysis.h:377
llvm::BasicAAWrapperPass::runOnFunction
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition: BasicAliasAnalysis.cpp:1803
llvm::GlobalAlias
Definition: GlobalAlias.h:27
ValueTracking.h
MaxLookupSearchDepth
static const unsigned MaxLookupSearchDepth
Definition: BasicAliasAnalysis.cpp:96
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition: DenseMap.h:302
llvm::isKnownNonZero
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
Definition: ValueTracking.cpp:309
llvm::PhiValuesWrapperPass
Wrapper pass for the legacy pass manager.
Definition: PhiValues.h:139
APInt.h
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:34
llvm::MemoryLocation::Size
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
Definition: MemoryLocation.h:226
MemoryBuiltins.h
llvm::ObjectSizeOpts::NullIsUnknownSize
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Definition: MemoryBuiltins.h:208
isIntrinsicCall
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
Definition: BasicAliasAnalysis.cpp:662
llvm::Optional
Definition: APInt.h:33
llvm::AliasResult
The possible results of an alias query.
Definition: AliasAnalysis.h:81
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::SmallPtrSet< Value *, 4 >
llvm::ObjectSizeOpts::RoundToAlign
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
Definition: MemoryBuiltins.h:204
impl
place backedge safepoints impl
Definition: PlaceSafepoints.cpp:612
llvm::AAQueryInfo::IsCapturedCache
IsCapturedCacheT IsCapturedCache
Definition: AliasAnalysis.h:429
Operator.h
getBitWidth
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Definition: ValueTracking.cpp:88
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::gep_type_begin
gep_type_iterator gep_type_begin(const User *GEP)
Definition: GetElementPtrTypeIterator.h:139
llvm::LocationSize::isPrecise
bool isPrecise() const
Definition: MemoryLocation.h:165
BasicAliasAnalysis.h
llvm::FMRB_UnknownModRefBehavior
@ FMRB_UnknownModRefBehavior
This indicates that the function could not be classified into one of the behaviors above.
Definition: AliasAnalysis.h:368
llvm::BasicAAWrapperPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: BasicAliasAnalysis.cpp:1817
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2185
F
#define F(x, y, z)
Definition: MD5.cpp:56
isEscapeSource
static bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
Definition: BasicAliasAnalysis.cpp:119
llvm::FMRB_OnlyAccessesInaccessibleMem
@ FMRB_OnlyAccessesInaccessibleMem
The only memory references in this function (if it has any) are references of memory that is otherwis...
Definition: AliasAnalysis.h:315
KnownBits.h
AliasAnalysis.h
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
Definition: AliasAnalysis.cpp:991
llvm::ConstantPointerNull
A constant pointer value that points to null.
Definition: Constants.h:533
llvm::AAQueryInfo
This class stores info we want to provide to or retain within an alias query.
Definition: AliasAnalysis.h:414
isObjectSmallerThan
static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V is smaller than Size.
Definition: BasicAliasAnalysis.cpp:159
Instruction.h
CommandLine.h
llvm::FMRB_OnlyAccessesInaccessibleOrArgMem
@ FMRB_OnlyAccessesInaccessibleOrArgMem
The function may perform non-volatile loads and stores of objects pointed to by its pointer-typed arg...
Definition: AliasAnalysis.h:347
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1313
getMaxPointerSize
static unsigned getMaxPointerSize(const DataLayout &DL)
Definition: BasicAliasAnalysis.cpp:399
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:77
llvm::KnownBits::isNonNegative
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition: KnownBits.h:99
llvm::LocationSize::mayBeBeforePointer
bool mayBeBeforePointer() const
Whether accesses before the base pointer are possible.
Definition: MemoryLocation.h:173
llvm::APInt::isNonNegative
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:369
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:364
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::ModRefInfo::Ref
@ Ref
The access may reference the value stored in memory.
Constants.h
llvm::BasicAAWrapperPass::ID
static char ID
Definition: BasicAliasAnalysis.h:251
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Definition: Instructions.h:2668
llvm::AArch64::RP
@ RP
Definition: AArch64ISelLowering.h:470
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::LocationSize
Definition: MemoryLocation.h:65
llvm::LibFunc
LibFunc
Definition: TargetLibraryInfo.h:34
Intrinsics.h
DoubleCalcBits
static cl::opt< bool > DoubleCalcBits("basic-aa-double-calc-bits", cl::Hidden, cl::init(false))
InstrTypes.h
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::MemoryLocation::getForSource
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
Definition: MemoryLocation.cpp:105
llvm::LocationSize::getValue
uint64_t getValue() const
Definition: MemoryLocation.h:158
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::APInt::getLimitedValue
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:487
TargetLibraryInfo.h
llvm::isPotentiallyReachable
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:236
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::TargetLibraryInfo::getLibFunc
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Definition: TargetLibraryInfo.h:278
llvm::make_scope_exit
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:58
llvm::Instruction
Definition: Instruction.h:45
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:281
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::AArch64CC::LE
@ LE
Definition: AArch64BaseInfo.h:249
llvm::AAQueryInfo::AssumptionBasedResults
SmallVector< AAQueryInfo::LocPair, 4 > AssumptionBasedResults
Location pairs for which an assumption based result is currently stored.
Definition: AliasAnalysis.h:440
llvm::getArgumentAliasingToReturnedPointer
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
Definition: ValueTracking.cpp:4259
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Definition: ValueTracking.cpp:4314
SmallPtrSet.h
MaxNumPhiBBsValueReachabilityCheck
const unsigned MaxNumPhiBBsValueReachabilityCheck
Cutoff after which to stop analysing a set of phi nodes potentially involved in a cycle.
Definition: BasicAliasAnalysis.cpp:91
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", "Basic Alias Analysis (stateless AA impl)", true, true) INITIALIZE_PASS_END(BasicAAWrapperPass
llvm::KnownBits::isNegative
bool isNegative() const
Returns true if this value is known to be negative.
Definition: KnownBits.h:96
llvm::AnalysisManager::Invalidator
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:656
isObjectSize
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V has size Size.
Definition: BasicAliasAnalysis.cpp:223
GetLinearExpression
static LinearExpression GetLinearExpression(const ExtendedValue &Val, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT)
Analyzes the specified value as a linear expression: "A*V + B", where A and B are constant integers.
Definition: BasicAliasAnalysis.cpp:301
llvm::generic_gep_type_iterator::getStructTypeOrNull
StructType * getStructTypeOrNull() const
Definition: GetElementPtrTypeIterator.h:123
llvm::isModSet
LLVM_NODISCARD bool isModSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:196
llvm::getObjectSize
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Definition: MemoryBuiltins.cpp:515
llvm::AAResultBase::AAResultsProxy::alias
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI)
Definition: AliasAnalysis.h:1105
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2664
llvm::None
const NoneType None
Definition: None.h:23
Type.h
llvm::APInt::srem
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1763
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::PhiValues::getValuesForPhi
const ValueSet & getValuesForPhi(const PHINode *PN)
Get the underlying values of a phi.
Definition: PhiValues.cpp:125
llvm::setRef
LLVM_NODISCARD ModRefInfo setRef(const ModRefInfo MRI)
Definition: AliasAnalysis.h:210
llvm::BasicAAResult::pointsToConstantMemory
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Chases pointers until we find a (constant global) or not.
Definition: BasicAliasAnalysis.cpp:604
llvm::OutputFileType::Object
@ Object
llvm::clearMust
LLVM_NODISCARD ModRefInfo clearMust(const ModRefInfo MRI)
Definition: AliasAnalysis.h:228
llvm::AAQueryInfo::LocPair
std::pair< AACacheLoc, AACacheLoc > LocPair
Definition: AliasAnalysis.h:416
llvm::BasicAAResult::alias
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI)
Definition: BasicAliasAnalysis.cpp:789
llvm::cl::opt< bool >
llvm::BasicAAResult::invalidate
bool invalidate(Function &Fn, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
Definition: BasicAliasAnalysis.cpp:98
llvm::isNoModRef
LLVM_NODISCARD bool isNoModRef(const ModRefInfo MRI)
Definition: AliasAnalysis.h:185
llvm::Value::getPointerDereferenceableBytes
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:804
llvm::FMRB_OnlyAccessesArgumentPointees
@ FMRB_OnlyAccessesArgumentPointees
The only memory references in this function (if it has any) are non-volatile loads and stores from ob...
Definition: AliasAnalysis.h:294
Index
uint32_t Index
Definition: ELFObjHandler.cpp:84
llvm::TargetLibraryInfoWrapperPass
Definition: TargetLibraryInfo.h:447
llvm::LocationSize::hasValue
bool hasValue() const
Definition: MemoryLocation.h:155
llvm::LocationSize::isZero
bool isZero() const
Definition: MemoryLocation.h:170
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:572
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition: AssumptionCache.h:169
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::BasicAAWrapperPass
Legacy wrapper pass to provide the BasicAAResult object.
Definition: BasicAliasAnalysis.h:245
llvm::AAQueryInfo::NumAssumptionUses
int NumAssumptionUses
How many active NoAlias assumption uses there are.
Definition: AliasAnalysis.h:435
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
MemoryLocation.h
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:72
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
llvm::PhiValuesAnalysis
The analysis pass which yields a PhiValues.
Definition: PhiValues.h:116
llvm::isKnownNonEqual
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
Definition: ValueTracking.cpp:348
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
llvm::computeKnownBits
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Definition: ValueTracking.cpp:212
llvm::ModRefInfo
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: AliasAnalysis.h:148
llvm::AAQueryInfo::CacheEntry
Definition: AliasAnalysis.h:417
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:840
llvm::AliasResult::setOffset
void setOffset(int32_t NewOffset)
Definition: AliasAnalysis.h:122
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::generic_gep_type_iterator::getIndexedType
Type * getIndexedType() const
Definition: GetElementPtrTypeIterator.h:72
llvm::Value::stripPointerCastsForAliasAnalysis
const Value * stripPointerCastsForAliasAnalysis() const
Strip off pointer casts, all-zero GEPs, single-argument phi nodes and invariant group info.
Definition: Value.cpp:665
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1715
llvm::TargetLibraryInfo::has
bool has(LibFunc F) const
Tests whether a library function is available.
Definition: TargetLibraryInfo.h:312
llvm::FMRB_DoesNotAccessMemory
@ FMRB_DoesNotAccessMemory
This function does not perform any non-local loads or stores to memory.
Definition: AliasAnalysis.h:268
llvm::AAResultBase::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Definition: AliasAnalysis.h:1174
llvm::AMDGPU::IsaInfo::TargetIDSetting::Off
@ Off
llvm::GEPOperator
Definition: Operator.h:457
_
#define _
Definition: HexagonMCCodeEmitter.cpp:47
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:70
CFG.h
llvm::AliasResult::swap
void swap(bool DoSwap=true)
Helper for processing AliasResult for swapped memory location pairs.
Definition: AliasAnalysis.h:130
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition: AssumptionCache.h:200
llvm::BinaryOperator
Definition: InstrTypes.h:190
llvm::BasicAAResult::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Checks to see if the specified callsite can clobber the specified memory object.
Definition: BasicAliasAnalysis.cpp:803
DataLayout.h
llvm::StructType
Class to represent struct types.
Definition: DerivedTypes.h:212
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:41
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::BasicAAResult::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Get the location associated with a pointer argument of a callsite.
Definition: BasicAliasAnalysis.cpp:751
getParent
static const Function * getParent(const Value *V)
Definition: BasicAliasAnalysis.cpp:767
llvm::FMRB_OnlyReadsMemory
@ FMRB_OnlyReadsMemory
This function does not perform any non-local stores or volatile loads, but may read from any memory l...
Definition: AliasAnalysis.h:357
llvm::AliasResult::NoAlias
@ NoAlias
The two locations do not alias at all.
Definition: AliasAnalysis.h:99
Compiler.h
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:1667
llvm::ModRefInfo::Mod
@ Mod
The access may modify the value stored in memory.
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AliasResult::MustAlias
@ MustAlias
The two locations precisely alias each other.
Definition: AliasAnalysis.h:106
llvm::TargetLibraryInfoWrapperPass::getTLI
TargetLibraryInfo & getTLI(const Function &F)
Definition: TargetLibraryInfo.h:459
llvm::ObjectSizeOpts
Various options to control the behavior of getObjectSize.
Definition: MemoryBuiltins.h:188
llvm::FunctionModRefBehavior
FunctionModRefBehavior
Summary of how a function affects memory in the program.
Definition: AliasAnalysis.h:262
llvm::BasicAAResult
This is the AA result object for the basic, local, and stateless alias analysis.
Definition: BasicAliasAnalysis.h:50
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:281
adjustToPointerSize
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize)
To ensure a pointer offset fits in an integer of size PointerSize (in bits) when that size is smaller...
Definition: BasicAliasAnalysis.cpp:393
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:92
llvm::AAResultBase::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Definition: AliasAnalysis.h:1186
Argument.h
llvm::GEPOperator::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Method to return the address space of the pointer operand.
Definition: Operator.h:509
PhiValues.h
llvm::BasicAAWrapperPass::BasicAAWrapperPass
BasicAAWrapperPass()
Definition: BasicAliasAnalysis.cpp:1782
llvm::createBasicAAWrapperPass
FunctionPass * createBasicAAWrapperPass()
Definition: BasicAliasAnalysis.cpp:1799
Attributes.h
j
return j(j<< 16)
Constant.h
llvm::NVPTX::PTXLdStInstCode::V2
@ V2
Definition: NVPTX.h:123
llvm::KnownBits
Definition: KnownBits.h:23
llvm::isIdentifiedObject
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
Definition: AliasAnalysis.cpp:979
EnableRecPhiAnalysis
static cl::opt< bool > EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, cl::init(true))
Enable analysis of recursive PHI nodes.
llvm::AssumptionCacheTracker::getAssumptionCache
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
Definition: AssumptionCache.cpp:272
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition: PassAnalysisSupport.h:130
llvm::LocationSize::beforeOrAfterPointer
constexpr static LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:129
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:314
llvm::GEPOperator::isInBounds
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition: Operator.h:474
GlobalVariable.h
llvm::AnalysisManager::getCachedResult
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:788
Casting.h
Function.h
llvm::AAQueryInfo::AliasCache
AliasCacheT AliasCache
Definition: AliasAnalysis.h:426
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:208
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:214
llvm::APInt::isNonPositive
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition: APInt.h:396
llvm::orc::ReadOnly
static constexpr sys::Memory::ProtectionFlags ReadOnly
Definition: DebugObjectManagerPlugin.cpp:111
isWriteOnlyParam
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo &TLI)
Returns true if this is a writeonly (i.e Mod only) parameter.
Definition: BasicAliasAnalysis.cpp:726
llvm::AAResultBase::pointsToConstantMemory
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Definition: AliasAnalysis.h:1169
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:183
GlobalAlias.h
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:403
llvm::AAQueryInfo::withEmptyCache
AAQueryInfo withEmptyCache()
Create a new AAQueryInfo based on this one, but with the cache cleared.
Definition: AliasAnalysis.h:447
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::APInt::abs
APInt abs() const
Get the absolute value;.
Definition: APInt.h:1870
llvm::createLegacyPMBasicAAResult
BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F)
A helper for the legacy pass manager to create a BasicAAResult object populated to the best of our ab...
Definition: BasicAliasAnalysis.cpp:1825
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:252
llvm::FMRB_OnlyWritesMemory
@ FMRB_OnlyWritesMemory
Definition: AliasAnalysis.h:364
llvm::AAResultBase::getBestAAResults
AAResultsProxy getBestAAResults()
Get a proxy for the best AA result set to query at this time.
Definition: AliasAnalysis.h:1161
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
aa
basic aa
Definition: BasicAliasAnalysis.cpp:1796
Instructions.h
llvm::GEPOperator::getSourceElementType
Type * getSourceElementType() const
Definition: Operator.cpp:22
SmallVector.h
User.h
llvm::User::op_begin
op_iterator op_begin()
Definition: User.h:234
Dominators.h
llvm::APIntOps::GreatestCommonDivisor
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
Definition: APInt.cpp:719
llvm::SmallPtrSetImplBase::empty
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
N
#define N
llvm::DenseMapBase::try_emplace
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:222
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
InstructionSimplify.h
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition: Instructions.h:2688
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::setMust
LLVM_NODISCARD ModRefInfo setMust(const ModRefInfo MRI)
Definition: AliasAnalysis.h:214
llvm::PHINode
Definition: Instructions.h:2572
ScopeExit.h
llvm::SmallVectorImpl< VariableGEPIndex >
llvm::MemoryLocation::getBeforeOrAfter
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
Definition: MemoryLocation.h:275
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1164
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition: PassAnalysisSupport.h:81
DerivedTypes.h
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
llvm::AnalysisUsage::addUsedIfAvailable
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
Definition: PassAnalysisSupport.h:117
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1450
llvm::MemoryLocation::getForDest
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Definition: MemoryLocation.cpp:126
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::NullPointerIsDefined
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:1874
true
basic Basic Alias true
Definition: BasicAliasAnalysis.cpp:1797
Value.h
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition: TargetLibraryInfo.h:422
llvm::BasicAAResult::getModRefBehavior
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Returns the behavior when calling the given call site.
Definition: BasicAliasAnalysis.cpp:668
llvm::ModRefInfo::ModRef
@ ModRef
The access may reference and may modify the value stored in memory.
llvm::MemoryLocation
Representation for a specific memory location.
Definition: MemoryLocation.h:209
MergeAliasResults
static AliasResult MergeAliasResults(AliasResult A, AliasResult B)
Definition: BasicAliasAnalysis.cpp:1244
llvm::AAQueryInfo::Depth
unsigned Depth
Query depth used to distinguish recursive queries.
Definition: AliasAnalysis.h:432
llvm::setMod
LLVM_NODISCARD ModRefInfo setMod(const ModRefInfo MRI)
Definition: AliasAnalysis.h:206
notDifferentParent
static bool notDifferentParent(const Value *O1, const Value *O2)
Definition: BasicAliasAnalysis.cpp:780
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:122
isBaseOfObject
static bool isBaseOfObject(const Value *V)
Return true if we know V to the base address of the corresponding memory object.
Definition: BasicAliasAnalysis.cpp:1004
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::User::op_end
op_iterator op_end()
Definition: User.h:236