// Source: LLVM 14.0.0git — BasicAliasAnalysis.cpp (doxygen export; navigation
// chrome converted to this comment).
1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CFG.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69  cl::init(true));
70 
71 /// By default, even on 32-bit architectures we use 64-bit integers for
72 /// calculations. This will allow us to more-aggressively decompose indexing
73 /// expressions calculated using i64 values (e.g., long long in C) which is
74 /// common enough to worry about.
75 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76  cl::Hidden, cl::init(true));
77 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78  cl::Hidden, cl::init(false));
79 
80 /// SearchLimitReached / SearchTimes shows how often the limit of
81 /// to decompose GEPs is reached. It will affect the precision
82 /// of basic alias analysis.
83 STATISTIC(SearchLimitReached, "Number of times the limit to "
84  "decompose GEPs is reached");
85 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
86 
87 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
88 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89 /// careful with value equivalence. We use reachability to make sure a value
90 /// cannot be involved in a cycle.
92 
93 // The max limit of the search depth in DecomposeGEPExpression() and
94 // getUnderlyingObject(), both functions need to use the same search
95 // depth otherwise the algorithm in aliasGEP will assert.
96 static const unsigned MaxLookupSearchDepth = 6;
97 
100  // We don't care if this analysis itself is preserved, it has no state. But
101  // we need to check that the analyses it depends on have been. Note that we
102  // may be created without handles to some analyses and in that case don't
103  // depend on them.
104  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105  (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106  (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
107  return true;
108 
109  // Otherwise this analysis result remains valid.
110  return false;
111 }
112 
113 //===----------------------------------------------------------------------===//
114 // Useful predicates
115 //===----------------------------------------------------------------------===//
116 
117 /// Returns true if the pointer is one which would have been considered an
118 /// escape by isNonEscapingLocalObject.
119 static bool isEscapeSource(const Value *V) {
120  if (isa<CallBase>(V))
121  return true;
122 
123  if (isa<Argument>(V))
124  return true;
125 
126  // The load case works because isNonEscapingLocalObject considers all
127  // stores to be escapes (it passes true for the StoreCaptures argument
128  // to PointerMayBeCaptured).
129  if (isa<LoadInst>(V))
130  return true;
131 
132  // The inttoptr case works because isNonEscapingLocalObject considers all
133  // means of converting or equating a pointer to an int (ptrtoint, ptr store
134  // which could be followed by an integer load, ptr<->int compare) as
135  // escaping, and objects located at well-known addresses via platform-specific
136  // means cannot be considered non-escaping local objects.
137  if (isa<IntToPtrInst>(V))
138  return true;
139 
140  return false;
141 }
142 
143 /// Returns the size of the object specified by V or UnknownSize if unknown.
144 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
145  const TargetLibraryInfo &TLI,
146  bool NullIsValidLoc,
147  bool RoundToAlign = false) {
148  uint64_t Size;
149  ObjectSizeOpts Opts;
150  Opts.RoundToAlign = RoundToAlign;
151  Opts.NullIsUnknownSize = NullIsValidLoc;
152  if (getObjectSize(V, Size, DL, &TLI, Opts))
153  return Size;
155 }
156 
157 /// Returns true if we can prove that the object specified by V is smaller than
158 /// Size.
159 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
160  const DataLayout &DL,
161  const TargetLibraryInfo &TLI,
162  bool NullIsValidLoc) {
163  // Note that the meanings of the "object" are slightly different in the
164  // following contexts:
165  // c1: llvm::getObjectSize()
166  // c2: llvm.objectsize() intrinsic
167  // c3: isObjectSmallerThan()
168  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
169  // refers to the "entire object".
170  //
171  // Consider this example:
172  // char *p = (char*)malloc(100)
173  // char *q = p+80;
174  //
175  // In the context of c1 and c2, the "object" pointed by q refers to the
176  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
177  //
178  // However, in the context of c3, the "object" refers to the chunk of memory
179  // being allocated. So, the "object" has 100 bytes, and q points to the middle
180  // the "object". In case q is passed to isObjectSmallerThan() as the 1st
181  // parameter, before the llvm::getObjectSize() is called to get the size of
182  // entire object, we should:
183  // - either rewind the pointer q to the base-address of the object in
184  // question (in this case rewind to p), or
185  // - just give up. It is up to caller to make sure the pointer is pointing
186  // to the base address the object.
187  //
188  // We go for 2nd option for simplicity.
189  if (!isIdentifiedObject(V))
190  return false;
191 
192  // This function needs to use the aligned object size because we allow
193  // reads a bit past the end given sufficient alignment.
194  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
195  /*RoundToAlign*/ true);
196 
197  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
198 }
199 
200 /// Return the minimal extent from \p V to the end of the underlying object,
201 /// assuming the result is used in an aliasing query. E.g., we do use the query
202 /// location size and the fact that null pointers cannot alias here.
203 static uint64_t getMinimalExtentFrom(const Value &V,
204  const LocationSize &LocSize,
205  const DataLayout &DL,
206  bool NullIsValidLoc) {
207  // If we have dereferenceability information we know a lower bound for the
208  // extent as accesses for a lower offset would be valid. We need to exclude
209  // the "or null" part if null is a valid pointer.
210  bool CanBeNull, CanBeFreed;
211  uint64_t DerefBytes =
212  V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
213  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
214  DerefBytes = CanBeFreed ? 0 : DerefBytes;
215  // If queried with a precise location size, we assume that location size to be
216  // accessed, thus valid.
217  if (LocSize.isPrecise())
218  DerefBytes = std::max(DerefBytes, LocSize.getValue());
219  return DerefBytes;
220 }
221 
222 /// Returns true if we can prove that the object specified by V has size Size.
223 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
224  const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
225  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
226  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
227 }
228 
229 //===----------------------------------------------------------------------===//
230 // GetElementPtr Instruction Decomposition and Analysis
231 //===----------------------------------------------------------------------===//
232 
233 namespace {
234 /// Represents zext(sext(V)).
235 struct ExtendedValue {
236  const Value *V;
237  unsigned ZExtBits;
238  unsigned SExtBits;
239 
240  explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
241  unsigned SExtBits = 0)
242  : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
243 
244  unsigned getBitWidth() const {
245  return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
246  }
247 
248  ExtendedValue withValue(const Value *NewV) const {
249  return ExtendedValue(NewV, ZExtBits, SExtBits);
250  }
251 
252  ExtendedValue withZExtOfValue(const Value *NewV) const {
253  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
254  NewV->getType()->getPrimitiveSizeInBits();
255  // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
256  return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
257  }
258 
259  ExtendedValue withSExtOfValue(const Value *NewV) const {
260  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
261  NewV->getType()->getPrimitiveSizeInBits();
262  // zext(sext(sext(NewV)))
263  return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
264  }
265 
266  APInt evaluateWith(APInt N) const {
267  assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
268  "Incompatible bit width");
269  if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
270  if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
271  return N;
272  }
273 
274  bool canDistributeOver(bool NUW, bool NSW) const {
275  // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
276  // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
277  return (!ZExtBits || NUW) && (!SExtBits || NSW);
278  }
279 };
280 
281 /// Represents zext(sext(V)) * Scale + Offset.
282 struct LinearExpression {
283  ExtendedValue Val;
284  APInt Scale;
285  APInt Offset;
286 
287  /// True if all operations in this expression are NSW.
288  bool IsNSW;
289 
290  LinearExpression(const ExtendedValue &Val, const APInt &Scale,
291  const APInt &Offset, bool IsNSW)
292  : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
293 
294  LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
295  unsigned BitWidth = Val.getBitWidth();
296  Scale = APInt(BitWidth, 1);
297  Offset = APInt(BitWidth, 0);
298  }
299 };
300 }
301 
302 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
303 /// B are constant integers.
304 static LinearExpression GetLinearExpression(
305  const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
306  AssumptionCache *AC, DominatorTree *DT) {
307  // Limit our recursion depth.
308  if (Depth == 6)
309  return Val;
310 
311  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
312  return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
313  Val.evaluateWith(Const->getValue()), true);
314 
315  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
316  if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
317  APInt RHS = Val.evaluateWith(RHSC->getValue());
318  // The only non-OBO case we deal with is or, and only limited to the
319  // case where it is both nuw and nsw.
320  bool NUW = true, NSW = true;
321  if (isa<OverflowingBinaryOperator>(BOp)) {
322  NUW &= BOp->hasNoUnsignedWrap();
323  NSW &= BOp->hasNoSignedWrap();
324  }
325  if (!Val.canDistributeOver(NUW, NSW))
326  return Val;
327 
328  LinearExpression E(Val);
329  switch (BOp->getOpcode()) {
330  default:
331  // We don't understand this instruction, so we can't decompose it any
332  // further.
333  return Val;
334  case Instruction::Or:
335  // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
336  // analyze it.
337  if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
338  BOp, DT))
339  return Val;
340 
342  case Instruction::Add: {
343  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
344  Depth + 1, AC, DT);
345  E.Offset += RHS;
346  E.IsNSW &= NSW;
347  break;
348  }
349  case Instruction::Sub: {
350  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
351  Depth + 1, AC, DT);
352  E.Offset -= RHS;
353  E.IsNSW &= NSW;
354  break;
355  }
356  case Instruction::Mul: {
357  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
358  Depth + 1, AC, DT);
359  E.Offset *= RHS;
360  E.Scale *= RHS;
361  E.IsNSW &= NSW;
362  break;
363  }
364  case Instruction::Shl:
365  // We're trying to linearize an expression of the kind:
366  // shl i8 -128, 36
367  // where the shift count exceeds the bitwidth of the type.
368  // We can't decompose this further (the expression would return
369  // a poison value).
370  if (RHS.getLimitedValue() > Val.getBitWidth())
371  return Val;
372 
373  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
374  Depth + 1, AC, DT);
375  E.Offset <<= RHS.getLimitedValue();
376  E.Scale <<= RHS.getLimitedValue();
377  E.IsNSW &= NSW;
378  break;
379  }
380  return E;
381  }
382  }
383 
384  if (isa<ZExtInst>(Val.V))
385  return GetLinearExpression(
386  Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
387  DL, Depth + 1, AC, DT);
388 
389  if (isa<SExtInst>(Val.V))
390  return GetLinearExpression(
391  Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
392  DL, Depth + 1, AC, DT);
393 
394  return Val;
395 }
396 
397 /// To ensure a pointer offset fits in an integer of size PointerSize
398 /// (in bits) when that size is smaller than the maximum pointer size. This is
399 /// an issue, for example, in particular for 32b pointers with negative indices
400 /// that rely on two's complement wrap-arounds for precise alias information
401 /// where the maximum pointer size is 64b.
403  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
404  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
405  return (Offset << ShiftBits).ashr(ShiftBits);
406 }
407 
408 static unsigned getMaxPointerSize(const DataLayout &DL) {
409  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
410  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
411  if (DoubleCalcBits) MaxPointerSize *= 2;
412 
413  return MaxPointerSize;
414 }
415 
416 /// If V is a symbolic pointer expression, decompose it into a base pointer
417 /// with a constant offset and a number of scaled symbolic offsets.
418 ///
419 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
420 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
421 /// specified amount, but which may have other unrepresented high bits. As
422 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
423 ///
424 /// This function is capable of analyzing everything that getUnderlyingObject
425 /// can look through. To be able to do that getUnderlyingObject and
426 /// DecomposeGEPExpression must use the same search depth
427 /// (MaxLookupSearchDepth).
428 BasicAAResult::DecomposedGEP
429 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
430  AssumptionCache *AC, DominatorTree *DT) {
431  // Limit recursion depth to limit compile time in crazy cases.
432  unsigned MaxLookup = MaxLookupSearchDepth;
433  SearchTimes++;
434  const Instruction *CxtI = dyn_cast<Instruction>(V);
435 
436  unsigned MaxPointerSize = getMaxPointerSize(DL);
437  DecomposedGEP Decomposed;
438  Decomposed.Offset = APInt(MaxPointerSize, 0);
439  Decomposed.HasCompileTimeConstantScale = true;
440  do {
441  // See if this is a bitcast or GEP.
442  const Operator *Op = dyn_cast<Operator>(V);
443  if (!Op) {
444  // The only non-operator case we can handle are GlobalAliases.
445  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
446  if (!GA->isInterposable()) {
447  V = GA->getAliasee();
448  continue;
449  }
450  }
451  Decomposed.Base = V;
452  return Decomposed;
453  }
454 
455  if (Op->getOpcode() == Instruction::BitCast ||
456  Op->getOpcode() == Instruction::AddrSpaceCast) {
457  V = Op->getOperand(0);
458  continue;
459  }
460 
461  const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
462  if (!GEPOp) {
463  if (const auto *PHI = dyn_cast<PHINode>(V)) {
464  // Look through single-arg phi nodes created by LCSSA.
465  if (PHI->getNumIncomingValues() == 1) {
466  V = PHI->getIncomingValue(0);
467  continue;
468  }
469  } else if (const auto *Call = dyn_cast<CallBase>(V)) {
470  // CaptureTracking can know about special capturing properties of some
471  // intrinsics like launder.invariant.group, that can't be expressed with
472  // the attributes, but have properties like returning aliasing pointer.
473  // Because some analysis may assume that nocaptured pointer is not
474  // returned from some special intrinsic (because function would have to
475  // be marked with returns attribute), it is crucial to use this function
476  // because it should be in sync with CaptureTracking. Not using it may
477  // cause weird miscompilations where 2 aliasing pointers are assumed to
478  // noalias.
479  if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
480  V = RP;
481  continue;
482  }
483  }
484 
485  Decomposed.Base = V;
486  return Decomposed;
487  }
488 
489  // Track whether we've seen at least one in bounds gep, and if so, whether
490  // all geps parsed were in bounds.
491  if (Decomposed.InBounds == None)
492  Decomposed.InBounds = GEPOp->isInBounds();
493  else if (!GEPOp->isInBounds())
494  Decomposed.InBounds = false;
495 
496  // Don't attempt to analyze GEPs over unsized objects.
497  if (!GEPOp->getSourceElementType()->isSized()) {
498  Decomposed.Base = V;
499  return Decomposed;
500  }
501 
502  // Don't attempt to analyze GEPs if index scale is not a compile-time
503  // constant.
504  if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
505  Decomposed.Base = V;
506  Decomposed.HasCompileTimeConstantScale = false;
507  return Decomposed;
508  }
509 
510  unsigned AS = GEPOp->getPointerAddressSpace();
511  // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
512  gep_type_iterator GTI = gep_type_begin(GEPOp);
513  unsigned PointerSize = DL.getPointerSizeInBits(AS);
514  // Assume all GEP operands are constants until proven otherwise.
515  bool GepHasConstantOffset = true;
516  for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
517  I != E; ++I, ++GTI) {
518  const Value *Index = *I;
519  // Compute the (potentially symbolic) offset in bytes for this index.
520  if (StructType *STy = GTI.getStructTypeOrNull()) {
521  // For a struct, add the member offset.
522  unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
523  if (FieldNo == 0)
524  continue;
525 
526  Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
527  continue;
528  }
529 
530  // For an array/pointer, add the element offset, explicitly scaled.
531  if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
532  if (CIdx->isZero())
533  continue;
534  Decomposed.Offset +=
535  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
536  CIdx->getValue().sextOrTrunc(MaxPointerSize);
537  continue;
538  }
539 
540  GepHasConstantOffset = false;
541 
542  APInt Scale(MaxPointerSize,
543  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
544  // If the integer type is smaller than the pointer size, it is implicitly
545  // sign extended to pointer size.
546  unsigned Width = Index->getType()->getIntegerBitWidth();
547  unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
548  LinearExpression LE = GetLinearExpression(
549  ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
550 
551  // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
552  // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
553 
554  // It can be the case that, even through C1*V+C2 does not overflow for
555  // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
556  // decompose the expression in this way.
557  //
558  // FIXME: C1*Scale and the other operations in the decomposed
559  // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
560  // possibility.
561  bool Overflow;
562  APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
563  .smul_ov(Scale, Overflow);
564  if (Overflow) {
565  LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
566  } else {
567  Decomposed.Offset += ScaledOffset;
568  Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
569  }
570 
571  // If we already had an occurrence of this index variable, merge this
572  // scale into it. For example, we want to handle:
573  // A[x][x] -> x*16 + x*4 -> x*20
574  // This also ensures that 'x' only appears in the index list once.
575  for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
576  if (Decomposed.VarIndices[i].V == LE.Val.V &&
577  Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
578  Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
579  Scale += Decomposed.VarIndices[i].Scale;
580  Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
581  break;
582  }
583  }
584 
585  // Make sure that we have a scale that makes sense for this target's
586  // pointer size.
587  Scale = adjustToPointerSize(Scale, PointerSize);
588 
589  if (!!Scale) {
590  VariableGEPIndex Entry = {
591  LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits, Scale, CxtI, LE.IsNSW};
592  Decomposed.VarIndices.push_back(Entry);
593  }
594  }
595 
596  // Take care of wrap-arounds
597  if (GepHasConstantOffset)
598  Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
599 
600  // Analyze the base pointer next.
601  V = GEPOp->getOperand(0);
602  } while (--MaxLookup);
603 
604  // If the chain of expressions is too deep, just return early.
605  Decomposed.Base = V;
606  SearchLimitReached++;
607  return Decomposed;
608 }
609 
610 /// Returns whether the given pointer value points to memory that is local to
611 /// the function, with global constants being considered local to all
612 /// functions.
614  AAQueryInfo &AAQI, bool OrLocal) {
615  assert(Visited.empty() && "Visited must be cleared after use!");
616 
617  unsigned MaxLookup = 8;
619  Worklist.push_back(Loc.Ptr);
620  do {
621  const Value *V = getUnderlyingObject(Worklist.pop_back_val());
622  if (!Visited.insert(V).second) {
623  Visited.clear();
624  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
625  }
626 
627  // An alloca instruction defines local memory.
628  if (OrLocal && isa<AllocaInst>(V))
629  continue;
630 
631  // A global constant counts as local memory for our purposes.
632  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
633  // Note: this doesn't require GV to be "ODR" because it isn't legal for a
634  // global to be marked constant in some modules and non-constant in
635  // others. GV may even be a declaration, not a definition.
636  if (!GV->isConstant()) {
637  Visited.clear();
638  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
639  }
640  continue;
641  }
642 
643  // If both select values point to local memory, then so does the select.
644  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
645  Worklist.push_back(SI->getTrueValue());
646  Worklist.push_back(SI->getFalseValue());
647  continue;
648  }
649 
650  // If all values incoming to a phi node point to local memory, then so does
651  // the phi.
652  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
653  // Don't bother inspecting phi nodes with many operands.
654  if (PN->getNumIncomingValues() > MaxLookup) {
655  Visited.clear();
656  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
657  }
658  append_range(Worklist, PN->incoming_values());
659  continue;
660  }
661 
662  // Otherwise be conservative.
663  Visited.clear();
664  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
665  } while (!Worklist.empty() && --MaxLookup);
666 
667  Visited.clear();
668  return Worklist.empty();
669 }
670 
671 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
672  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
673  return II && II->getIntrinsicID() == IID;
674 }
675 
676 /// Returns the behavior when calling the given call site.
678  if (Call->doesNotAccessMemory())
679  // Can't do better than this.
681 
683 
684  // If the callsite knows it only reads memory, don't return worse
685  // than that.
686  if (Call->onlyReadsMemory())
687  Min = FMRB_OnlyReadsMemory;
688  else if (Call->doesNotReadMemory())
689  Min = FMRB_OnlyWritesMemory;
690 
691  if (Call->onlyAccessesArgMemory())
693  else if (Call->onlyAccessesInaccessibleMemory())
695  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
697 
698  // If the call has operand bundles then aliasing attributes from the function
699  // it calls do not directly apply to the call. This can be made more precise
700  // in the future.
701  if (!Call->hasOperandBundles())
702  if (const Function *F = Call->getCalledFunction())
703  Min =
705 
706  return Min;
707 }
708 
709 /// Returns the behavior when calling the given function. For use when the call
710 /// site is not known.
712  // If the function declares it doesn't access memory, we can't do better.
713  if (F->doesNotAccessMemory())
715 
717 
718  // If the function declares it only reads memory, go with that.
719  if (F->onlyReadsMemory())
720  Min = FMRB_OnlyReadsMemory;
721  else if (F->doesNotReadMemory())
722  Min = FMRB_OnlyWritesMemory;
723 
724  if (F->onlyAccessesArgMemory())
726  else if (F->onlyAccessesInaccessibleMemory())
728  else if (F->onlyAccessesInaccessibleMemOrArgMem())
730 
731  return Min;
732 }
733 
734 /// Returns true if this is a writeonly (i.e Mod only) parameter.
735 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
736  const TargetLibraryInfo &TLI) {
737  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
738  return true;
739 
740  // We can bound the aliasing properties of memset_pattern16 just as we can
741  // for memcpy/memset. This is particularly important because the
742  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
743  // whenever possible.
744  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
745  // attributes.
746  LibFunc F;
747  if (Call->getCalledFunction() &&
748  TLI.getLibFunc(*Call->getCalledFunction(), F) &&
749  F == LibFunc_memset_pattern16 && TLI.has(F))
750  if (ArgIdx == 0)
751  return true;
752 
753  // TODO: memset_pattern4, memset_pattern8
754  // TODO: _chk variants
755  // TODO: strcmp, strcpy
756 
757  return false;
758 }
759 
761  unsigned ArgIdx) {
762  // Checking for known builtin intrinsics and target library functions.
763  if (isWriteOnlyParam(Call, ArgIdx, TLI))
764  return ModRefInfo::Mod;
765 
766  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
767  return ModRefInfo::Ref;
768 
769  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
770  return ModRefInfo::NoModRef;
771 
772  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
773 }
774 
775 #ifndef NDEBUG
776 static const Function *getParent(const Value *V) {
777  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
778  if (!inst->getParent())
779  return nullptr;
780  return inst->getParent()->getParent();
781  }
782 
783  if (const Argument *arg = dyn_cast<Argument>(V))
784  return arg->getParent();
785 
786  return nullptr;
787 }
788 
789 static bool notDifferentParent(const Value *O1, const Value *O2) {
790 
791  const Function *F1 = getParent(O1);
792  const Function *F2 = getParent(O2);
793 
794  return !F1 || !F2 || F1 == F2;
795 }
796 #endif
797 
799  const MemoryLocation &LocB,
800  AAQueryInfo &AAQI) {
801  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
802  "BasicAliasAnalysis doesn't support interprocedural queries.");
803  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
804 }
805 
806 /// Checks to see if the specified callsite can clobber the specified memory
807 /// object.
808 ///
809 /// Since we only look at local properties of this function, we really can't
810 /// say much about this query. We do, however, use simple "address taken"
811 /// analysis on local objects.
813  const MemoryLocation &Loc,
814  AAQueryInfo &AAQI) {
815  assert(notDifferentParent(Call, Loc.Ptr) &&
816  "AliasAnalysis query involving multiple functions!");
817 
818  const Value *Object = getUnderlyingObject(Loc.Ptr);
819 
820  // Calls marked 'tail' cannot read or write allocas from the current frame
821  // because the current frame might be destroyed by the time they run. However,
822  // a tail call may use an alloca with byval. Calling with byval copies the
823  // contents of the alloca into argument registers or stack slots, so there is
824  // no lifetime issue.
825  if (isa<AllocaInst>(Object))
826  if (const CallInst *CI = dyn_cast<CallInst>(Call))
827  if (CI->isTailCall() &&
828  !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
829  return ModRefInfo::NoModRef;
830 
831  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
832  // modify them even though the alloca is not escaped.
833  if (auto *AI = dyn_cast<AllocaInst>(Object))
834  if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
835  return ModRefInfo::Mod;
836 
837  // If the pointer is to a locally allocated object that does not escape,
838  // then the call can not mod/ref the pointer unless the call takes the pointer
839  // as an argument, and itself doesn't capture it.
840  if (!isa<Constant>(Object) && Call != Object &&
842 
843  // Optimistically assume that call doesn't touch Object and check this
844  // assumption in the following loop.
846  bool IsMustAlias = true;
847 
848  unsigned OperandNo = 0;
849  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
850  CI != CE; ++CI, ++OperandNo) {
851  // Only look at the no-capture or byval pointer arguments. If this
852  // pointer were passed to arguments that were neither of these, then it
853  // couldn't be no-capture.
854  if (!(*CI)->getType()->isPointerTy() ||
855  (!Call->doesNotCapture(OperandNo) &&
856  OperandNo < Call->getNumArgOperands() &&
857  !Call->isByValArgument(OperandNo)))
858  continue;
859 
860  // Call doesn't access memory through this operand, so we don't care
861  // if it aliases with Object.
862  if (Call->doesNotAccessMemory(OperandNo))
863  continue;
864 
865  // If this is a no-capture pointer argument, see if we can tell that it
866  // is impossible to alias the pointer we're checking.
867  AliasResult AR = getBestAAResults().alias(
870  if (AR != AliasResult::MustAlias)
871  IsMustAlias = false;
872  // Operand doesn't alias 'Object', continue looking for other aliases
873  if (AR == AliasResult::NoAlias)
874  continue;
875  // Operand aliases 'Object', but call doesn't modify it. Strengthen
876  // initial assumption and keep looking in case if there are more aliases.
877  if (Call->onlyReadsMemory(OperandNo)) {
878  Result = setRef(Result);
879  continue;
880  }
881  // Operand aliases 'Object' but call only writes into it.
882  if (Call->doesNotReadMemory(OperandNo)) {
883  Result = setMod(Result);
884  continue;
885  }
886  // This operand aliases 'Object' and call reads and writes into it.
887  // Setting ModRef will not yield an early return below, MustAlias is not
888  // used further.
889  Result = ModRefInfo::ModRef;
890  break;
891  }
892 
893  // No operand aliases, reset Must bit. Add below if at least one aliases
894  // and all aliases found are MustAlias.
895  if (isNoModRef(Result))
896  IsMustAlias = false;
897 
898  // Early return if we improved mod ref information
899  if (!isModAndRefSet(Result)) {
900  if (isNoModRef(Result))
901  return ModRefInfo::NoModRef;
902  return IsMustAlias ? setMust(Result) : clearMust(Result);
903  }
904  }
905 
906  // If the call is malloc/calloc like, we can assume that it doesn't
907  // modify any IR visible value. This is only valid because we assume these
908  // routines do not read values visible in the IR. TODO: Consider special
909  // casing realloc and strdup routines which access only their arguments as
910  // well. Or alternatively, replace all of this with inaccessiblememonly once
911  // that's implemented fully.
912  if (isMallocOrCallocLikeFn(Call, &TLI)) {
913  // Be conservative if the accessed pointer may alias the allocation -
914  // fallback to the generic handling below.
916  AAQI) == AliasResult::NoAlias)
917  return ModRefInfo::NoModRef;
918  }
919 
920  // The semantics of memcpy intrinsics either exactly overlap or do not
921  // overlap, i.e., source and destination of any given memcpy are either
922  // no-alias or must-alias.
923  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
924  AliasResult SrcAA =
925  getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
926  AliasResult DestAA =
927  getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
928  // It's also possible for Loc to alias both src and dest, or neither.
930  if (SrcAA != AliasResult::NoAlias)
931  rv = setRef(rv);
932  if (DestAA != AliasResult::NoAlias)
933  rv = setMod(rv);
934  return rv;
935  }
936 
937  // Guard intrinsics are marked as arbitrarily writing so that proper control
938  // dependencies are maintained but they never mods any particular memory
939  // location.
940  //
941  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
942  // heap state at the point the guard is issued needs to be consistent in case
943  // the guard invokes the "deopt" continuation.
944  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
945  return ModRefInfo::Ref;
946  // The same applies to deoptimize which is essentially a guard(false).
947  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
948  return ModRefInfo::Ref;
949 
950  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
951  // writing so that proper control dependencies are maintained but they never
952  // mod any particular memory location visible to the IR.
953  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
954  // intrinsic is now modeled as reading memory. This prevents hoisting the
955  // invariant.start intrinsic over stores. Consider:
956  // *ptr = 40;
957  // *ptr = 50;
958  // invariant_start(ptr)
959  // int val = *ptr;
960  // print(val);
961  //
962  // This cannot be transformed to:
963  //
964  // *ptr = 40;
965  // invariant_start(ptr)
966  // *ptr = 50;
967  // int val = *ptr;
968  // print(val);
969  //
970  // The transformation will cause the second store to be ignored (based on
971  // rules of invariant.start) and print 40, while the first program always
972  // prints 50.
973  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
974  return ModRefInfo::Ref;
975 
976  // The AAResultBase base class has some smarts, lets use them.
977  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
978 }
979 
981  const CallBase *Call2,
982  AAQueryInfo &AAQI) {
983  // Guard intrinsics are marked as arbitrarily writing so that proper control
984  // dependencies are maintained but they never mods any particular memory
985  // location.
986  //
987  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
988  // heap state at the point the guard is issued needs to be consistent in case
989  // the guard invokes the "deopt" continuation.
990 
991  // NB! This function is *not* commutative, so we special case two
992  // possibilities for guard intrinsics.
993 
994  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
998 
999  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1001  ? ModRefInfo::Mod
1003 
1004  // The AAResultBase base class has some smarts, lets use them.
1005  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1006 }
1007 
1008 /// Return true if we know V to the base address of the corresponding memory
1009 /// object. This implies that any address less than V must be out of bounds
1010 /// for the underlying object. Note that just being isIdentifiedObject() is
1011 /// not enough - For example, a negative offset from a noalias argument or call
1012 /// can be inbounds w.r.t the actual underlying object.
1013 static bool isBaseOfObject(const Value *V) {
1014  // TODO: We can handle other cases here
1015  // 1) For GC languages, arguments to functions are often required to be
1016  // base pointers.
1017  // 2) Result of allocation routines are often base pointers. Leverage TLI.
1018  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1019 }
1020 
1021 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1022 /// another pointer.
1023 ///
1024 /// We know that V1 is a GEP, but we don't know anything about V2.
1025 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1026 /// V2.
1027 AliasResult BasicAAResult::aliasGEP(
1028  const GEPOperator *GEP1, LocationSize V1Size,
1029  const Value *V2, LocationSize V2Size,
1030  const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1031  if (!V1Size.hasValue() && !V2Size.hasValue()) {
1032  // TODO: This limitation exists for compile-time reasons. Relax it if we
1033  // can avoid exponential pathological cases.
1034  if (!isa<GEPOperator>(V2))
1035  return AliasResult::MayAlias;
1036 
1037  // If both accesses have unknown size, we can only check whether the base
1038  // objects don't alias.
1039  AliasResult BaseAlias = getBestAAResults().alias(
1040  MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1041  MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1042  return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1044  }
1045 
1046  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1047  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1048 
1049  // Don't attempt to analyze the decomposed GEP if index scale is not a
1050  // compile-time constant.
1051  if (!DecompGEP1.HasCompileTimeConstantScale ||
1052  !DecompGEP2.HasCompileTimeConstantScale)
1053  return AliasResult::MayAlias;
1054 
1055  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1056  "DecomposeGEPExpression returned a result different from "
1057  "getUnderlyingObject");
1058 
1059  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1060  // symbolic difference.
1061  DecompGEP1.Offset -= DecompGEP2.Offset;
1062  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1063 
1064  // If an inbounds GEP would have to start from an out of bounds address
1065  // for the two to alias, then we can assume noalias.
1066  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1067  V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1068  isBaseOfObject(DecompGEP2.Base))
1069  return AliasResult::NoAlias;
1070 
1071  if (isa<GEPOperator>(V2)) {
1072  // Symmetric case to above.
1073  if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1074  V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1075  isBaseOfObject(DecompGEP1.Base))
1076  return AliasResult::NoAlias;
1077  }
1078 
1079  // For GEPs with identical offsets, we can preserve the size and AAInfo
1080  // when performing the alias check on the underlying objects.
1081  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1082  return getBestAAResults().alias(
1083  MemoryLocation(UnderlyingV1, V1Size),
1084  MemoryLocation(UnderlyingV2, V2Size), AAQI);
1085 
1086  // Do the base pointers alias?
1087  AliasResult BaseAlias = getBestAAResults().alias(
1088  MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1089  MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1090 
1091  // If we get a No or May, then return it immediately, no amount of analysis
1092  // will improve this situation.
1093  if (BaseAlias != AliasResult::MustAlias) {
1094  assert(BaseAlias == AliasResult::NoAlias ||
1095  BaseAlias == AliasResult::MayAlias);
1096  return BaseAlias;
1097  }
1098 
1099  // If there is a constant difference between the pointers, but the difference
1100  // is less than the size of the associated memory object, then we know
1101  // that the objects are partially overlapping. If the difference is
1102  // greater, we know they do not overlap.
1103  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1104  APInt &Off = DecompGEP1.Offset;
1105 
1106  // Initialize for Off >= 0 (V2 <= GEP1) case.
1107  const Value *LeftPtr = V2;
1108  const Value *RightPtr = GEP1;
1109  LocationSize VLeftSize = V2Size;
1110  LocationSize VRightSize = V1Size;
1111  const bool Swapped = Off.isNegative();
1112 
1113  if (Swapped) {
1114  // Swap if we have the situation where:
1115  // + +
1116  // | BaseOffset |
1117  // ---------------->|
1118  // |-->V1Size |-------> V2Size
1119  // GEP1 V2
1120  std::swap(LeftPtr, RightPtr);
1121  std::swap(VLeftSize, VRightSize);
1122  Off = -Off;
1123  }
1124 
1125  if (VLeftSize.hasValue()) {
1126  const uint64_t LSize = VLeftSize.getValue();
1127  if (Off.ult(LSize)) {
1128  // Conservatively drop processing if a phi was visited and/or offset is
1129  // too big.
1131  if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1132  (Off + VRightSize.getValue()).ule(LSize)) {
1133  // Memory referenced by right pointer is nested. Save the offset in
1134  // cache. Note that originally offset estimated as GEP1-V2, but
1135  // AliasResult contains the shift that represents GEP1+Offset=V2.
1136  AR.setOffset(-Off.getSExtValue());
1137  AR.swap(Swapped);
1138  }
1139  return AR;
1140  }
1141  return AliasResult::NoAlias;
1142  }
1143  }
1144 
1145  if (!DecompGEP1.VarIndices.empty()) {
1146  APInt GCD;
1147  bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1148  bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1149  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1150  APInt Scale = DecompGEP1.VarIndices[i].Scale;
1151  APInt ScaleForGCD = DecompGEP1.VarIndices[i].Scale;
1152  if (!DecompGEP1.VarIndices[i].IsNSW)
1153  ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1154  Scale.countTrailingZeros());
1155 
1156  if (i == 0)
1157  GCD = ScaleForGCD.abs();
1158  else
1159  GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1160 
1161  if (AllNonNegative || AllNonPositive) {
1162  // If the Value could change between cycles, then any reasoning about
1163  // the Value this cycle may not hold in the next cycle. We'll just
1164  // give up if we can't determine conditions that hold for every cycle:
1165  const Value *V = DecompGEP1.VarIndices[i].V;
1166  const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;
1167 
1168  KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
1169  bool SignKnownZero = Known.isNonNegative();
1170  bool SignKnownOne = Known.isNegative();
1171 
1172  // Zero-extension widens the variable, and so forces the sign
1173  // bit to zero.
1174  bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1175  SignKnownZero |= IsZExt;
1176  SignKnownOne &= !IsZExt;
1177 
1178  AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1179  (SignKnownOne && Scale.isNonPositive());
1180  AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1181  (SignKnownOne && Scale.isNonNegative());
1182  }
1183  }
1184 
1185  // We now have accesses at two offsets from the same base:
1186  // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1187  // 2. 0 with size V2Size
1188  // Using arithmetic modulo GCD, the accesses are at
1189  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1190  // into the range [V2Size..GCD), then we know they cannot overlap.
1191  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1192  if (ModOffset.isNegative())
1193  ModOffset += GCD; // We want mod, not rem.
1194  if (V1Size.hasValue() && V2Size.hasValue() &&
1195  ModOffset.uge(V2Size.getValue()) &&
1196  (GCD - ModOffset).uge(V1Size.getValue()))
1197  return AliasResult::NoAlias;
1198 
1199  // If we know all the variables are non-negative, then the total offset is
1200  // also non-negative and >= DecompGEP1.Offset. We have the following layout:
1201  // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size]
1202  // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1203  if (AllNonNegative && V2Size.hasValue() &&
1204  DecompGEP1.Offset.uge(V2Size.getValue()))
1205  return AliasResult::NoAlias;
1206  // Similarly, if the variables are non-positive, then the total offset is
1207  // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1208  // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1209  // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1210  if (AllNonPositive && V1Size.hasValue() &&
1211  (-DecompGEP1.Offset).uge(V1Size.getValue()))
1212  return AliasResult::NoAlias;
1213 
1214  if (V1Size.hasValue() && V2Size.hasValue()) {
1215  // Try to determine whether abs(VarIndex) > 0.
1216  Optional<APInt> MinAbsVarIndex;
1217  if (DecompGEP1.VarIndices.size() == 1) {
1218  // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
1219  const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1220  if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
1221  MinAbsVarIndex = Var.Scale.abs();
1222  } else if (DecompGEP1.VarIndices.size() == 2) {
1223  // VarIndex = Scale*V0 + (-Scale)*V1.
1224  // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1225  // Check that VisitedPhiBBs is empty, to avoid reasoning about
1226  // inequality of values across loop iterations.
1227  const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1228  const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1229  if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
1230  Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
1231  isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
1232  MinAbsVarIndex = Var0.Scale.abs();
1233  }
1234 
1235  if (MinAbsVarIndex) {
1236  // The constant offset will have added at least +/-MinAbsVarIndex to it.
1237  APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1238  APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1239  // Check that an access at OffsetLo or lower, and an access at OffsetHi
1240  // or higher both do not alias.
1241  if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1242  OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1243  return AliasResult::NoAlias;
1244  }
1245  }
1246 
1247  if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1248  DecompGEP1.Offset, &AC, DT))
1249  return AliasResult::NoAlias;
1250  }
1251 
1252  // Statically, we can see that the base objects are the same, but the
1253  // pointers have dynamic offsets which we can't resolve. And none of our
1254  // little tricks above worked.
1255  return AliasResult::MayAlias;
1256 }
1257 
1259  // If the results agree, take it.
1260  if (A == B)
1261  return A;
1262  // A mix of PartialAlias and MustAlias is PartialAlias.
1266  // Otherwise, we don't know anything.
1267  return AliasResult::MayAlias;
1268 }
1269 
1270 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1271 /// against another.
1273 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1274  const Value *V2, LocationSize V2Size,
1275  AAQueryInfo &AAQI) {
1276  // If the values are Selects with the same condition, we can do a more precise
1277  // check: just check for aliases between the values on corresponding arms.
1278  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1279  if (SI->getCondition() == SI2->getCondition()) {
1280  AliasResult Alias = getBestAAResults().alias(
1281  MemoryLocation(SI->getTrueValue(), SISize),
1282  MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1283  if (Alias == AliasResult::MayAlias)
1284  return AliasResult::MayAlias;
1285  AliasResult ThisAlias = getBestAAResults().alias(
1286  MemoryLocation(SI->getFalseValue(), SISize),
1287  MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1288  return MergeAliasResults(ThisAlias, Alias);
1289  }
1290 
1291  // If both arms of the Select node NoAlias or MustAlias V2, then returns
1292  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1293  AliasResult Alias = getBestAAResults().alias(
1294  MemoryLocation(V2, V2Size),
1295  MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1296  if (Alias == AliasResult::MayAlias)
1297  return AliasResult::MayAlias;
1298 
1299  AliasResult ThisAlias = getBestAAResults().alias(
1300  MemoryLocation(V2, V2Size),
1301  MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1302  return MergeAliasResults(ThisAlias, Alias);
1303 }
1304 
1305 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1306 /// another.
1307 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1308  const Value *V2, LocationSize V2Size,
1309  AAQueryInfo &AAQI) {
1310  if (!PN->getNumIncomingValues())
1311  return AliasResult::NoAlias;
1312  // If the values are PHIs in the same block, we can do a more precise
1313  // as well as efficient check: just check for aliases between the values
1314  // on corresponding edges.
1315  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1316  if (PN2->getParent() == PN->getParent()) {
1317  Optional<AliasResult> Alias;
1318  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1319  AliasResult ThisAlias = getBestAAResults().alias(
1320  MemoryLocation(PN->getIncomingValue(i), PNSize),
1322  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1323  AAQI);
1324  if (Alias)
1325  *Alias = MergeAliasResults(*Alias, ThisAlias);
1326  else
1327  Alias = ThisAlias;
1328  if (*Alias == AliasResult::MayAlias)
1329  break;
1330  }
1331  return *Alias;
1332  }
1333 
1334  SmallVector<Value *, 4> V1Srcs;
1335  // If a phi operand recurses back to the phi, we can still determine NoAlias
1336  // if we don't alias the underlying objects of the other phi operands, as we
1337  // know that the recursive phi needs to be based on them in some way.
1338  bool isRecursive = false;
1339  auto CheckForRecPhi = [&](Value *PV) {
1340  if (!EnableRecPhiAnalysis)
1341  return false;
1342  if (getUnderlyingObject(PV) == PN) {
1343  isRecursive = true;
1344  return true;
1345  }
1346  return false;
1347  };
1348 
1349  if (PV) {
1350  // If we have PhiValues then use it to get the underlying phi values.
1351  const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1352  // If we have more phi values than the search depth then return MayAlias
1353  // conservatively to avoid compile time explosion. The worst possible case
1354  // is if both sides are PHI nodes. In which case, this is O(m x n) time
1355  // where 'm' and 'n' are the number of PHI sources.
1356  if (PhiValueSet.size() > MaxLookupSearchDepth)
1357  return AliasResult::MayAlias;
1358  // Add the values to V1Srcs
1359  for (Value *PV1 : PhiValueSet) {
1360  if (CheckForRecPhi(PV1))
1361  continue;
1362  V1Srcs.push_back(PV1);
1363  }
1364  } else {
1365  // If we don't have PhiInfo then just look at the operands of the phi itself
1366  // FIXME: Remove this once we can guarantee that we have PhiInfo always
1367  SmallPtrSet<Value *, 4> UniqueSrc;
1368  Value *OnePhi = nullptr;
1369  for (Value *PV1 : PN->incoming_values()) {
1370  if (isa<PHINode>(PV1)) {
1371  if (OnePhi && OnePhi != PV1) {
1372  // To control potential compile time explosion, we choose to be
1373  // conserviate when we have more than one Phi input. It is important
1374  // that we handle the single phi case as that lets us handle LCSSA
1375  // phi nodes and (combined with the recursive phi handling) simple
1376  // pointer induction variable patterns.
1377  return AliasResult::MayAlias;
1378  }
1379  OnePhi = PV1;
1380  }
1381 
1382  if (CheckForRecPhi(PV1))
1383  continue;
1384 
1385  if (UniqueSrc.insert(PV1).second)
1386  V1Srcs.push_back(PV1);
1387  }
1388 
1389  if (OnePhi && UniqueSrc.size() > 1)
1390  // Out of an abundance of caution, allow only the trivial lcssa and
1391  // recursive phi cases.
1392  return AliasResult::MayAlias;
1393  }
1394 
1395  // If V1Srcs is empty then that means that the phi has no underlying non-phi
1396  // value. This should only be possible in blocks unreachable from the entry
1397  // block, but return MayAlias just in case.
1398  if (V1Srcs.empty())
1399  return AliasResult::MayAlias;
1400 
1401  // If this PHI node is recursive, indicate that the pointer may be moved
1402  // across iterations. We can only prove NoAlias if different underlying
1403  // objects are involved.
1404  if (isRecursive)
1406 
1407  // In the recursive alias queries below, we may compare values from two
1408  // different loop iterations. Keep track of visited phi blocks, which will
1409  // be used when determining value equivalence.
1410  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1411  auto _ = make_scope_exit([&]() {
1412  if (BlockInserted)
1413  VisitedPhiBBs.erase(PN->getParent());
1414  });
1415 
1416  // If we inserted a block into VisitedPhiBBs, alias analysis results that
1417  // have been cached earlier may no longer be valid. Perform recursive queries
1418  // with a new AAQueryInfo.
1419  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1420  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1421 
1422  AliasResult Alias = getBestAAResults().alias(
1423  MemoryLocation(V2, V2Size),
1424  MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1425 
1426  // Early exit if the check of the first PHI source against V2 is MayAlias.
1427  // Other results are not possible.
1428  if (Alias == AliasResult::MayAlias)
1429  return AliasResult::MayAlias;
1430  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1431  // remain valid to all elements and needs to conservatively return MayAlias.
1432  if (isRecursive && Alias != AliasResult::NoAlias)
1433  return AliasResult::MayAlias;
1434 
1435  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1436  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1437  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1438  Value *V = V1Srcs[i];
1439 
1440  AliasResult ThisAlias = getBestAAResults().alias(
1441  MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1442  Alias = MergeAliasResults(ThisAlias, Alias);
1443  if (Alias == AliasResult::MayAlias)
1444  break;
1445  }
1446 
1447  return Alias;
1448 }
1449 
1450 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1451 /// array references.
1452 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1453  const Value *V2, LocationSize V2Size,
1454  AAQueryInfo &AAQI) {
1455  // If either of the memory references is empty, it doesn't matter what the
1456  // pointer values are.
1457  if (V1Size.isZero() || V2Size.isZero())
1458  return AliasResult::NoAlias;
1459 
1460  // Strip off any casts if they exist.
1462  V2 = V2->stripPointerCastsForAliasAnalysis();
1463 
1464  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1465  // value for undef that aliases nothing in the program.
1466  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1467  return AliasResult::NoAlias;
1468 
1469  // Are we checking for alias of the same value?
1470  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1471  // different iterations. We must therefore make sure that this is not the
1472  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1473  // happen by looking at the visited phi nodes and making sure they cannot
1474  // reach the value.
1475  if (isValueEqualInPotentialCycles(V1, V2))
1476  return AliasResult::MustAlias;
1477 
1478  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1479  return AliasResult::NoAlias; // Scalars cannot alias each other
1480 
1481  // Figure out what objects these things are pointing to if we can.
1484 
1485  // Null values in the default address space don't point to any object, so they
1486  // don't alias any other pointer.
1487  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1488  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1489  return AliasResult::NoAlias;
1490  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1491  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1492  return AliasResult::NoAlias;
1493 
1494  if (O1 != O2) {
1495  // If V1/V2 point to two different objects, we know that we have no alias.
1496  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1497  return AliasResult::NoAlias;
1498 
1499  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1500  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1501  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1502  return AliasResult::NoAlias;
1503 
1504  // Function arguments can't alias with things that are known to be
1505  // unambigously identified at the function level.
1506  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1507  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1508  return AliasResult::NoAlias;
1509 
1510  // If one pointer is the result of a call/invoke or load and the other is a
1511  // non-escaping local object within the same function, then we know the
1512  // object couldn't escape to a point where the call could return it.
1513  //
1514  // Note that if the pointers are in different functions, there are a
1515  // variety of complications. A call with a nocapture argument may still
1516  // temporary store the nocapture argument's value in a temporary memory
1517  // location if that memory location doesn't escape. Or it may pass a
1518  // nocapture value to other functions as long as they don't capture it.
1519  if (isEscapeSource(O1) &&
1521  return AliasResult::NoAlias;
1522  if (isEscapeSource(O2) &&
1524  return AliasResult::NoAlias;
1525  }
1526 
1527  // If the size of one access is larger than the entire object on the other
1528  // side, then we know such behavior is undefined and can assume no alias.
1529  bool NullIsValidLocation = NullPointerIsDefined(&F);
1530  if ((isObjectSmallerThan(
1531  O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1532  TLI, NullIsValidLocation)) ||
1534  O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1535  TLI, NullIsValidLocation)))
1536  return AliasResult::NoAlias;
1537 
1538  // If one the accesses may be before the accessed pointer, canonicalize this
1539  // by using unknown after-pointer sizes for both accesses. This is
1540  // equivalent, because regardless of which pointer is lower, one of them
1541  // will always came after the other, as long as the underlying objects aren't
1542  // disjoint. We do this so that the rest of BasicAA does not have to deal
1543  // with accesses before the base pointer, and to improve cache utilization by
1544  // merging equivalent states.
1545  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1546  V1Size = LocationSize::afterPointer();
1547  V2Size = LocationSize::afterPointer();
1548  }
1549 
1550  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1551  // for recursive queries. For this reason, this limit is chosen to be large
1552  // enough to be very rarely hit, while still being small enough to avoid
1553  // stack overflows.
1554  if (AAQI.Depth >= 512)
1555  return AliasResult::MayAlias;
1556 
1557  // Check the cache before climbing up use-def chains. This also terminates
1558  // otherwise infinitely recursive queries.
1559  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1560  const bool Swapped = V1 > V2;
1561  if (Swapped)
1562  std::swap(Locs.first, Locs.second);
1563  const auto &Pair = AAQI.AliasCache.try_emplace(
1565  if (!Pair.second) {
1566  auto &Entry = Pair.first->second;
1567  if (!Entry.isDefinitive()) {
1568  // Remember that we used an assumption.
1569  ++Entry.NumAssumptionUses;
1570  ++AAQI.NumAssumptionUses;
1571  }
1572  // Cache contains sorted {V1,V2} pairs but we should return original order.
1573  auto Result = Entry.Result;
1574  Result.swap(Swapped);
1575  return Result;
1576  }
1577 
1578  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1579  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1581  aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1582 
1583  auto It = AAQI.AliasCache.find(Locs);
1584  assert(It != AAQI.AliasCache.end() && "Must be in cache");
1585  auto &Entry = It->second;
1586 
1587  // Check whether a NoAlias assumption has been used, but disproven.
1588  bool AssumptionDisproven =
1589  Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1590  if (AssumptionDisproven)
1592 
1593  // This is a definitive result now, when considered as a root query.
1594  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1595  Entry.Result = Result;
1596  // Cache contains sorted {V1,V2} pairs.
1597  Entry.Result.swap(Swapped);
1598  Entry.NumAssumptionUses = -1;
1599 
1600  // If the assumption has been disproven, remove any results that may have
1601  // been based on this assumption. Do this after the Entry updates above to
1602  // avoid iterator invalidation.
1603  if (AssumptionDisproven)
1604  while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1606 
1607  // The result may still be based on assumptions higher up in the chain.
1608  // Remember it, so it can be purged from the cache later.
1609  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1610  Result != AliasResult::MayAlias)
1611  AAQI.AssumptionBasedResults.push_back(Locs);
1612  return Result;
1613 }
1614 
1615 AliasResult BasicAAResult::aliasCheckRecursive(
1616  const Value *V1, LocationSize V1Size,
1617  const Value *V2, LocationSize V2Size,
1618  AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1619  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1620  AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1621  if (Result != AliasResult::MayAlias)
1622  return Result;
1623  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1624  AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1625  if (Result != AliasResult::MayAlias)
1626  return Result;
1627  }
1628 
1629  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1630  AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1631  if (Result != AliasResult::MayAlias)
1632  return Result;
1633  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1634  AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1635  if (Result != AliasResult::MayAlias)
1636  return Result;
1637  }
1638 
1639  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1640  AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1641  if (Result != AliasResult::MayAlias)
1642  return Result;
1643  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1644  AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1645  if (Result != AliasResult::MayAlias)
1646  return Result;
1647  }
1648 
1649  // If both pointers are pointing into the same object and one of them
1650  // accesses the entire object, then the accesses must overlap in some way.
1651  if (O1 == O2) {
1652  bool NullIsValidLocation = NullPointerIsDefined(&F);
1653  if (V1Size.isPrecise() && V2Size.isPrecise() &&
1654  (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1655  isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1657  }
1658 
1659  return AliasResult::MayAlias;
1660 }
1661 
1662 /// Check whether two Values can be considered equivalent.
1663 ///
1664 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1665 /// they can not be part of a cycle in the value graph by looking at all
1666 /// visited phi nodes an making sure that the phis cannot reach the value. We
1667 /// have to do this because we are looking through phi nodes (That is we say
1668 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1669 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1670  const Value *V2) {
1671  if (V != V2)
1672  return false;
1673 
1674  const Instruction *Inst = dyn_cast<Instruction>(V);
1675  if (!Inst)
1676  return true;
1677 
1678  if (VisitedPhiBBs.empty())
1679  return true;
1680 
1681  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1682  return false;
1683 
1684  // Make sure that the visited phis cannot reach the Value. This ensures that
1685  // the Values cannot come from different iterations of a potential cycle the
1686  // phi nodes could be involved in.
1687  for (auto *P : VisitedPhiBBs)
1688  if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1689  return false;
1690 
1691  return true;
1692 }
1693 
1694 /// Computes the symbolic difference between two de-composed GEPs.
1695 ///
1696 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1697 /// instructions GEP1 and GEP2 which have common base pointers.
1698 void BasicAAResult::GetIndexDifference(
1700  const SmallVectorImpl<VariableGEPIndex> &Src) {
1701  if (Src.empty())
1702  return;
1703 
1704  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1705  const Value *V = Src[i].V;
1706  unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1707  APInt Scale = Src[i].Scale;
1708 
1709  // Find V in Dest. This is N^2, but pointer indices almost never have more
1710  // than a few variable indexes.
1711  for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1712  if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1713  Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1714  continue;
1715 
1716  // If we found it, subtract off Scale V's from the entry in Dest. If it
1717  // goes to zero, remove the entry.
1718  if (Dest[j].Scale != Scale) {
1719  Dest[j].Scale -= Scale;
1720  Dest[j].IsNSW = false;
1721  } else
1722  Dest.erase(Dest.begin() + j);
1723  Scale = 0;
1724  break;
1725  }
1726 
1727  // If we didn't consume this entry, add it to the end of the Dest list.
1728  if (!!Scale) {
1729  VariableGEPIndex Entry = {V, ZExtBits, SExtBits,
1730  -Scale, Src[i].CxtI, Src[i].IsNSW};
1731  Dest.push_back(Entry);
1732  }
1733  }
1734 }
1735 
/// Heuristic for two GEP variable indices that differ only by a constant:
/// returns true when the (scaled) constant gap between the two pointers is
/// provably large enough that accesses of V1Size/V2Size bytes cannot overlap.
bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  // The heuristic only handles exactly two variable indices with precisely
  // known access sizes.
  if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  // The two indices must cancel (opposite scales) and have identical type
  // and extension history for the comparison below to be meaningful.
  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
  // Both indices must decompose to the same underlying value with identical
  // scale and extensions; only then is the difference a pure constant.
  if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
      E0.Val.SExtBits != E1.Val.SExtBits ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  // Scale the element difference up to a byte difference.
  APInt MinDiffBytes =
    MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}
1785 
1786 //===----------------------------------------------------------------------===//
1787 // BasicAliasAnalysis Pass
1788 //===----------------------------------------------------------------------===//
1789 
// Unique key identifying the BasicAA analysis to the new pass manager.
AnalysisKey BasicAA::Key;
1791 
1793  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1794  auto &AC = AM.getResult<AssumptionAnalysis>(F);
1795  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1796  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1797  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1798 }
1799 
1802 }
1803 
// Legacy pass identification; the address of ID is the unique identifier.
char BasicAAWrapperPass::ID = 0;
1805 
// Out-of-line virtual method to anchor the pass's vtable in this file.
void BasicAAWrapperPass::anchor() {}
1807 
1809  "Basic Alias Analysis (stateless AA impl)", true, true)
1815  "Basic Alias Analysis (stateless AA impl)", true, true)
1816 
1818  return new BasicAAWrapperPass();
1819 }
1820 
1822  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1823  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1824  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1825  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1826 
1827  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1828  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1829  &DTWP.getDomTree(),
1830  PVWP ? &PVWP->getResult() : nullptr));
1831 
1832  return false;
1833 }
1834 
1836  AU.setPreservesAll();
1841 }
1842 
1844  return BasicAAResult(
1845  F.getParent()->getDataLayout(), F,
1846  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1847  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1848 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
i
i
Definition: README.txt:29
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
llvm::ModRefInfo::NoModRef
@ NoModRef
The access neither references nor modifies the value stored in memory.
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
AssumptionCache.h
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:274
llvm::AliasResult::MayAlias
@ MayAlias
The two locations may or may not alias.
Definition: AliasAnalysis.h:102
llvm::isMallocOrCallocLikeFn
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory similar to malloc or...
Definition: MemoryBuiltins.cpp:298
llvm::AliasResult::PartialAlias
@ PartialAlias
The two locations alias, but only due to a partial overlap.
Definition: AliasAnalysis.h:104
llvm
--------------------- PointerInfo ------------------------------------
Definition: AllocatorList.h:23
llvm::SmallVectorImpl::erase
iterator erase(const_iterator CI)
Definition: SmallVector.h:705
llvm::Operator
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:31
llvm::SmallPtrSetImpl::erase
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:378
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:361
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2713
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
ForceAtLeast64Bits
static cl::opt< bool > ForceAtLeast64Bits("basic-aa-force-at-least-64b", cl::Hidden, cl::init(true))
By default, even on 32-bit architectures we use 64-bit integers for calculations.
llvm::isModAndRefSet
LLVM_NODISCARD bool isModAndRefSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:192
llvm::BasicAA::run
BasicAAResult run(Function &F, FunctionAnalysisManager &AM)
Definition: BasicAliasAnalysis.cpp:1792
llvm::generic_gep_type_iterator
Definition: GetElementPtrTypeIterator.h:31
llvm::LocationSize::afterPointer
constexpr static LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:123
Metadata.h
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:228
llvm::isNonEscapingLocalObject
bool isNonEscapingLocalObject(const Value *V, SmallDenseMap< const Value *, bool, 8 > *IsCapturedCache=nullptr)
Returns true if the pointer is to a function-local object that never escapes from the function.
Definition: CaptureTracking.cpp:377
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:769
llvm::MemoryLocation::Ptr
const Value * Ptr
The address of the start of the location.
Definition: MemoryLocation.h:217
llvm::Function
Definition: Function.h:61
llvm::AnalysisManager::Invalidator::invalidate
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:674
getMinimalExtentFrom
static uint64_t getMinimalExtentFrom(const Value &V, const LocationSize &LocSize, const DataLayout &DL, bool NullIsValidLoc)
Return the minimal extent from V to the end of the underlying object, assuming the result is used in ...
Definition: BasicAliasAnalysis.cpp:203
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
Pass.h
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:52
llvm::SetVector::size
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:77
GetElementPtrTypeIterator.h
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1168
Statistic.h
CaptureTracking.h
llvm::initializeBasicAAWrapperPassPass
void initializeBasicAAWrapperPassPass(PassRegistry &)
llvm::GlobalVariable
Definition: GlobalVariable.h:40
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:952
llvm::createModRefInfo
LLVM_NODISCARD ModRefInfo createModRefInfo(const FunctionModRefBehavior FMRB)
Definition: AliasAnalysis.h:377
llvm::BasicAAWrapperPass::runOnFunction
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition: BasicAliasAnalysis.cpp:1821
llvm::GlobalAlias
Definition: GlobalAlias.h:27
ValueTracking.h
MaxLookupSearchDepth
static const unsigned MaxLookupSearchDepth
Definition: BasicAliasAnalysis.cpp:96
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition: DenseMap.h:302
llvm::isKnownNonZero
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
Definition: ValueTracking.cpp:310
llvm::PhiValuesWrapperPass
Wrapper pass for the legacy pass manager.
Definition: PhiValues.h:139
APInt.h
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:34
llvm::MemoryLocation::Size
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
Definition: MemoryLocation.h:226
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1581
MemoryBuiltins.h
llvm::ObjectSizeOpts::NullIsUnknownSize
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Definition: MemoryBuiltins.h:208
isIntrinsicCall
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
Definition: BasicAliasAnalysis.cpp:671
llvm::Optional
Definition: APInt.h:33
llvm::AliasResult
The possible results of an alias query.
Definition: AliasAnalysis.h:81
Offset
uint64_t Offset
Definition: ELFObjHandler.cpp:81
llvm::SmallPtrSet< Value *, 4 >
llvm::ObjectSizeOpts::RoundToAlign
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
Definition: MemoryBuiltins.h:204
impl
place backedge safepoints impl
Definition: PlaceSafepoints.cpp:612
llvm::AAQueryInfo::IsCapturedCache
IsCapturedCacheT IsCapturedCache
Definition: AliasAnalysis.h:429
Operator.h
getBitWidth
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Definition: ValueTracking.cpp:89
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::gep_type_begin
gep_type_iterator gep_type_begin(const User *GEP)
Definition: GetElementPtrTypeIterator.h:139
llvm::LocationSize::isPrecise
bool isPrecise() const
Definition: MemoryLocation.h:165
BasicAliasAnalysis.h
llvm::FMRB_UnknownModRefBehavior
@ FMRB_UnknownModRefBehavior
This indicates that the function could not be classified into one of the behaviors above.
Definition: AliasAnalysis.h:368
llvm::BasicAAWrapperPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: BasicAliasAnalysis.cpp:1835
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2178
F
#define F(x, y, z)
Definition: MD5.cpp:56
isEscapeSource
static bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
Definition: BasicAliasAnalysis.cpp:119
llvm::FMRB_OnlyAccessesInaccessibleMem
@ FMRB_OnlyAccessesInaccessibleMem
The only memory references in this function (if it has any) are references of memory that is otherwis...
Definition: AliasAnalysis.h:315
KnownBits.h
AliasAnalysis.h
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
Definition: AliasAnalysis.cpp:991
llvm::ConstantPointerNull
A constant pointer value that points to null.
Definition: Constants.h:534
llvm::AAQueryInfo
This class stores info we want to provide to or retain within an alias query.
Definition: AliasAnalysis.h:414
isObjectSmallerThan
static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V is smaller than Size.
Definition: BasicAliasAnalysis.cpp:159
Instruction.h
CommandLine.h
llvm::FMRB_OnlyAccessesInaccessibleOrArgMem
@ FMRB_OnlyAccessesInaccessibleOrArgMem
The function may perform non-volatile loads and stores of objects pointed to by its pointer-typed arg...
Definition: AliasAnalysis.h:347
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1313
getMaxPointerSize
static unsigned getMaxPointerSize(const DataLayout &DL)
Definition: BasicAliasAnalysis.cpp:408
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::KnownBits::isNonNegative
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition: KnownBits.h:99
llvm::LocationSize::mayBeBeforePointer
bool mayBeBeforePointer() const
Whether accesses before the base pointer are possible.
Definition: MemoryLocation.h:173
llvm::APInt::isNonNegative
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:369
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:364
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::ModRefInfo::Ref
@ Ref
The access may reference the value stored in memory.
Constants.h
llvm::BasicAAWrapperPass::ID
static char ID
Definition: BasicAliasAnalysis.h:250
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Definition: Instructions.h:2723
llvm::AArch64::RP
@ RP
Definition: AArch64ISelLowering.h:471
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::LocationSize
Definition: MemoryLocation.h:65
llvm::LibFunc
LibFunc
Definition: TargetLibraryInfo.h:34
Intrinsics.h
DoubleCalcBits
static cl::opt< bool > DoubleCalcBits("basic-aa-double-calc-bits", cl::Hidden, cl::init(false))
InstrTypes.h
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::MemoryLocation::getForSource
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
Definition: MemoryLocation.cpp:105
llvm::LocationSize::getValue
uint64_t getValue() const
Definition: MemoryLocation.h:158
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::APInt::getLimitedValue
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:487
TargetLibraryInfo.h
llvm::isPotentiallyReachable
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:236
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::TargetLibraryInfo::getLibFunc
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Definition: TargetLibraryInfo.h:289
llvm::make_scope_exit
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:58
llvm::Instruction
Definition: Instruction.h:45
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:281
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::AArch64CC::LE
@ LE
Definition: AArch64BaseInfo.h:268
llvm::AAQueryInfo::AssumptionBasedResults
SmallVector< AAQueryInfo::LocPair, 4 > AssumptionBasedResults
Location pairs for which an assumption based result is currently stored.
Definition: AliasAnalysis.h:440
llvm::getArgumentAliasingToReturnedPointer
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
Definition: ValueTracking.cpp:4301
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Definition: ValueTracking.cpp:4356
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:214
SmallPtrSet.h
MaxNumPhiBBsValueReachabilityCheck
const unsigned MaxNumPhiBBsValueReachabilityCheck
Cutoff after which to stop analysing a set of phi nodes potentially involved in a cycle.
Definition: BasicAliasAnalysis.cpp:91
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", "Basic Alias Analysis (stateless AA impl)", true, true) INITIALIZE_PASS_END(BasicAAWrapperPass
llvm::KnownBits::isNegative
bool isNegative() const
Returns true if this value is known to be negative.
Definition: KnownBits.h:96
llvm::AnalysisManager::Invalidator
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:656
isObjectSize
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V has size Size.
Definition: BasicAliasAnalysis.cpp:223
llvm::APInt::countTrailingZeros
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Definition: APInt.h:1700
GetLinearExpression
static LinearExpression GetLinearExpression(const ExtendedValue &Val, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT)
Analyzes the specified value as a linear expression: "A*V + B", where A and B are constant integers.
Definition: BasicAliasAnalysis.cpp:304
llvm::generic_gep_type_iterator::getStructTypeOrNull
StructType * getStructTypeOrNull() const
Definition: GetElementPtrTypeIterator.h:123
llvm::isModSet
LLVM_NODISCARD bool isModSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:196
llvm::getObjectSize
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Definition: MemoryBuiltins.cpp:515
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2719
llvm::None
const NoneType None
Definition: None.h:23
Type.h
llvm::APInt::srem
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1767
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::PhiValues::getValuesForPhi
const ValueSet & getValuesForPhi(const PHINode *PN)
Get the underlying values of a phi.
Definition: PhiValues.cpp:125
llvm::setRef
LLVM_NODISCARD ModRefInfo setRef(const ModRefInfo MRI)
Definition: AliasAnalysis.h:210
llvm::BasicAAResult::pointsToConstantMemory
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Chases pointers until we find a (constant global) or not.
Definition: BasicAliasAnalysis.cpp:613
llvm::OutputFileType::Object
@ Object
llvm::clearMust
LLVM_NODISCARD ModRefInfo clearMust(const ModRefInfo MRI)
Definition: AliasAnalysis.h:228
llvm::APInt::getOneBitSet
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:593
llvm::AAQueryInfo::LocPair
std::pair< AACacheLoc, AACacheLoc > LocPair
Definition: AliasAnalysis.h:416
llvm::BasicAAResult::alias
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI)
Definition: BasicAliasAnalysis.cpp:798
llvm::cl::opt< bool >
llvm::BasicAAResult::invalidate
bool invalidate(Function &Fn, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
Definition: BasicAliasAnalysis.cpp:98
llvm::isNoModRef
LLVM_NODISCARD bool isNoModRef(const ModRefInfo MRI)
Definition: AliasAnalysis.h:185
llvm::Value::getPointerDereferenceableBytes
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:830
llvm::FMRB_OnlyAccessesArgumentPointees
@ FMRB_OnlyAccessesArgumentPointees
The only memory references in this function (if it has any) are non-volatile loads and stores from ob...
Definition: AliasAnalysis.h:294
Index
uint32_t Index
Definition: ELFObjHandler.cpp:84
llvm::TargetLibraryInfoWrapperPass
Definition: TargetLibraryInfo.h:463
llvm::LocationSize::hasValue
bool hasValue() const
Definition: MemoryLocation.h:155
llvm::LocationSize::isZero
bool isZero() const
Definition: MemoryLocation.h:170
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:572
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition: AssumptionCache.h:169
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::BasicAAWrapperPass
Legacy wrapper pass to provide the BasicAAResult object.
Definition: BasicAliasAnalysis.h:244
llvm::AAQueryInfo::NumAssumptionUses
int NumAssumptionUses
How many active NoAlias assumption uses there are.
Definition: AliasAnalysis.h:435
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
MemoryLocation.h
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:72
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
llvm::PhiValuesAnalysis
The analysis pass which yields a PhiValues.
Definition: PhiValues.h:116
llvm::isKnownNonEqual
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
Definition: ValueTracking.cpp:349
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
llvm::computeKnownBits
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Definition: ValueTracking.cpp:213
llvm::ModRefInfo
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: AliasAnalysis.h:148
llvm::AAQueryInfo::CacheEntry
Definition: AliasAnalysis.h:417
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:840
llvm::AliasResult::setOffset
void setOffset(int32_t NewOffset)
Definition: AliasAnalysis.h:122
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::generic_gep_type_iterator::getIndexedType
Type * getIndexedType() const
Definition: GetElementPtrTypeIterator.h:72
llvm::Value::stripPointerCastsForAliasAnalysis
const Value * stripPointerCastsForAliasAnalysis() const
Strip off pointer casts, all-zero GEPs, single-argument phi nodes and invariant group info.
Definition: Value.cpp:691
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1740
llvm::TargetLibraryInfo::has
bool has(LibFunc F) const
Tests whether a library function is available.
Definition: TargetLibraryInfo.h:323
llvm::FMRB_DoesNotAccessMemory
@ FMRB_DoesNotAccessMemory
This function does not perform any non-local loads or stores to memory.
Definition: AliasAnalysis.h:268
llvm::AAResultBase::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Definition: AliasAnalysis.h:1174
llvm::AMDGPU::IsaInfo::TargetIDSetting::Off
@ Off
llvm::GEPOperator
Definition: Operator.h:458
_
#define _
Definition: HexagonMCCodeEmitter.cpp:47
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:70
CFG.h
llvm::AliasResult::swap
void swap(bool DoSwap=true)
Helper for processing AliasResult for swapped memory location pairs.
Definition: AliasAnalysis.h:130
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition: AssumptionCache.h:200
llvm::BinaryOperator
Definition: InstrTypes.h:189
llvm::BasicAAResult::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Checks to see if the specified callsite can clobber the specified memory object.
Definition: BasicAliasAnalysis.cpp:812
DataLayout.h
llvm::StructType
Class to represent struct types.
Definition: DerivedTypes.h:212
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:41
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::BasicAAResult::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Get the location associated with a pointer argument of a callsite.
Definition: BasicAliasAnalysis.cpp:760
getParent
static const Function * getParent(const Value *V)
Definition: BasicAliasAnalysis.cpp:776
llvm::FMRB_OnlyReadsMemory
@ FMRB_OnlyReadsMemory
This function does not perform any non-local stores or volatile loads, but may read from any memory location.
Definition: AliasAnalysis.h:357
llvm::AliasResult::NoAlias
@ NoAlias
The two locations do not alias at all.
Definition: AliasAnalysis.h:99
Compiler.h
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:1744
llvm::ModRefInfo::Mod
@ Mod
The access may modify the value stored in memory.
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AliasResult::MustAlias
@ MustAlias
The two locations precisely alias each other.
Definition: AliasAnalysis.h:106
llvm::TargetLibraryInfoWrapperPass::getTLI
TargetLibraryInfo & getTLI(const Function &F)
Definition: TargetLibraryInfo.h:475
llvm::ObjectSizeOpts
Various options to control the behavior of getObjectSize.
Definition: MemoryBuiltins.h:188
llvm::FunctionModRefBehavior
FunctionModRefBehavior
Summary of how a function affects memory in the program.
Definition: AliasAnalysis.h:262
llvm::BasicAAResult
This is the AA result object for the basic, local, and stateless alias analysis.
Definition: BasicAliasAnalysis.h:50
LLVM_FALLTHROUGH
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:273
adjustToPointerSize
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize)
To ensure a pointer offset fits in an integer of size PointerSize (in bits) when that size is smaller than the maximum pointer size.
Definition: BasicAliasAnalysis.cpp:402
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:92
llvm::AAResultBase::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Definition: AliasAnalysis.h:1186
Argument.h
llvm::GEPOperator::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Method to return the address space of the pointer operand.
Definition: Operator.h:518
PhiValues.h
llvm::BasicAAWrapperPass::BasicAAWrapperPass
BasicAAWrapperPass()
Definition: BasicAliasAnalysis.cpp:1800
llvm::createBasicAAWrapperPass
FunctionPass * createBasicAAWrapperPass()
Definition: BasicAliasAnalysis.cpp:1817
Attributes.h
j
return j(j<< 16)
Constant.h
llvm::NVPTX::PTXLdStInstCode::V2
@ V2
Definition: NVPTX.h:123
llvm::KnownBits
Definition: KnownBits.h:23
llvm::isIdentifiedObject
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
Definition: AliasAnalysis.cpp:979
EnableRecPhiAnalysis
static cl::opt< bool > EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, cl::init(true))
Enable analysis of recursive PHI nodes.
llvm::AssumptionCacheTracker::getAssumptionCache
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
Definition: AssumptionCache.cpp:272
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition: PassAnalysisSupport.h:130
llvm::LocationSize::beforeOrAfterPointer
constexpr static LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:129
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:314
llvm::GEPOperator::isInBounds
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition: Operator.h:475
GlobalVariable.h
llvm::AnalysisManager::getCachedResult
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:788
Casting.h
Function.h
llvm::AAQueryInfo::AliasCache
AliasCacheT AliasCache
Definition: AliasAnalysis.h:426
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:219
llvm::APInt::isNonPositive
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition: APInt.h:396
llvm::orc::ReadOnly
static constexpr sys::Memory::ProtectionFlags ReadOnly
Definition: DebugObjectManagerPlugin.cpp:111
isWriteOnlyParam
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo &TLI)
Returns true if this is a writeonly (i.e Mod only) parameter.
Definition: BasicAliasAnalysis.cpp:735
llvm::AAResultBase::pointsToConstantMemory
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Definition: AliasAnalysis.h:1169
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:183
GlobalAlias.h
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:403
llvm::AAQueryInfo::withEmptyCache
AAQueryInfo withEmptyCache()
Create a new AAQueryInfo based on this one, but with the cache cleared.
Definition: AliasAnalysis.h:447
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::APInt::abs
APInt abs() const
Get the absolute value;.
Definition: APInt.h:1863
llvm::createLegacyPMBasicAAResult
BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F)
A helper for the legacy pass manager to create a BasicAAResult object populated to the best of our ability for a particular function when inside of a ModulePass or a CallGraphSCCPass.
Definition: BasicAliasAnalysis.cpp:1843
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:252
llvm::FMRB_OnlyWritesMemory
@ FMRB_OnlyWritesMemory
Definition: AliasAnalysis.h:364
llvm::AAResultBase< BasicAAResult >::getBestAAResults
AAResultsProxy getBestAAResults()
Get a proxy for the best AA result set to query at this time.
Definition: AliasAnalysis.h:1161
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
aa
basic aa
Definition: BasicAliasAnalysis.cpp:1814
Instructions.h
llvm::GEPOperator::getSourceElementType
Type * getSourceElementType() const
Definition: Operator.cpp:22
SmallVector.h
User.h
llvm::User::op_begin
op_iterator op_begin()
Definition: User.h:234
Dominators.h
llvm::APIntOps::GreatestCommonDivisor
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
Definition: APInt.cpp:723
llvm::SmallPtrSetImplBase::empty
LLVM_NODISCARD bool empty() const
Definition: SmallPtrSet.h:91
N
#define N
llvm::DenseMapBase::try_emplace
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:222
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
InstructionSimplify.h
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition: Instructions.h:2743
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::setMust
LLVM_NODISCARD ModRefInfo setMust(const ModRefInfo MRI)
Definition: AliasAnalysis.h:214
llvm::PHINode
Definition: Instructions.h:2627
ScopeExit.h
llvm::SmallVectorImpl< VariableGEPIndex >
llvm::MemoryLocation::getBeforeOrAfter
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underlying object.
Definition: MemoryLocation.h:275
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to calling a function.
Definition: InstrTypes.h:1161
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition: PassAnalysisSupport.h:81
DerivedTypes.h
llvm::SmallSetVector< Value *, 4 >
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:44
llvm::AnalysisUsage::addUsedIfAvailable
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
Definition: PassAnalysisSupport.h:117
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:298
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1475
llvm::MemoryLocation::getForDest
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Definition: MemoryLocation.cpp:126
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::NullPointerIsDefined
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
Definition: Function.cpp:1925
true
basic Basic Alias true
Definition: BasicAliasAnalysis.cpp:1815
Value.h
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition: TargetLibraryInfo.h:438
llvm::BasicAAResult::getModRefBehavior
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Returns the behavior when calling the given call site.
Definition: BasicAliasAnalysis.cpp:677
llvm::ModRefInfo::ModRef
@ ModRef
The access may reference and may modify the value stored in memory.
llvm::MemoryLocation
Representation for a specific memory location.
Definition: MemoryLocation.h:209
MergeAliasResults
static AliasResult MergeAliasResults(AliasResult A, AliasResult B)
Definition: BasicAliasAnalysis.cpp:1258
llvm::AAQueryInfo::Depth
unsigned Depth
Query depth used to distinguish recursive queries.
Definition: AliasAnalysis.h:432
llvm::setMod
LLVM_NODISCARD ModRefInfo setMod(const ModRefInfo MRI)
Definition: AliasAnalysis.h:206
notDifferentParent
static bool notDifferentParent(const Value *O1, const Value *O2)
Definition: BasicAliasAnalysis.cpp:789
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:128
isBaseOfObject
static bool isBaseOfObject(const Value *V)
Return true if we know V to the base address of the corresponding memory object.
Definition: BasicAliasAnalysis.cpp:1013
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::User::op_end
op_iterator op_end()
Definition: User.h:236