1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/ConstantRange.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GetElementPtrTypeIterator.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/KnownBits.h"
59 #include <cassert>
60 #include <cstdint>
61 #include <cstdlib>
62 #include <utility>
63 
64 #define DEBUG_TYPE "basicaa"
65 
66 using namespace llvm;
67 
68 /// Enable analysis of recursive PHI nodes.
69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
70  cl::init(true));
71 
72 /// SearchLimitReached / SearchTimes show how often the limit to
73 /// decompose GEPs is reached. It affects the precision
74 /// of basic alias analysis.
75 STATISTIC(SearchLimitReached, "Number of times the limit to "
76  "decompose GEPs is reached");
77 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
78 
79 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
80 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
81 /// careful with value equivalence. We use reachability to make sure a value
82 /// cannot be involved in a cycle.
84 
85 // The max limit of the search depth in DecomposeGEPExpression() and
86 // getUnderlyingObject().
87 static const unsigned MaxLookupSearchDepth = 6;
88 
89 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
90                                FunctionAnalysisManager::Invalidator &Inv) {
91   // We don't care if this analysis itself is preserved, it has no state. But
92  // we need to check that the analyses it depends on have been. Note that we
93  // may be created without handles to some analyses and in that case don't
94  // depend on them.
95  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
96  (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
97  (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
98  return true;
99 
100  // Otherwise this analysis result remains valid.
101  return false;
102 }
103 
104 //===----------------------------------------------------------------------===//
105 // Useful predicates
106 //===----------------------------------------------------------------------===//
107 
108 /// Returns true if the pointer is one which would have been considered an
109 /// escape by isNonEscapingLocalObject.
110 static bool isEscapeSource(const Value *V) {
111  if (isa<CallBase>(V))
112  return true;
113 
114  // The load case works because isNonEscapingLocalObject considers all
115  // stores to be escapes (it passes true for the StoreCaptures argument
116  // to PointerMayBeCaptured).
117  if (isa<LoadInst>(V))
118  return true;
119 
120  // The inttoptr case works because isNonEscapingLocalObject considers all
121  // means of converting or equating a pointer to an int (ptrtoint, ptr store
122  // which could be followed by an integer load, ptr<->int compare) as
123  // escaping, and objects located at well-known addresses via platform-specific
124  // means cannot be considered non-escaping local objects.
125  if (isa<IntToPtrInst>(V))
126  return true;
127 
128  return false;
129 }
130 
131 /// Returns the size of the object specified by V or UnknownSize if unknown.
132 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
133  const TargetLibraryInfo &TLI,
134  bool NullIsValidLoc,
135  bool RoundToAlign = false) {
136  uint64_t Size;
137  ObjectSizeOpts Opts;
138  Opts.RoundToAlign = RoundToAlign;
139  Opts.NullIsUnknownSize = NullIsValidLoc;
140  if (getObjectSize(V, Size, DL, &TLI, Opts))
141  return Size;
142   return MemoryLocation::UnknownSize;
143 }
144 
145 /// Returns true if we can prove that the object specified by V is smaller than
146 /// Size.
147 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
148  const DataLayout &DL,
149  const TargetLibraryInfo &TLI,
150  bool NullIsValidLoc) {
151  // Note that the meanings of the "object" are slightly different in the
152  // following contexts:
153  // c1: llvm::getObjectSize()
154  // c2: llvm.objectsize() intrinsic
155  // c3: isObjectSmallerThan()
156  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
157  // refers to the "entire object".
158  //
159  // Consider this example:
160  // char *p = (char*)malloc(100)
161  // char *q = p+80;
162  //
163  // In the context of c1 and c2, the "object" pointed by q refers to the
164  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
165  //
166  // However, in the context of c3, the "object" refers to the chunk of memory
167  // being allocated. So, the "object" has 100 bytes, and q points to the middle
168  // of the "object". If q is passed to isObjectSmallerThan() as the 1st
169  // parameter, then before llvm::getObjectSize() is called to get the size of
170  // the entire object, we should:
171  // - either rewind the pointer q to the base address of the object in
172  // question (in this case rewind to p), or
173  // - just give up. It is up to the caller to make sure the pointer is pointing
174  // to the base address of the object.
175  //
176  // We go for the 2nd option for simplicity.
177  if (!isIdentifiedObject(V))
178  return false;
179 
180  // This function needs to use the aligned object size because we allow
181  // reads a bit past the end given sufficient alignment.
182  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
183  /*RoundToAlign*/ true);
184 
185  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
186 }
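// For illustration only (names and sizes assumed for the example, not taken
// from a real test case): given
//   %buf = alloca [16 x i8]
//   %mid = getelementptr inbounds [16 x i8], ptr %buf, i64 0, i64 8
// isObjectSmallerThan(%buf, 32, ...) can return true, because %buf is an
// identified object whose alignment-rounded size (16 bytes) is below 32,
// while isObjectSmallerThan(%mid, 32, ...) simply returns false: %mid is not
// an identified object base, so per the 2nd option above we give up rather
// than rewinding to %buf.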
187 
188 /// Return the minimal extent from \p V to the end of the underlying object,
189 /// assuming the result is used in an aliasing query. E.g., we do use the query
190 /// location size and the fact that null pointers cannot alias here.
191 static uint64_t getMinimalExtentFrom(const Value &V,
192                                      const LocationSize &LocSize,
193  const DataLayout &DL,
194  bool NullIsValidLoc) {
195  // If we have dereferenceability information we know a lower bound for the
196  // extent as accesses for a lower offset would be valid. We need to exclude
197  // the "or null" part if null is a valid pointer. We can ignore frees, as an
198  // access after free would be undefined behavior.
199  bool CanBeNull, CanBeFreed;
200  uint64_t DerefBytes =
201  V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
202  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
203   // If queried with a precise location size, we assume that location size is
204   // accessed and thus valid.
205  if (LocSize.isPrecise())
206  DerefBytes = std::max(DerefBytes, LocSize.getValue());
207  return DerefBytes;
208 }
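// For illustration only (attributes assumed for the example): for an argument
// declared "ptr dereferenceable(64) %p" queried with a precise LocationSize
// of 16, the minimal extent is max(64, 16) = 64 bytes; with
// dereferenceable_or_null(64) in a function where null is a valid address,
// the dereferenceability bound is dropped and only the 16 accessed bytes
// remain.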
209 
210 /// Returns true if we can prove that the object specified by V has size Size.
211 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
212  const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
213  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
214  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
215 }
216 
217 //===----------------------------------------------------------------------===//
218 // CaptureInfo implementations
219 //===----------------------------------------------------------------------===//
220 
221 CaptureInfo::~CaptureInfo() = default;
222 
223 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
224                                                 const Instruction *I) {
225  return isNonEscapingLocalObject(Object, &IsCapturedCache);
226 }
227 
228 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
229                                                  const Instruction *I) {
230   if (!isIdentifiedFunctionLocal(Object))
231     return false;
232 
233  auto Iter = EarliestEscapes.insert({Object, nullptr});
234  if (Iter.second) {
235  Instruction *EarliestCapture = FindEarliestCapture(
236  Object, *const_cast<Function *>(I->getFunction()),
237  /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
238  if (EarliestCapture) {
239  auto Ins = Inst2Obj.insert({EarliestCapture, {}});
240  Ins.first->second.push_back(Object);
241  }
242  Iter.first->second = EarliestCapture;
243  }
244 
245  // No capturing instruction.
246  if (!Iter.first->second)
247  return true;
248 
249  return I != Iter.first->second &&
250  !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
251 }
252 
253 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
254   auto Iter = Inst2Obj.find(I);
255  if (Iter != Inst2Obj.end()) {
256  for (const Value *Obj : Iter->second)
257  EarliestEscapes.erase(Obj);
258  Inst2Obj.erase(I);
259  }
260 }
261 
262 //===----------------------------------------------------------------------===//
263 // GetElementPtr Instruction Decomposition and Analysis
264 //===----------------------------------------------------------------------===//
265 
266 namespace {
267 /// Represents zext(sext(trunc(V))).
268 struct CastedValue {
269  const Value *V;
270  unsigned ZExtBits = 0;
271  unsigned SExtBits = 0;
272  unsigned TruncBits = 0;
273 
274  explicit CastedValue(const Value *V) : V(V) {}
275  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
276  unsigned TruncBits)
277  : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
278 
279  unsigned getBitWidth() const {
280  return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
281  SExtBits;
282  }
283 
284  CastedValue withValue(const Value *NewV) const {
285  return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
286  }
287 
288  /// Replace V with zext(NewV)
289  CastedValue withZExtOfValue(const Value *NewV) const {
290  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
291  NewV->getType()->getPrimitiveSizeInBits();
292  if (ExtendBy <= TruncBits)
293  return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
294 
295  // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
296  ExtendBy -= TruncBits;
297  return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
298  }
299 
300  /// Replace V with sext(NewV)
301  CastedValue withSExtOfValue(const Value *NewV) const {
302  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
303  NewV->getType()->getPrimitiveSizeInBits();
304  if (ExtendBy <= TruncBits)
305  return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
306 
307  // zext(sext(sext(NewV)))
308  ExtendBy -= TruncBits;
309  return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
310  }
311 
312  APInt evaluateWith(APInt N) const {
313  assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
314  "Incompatible bit width");
315  if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
316  if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
317  if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
318  return N;
319  }
320 
321  ConstantRange evaluateWith(ConstantRange N) const {
322  assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
323  "Incompatible bit width");
324  if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
325  if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
326  if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
327  return N;
328  }
329 
330  bool canDistributeOver(bool NUW, bool NSW) const {
331  // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
332  // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
333  // trunc(x op y) == trunc(x) op trunc(y)
334  return (!ZExtBits || NUW) && (!SExtBits || NSW);
335  }
336 
337  bool hasSameCastsAs(const CastedValue &Other) const {
338  return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
339  TruncBits == Other.TruncBits;
340  }
341 };
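// For illustration only (widths assumed): a CastedValue over an i32 value V
// with TruncBits=16, SExtBits=8 and ZExtBits=8 models
// zext(sext(trunc(V to i16) to i24) to i32), so getBitWidth() is
// 32 - 16 + 8 + 8 = 32 and evaluateWith(0x0000FFFF) yields 0xFFFF after the
// trunc, 0xFFFFFF after the sext, and 0x00FFFFFF after the zext.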
342 
343 /// Represents zext(sext(trunc(V))) * Scale + Offset.
344 struct LinearExpression {
345  CastedValue Val;
346  APInt Scale;
347  APInt Offset;
348 
349  /// True if all operations in this expression are NSW.
350  bool IsNSW;
351 
352  LinearExpression(const CastedValue &Val, const APInt &Scale,
353  const APInt &Offset, bool IsNSW)
354  : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
355 
356  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
357  unsigned BitWidth = Val.getBitWidth();
358  Scale = APInt(BitWidth, 1);
359  Offset = APInt(BitWidth, 0);
360  }
361 };
362 }
363 
364 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
365 /// B are constant integers.
366 static LinearExpression GetLinearExpression(
367  const CastedValue &Val, const DataLayout &DL, unsigned Depth,
368  AssumptionCache *AC, DominatorTree *DT) {
369  // Limit our recursion depth.
370  if (Depth == 6)
371  return Val;
372 
373  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
374  return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
375  Val.evaluateWith(Const->getValue()), true);
376 
377  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
378  if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
379  APInt RHS = Val.evaluateWith(RHSC->getValue());
380  // The only non-OBO case we deal with is or, and only limited to the
381  // case where it is both nuw and nsw.
382  bool NUW = true, NSW = true;
383  if (isa<OverflowingBinaryOperator>(BOp)) {
384  NUW &= BOp->hasNoUnsignedWrap();
385  NSW &= BOp->hasNoSignedWrap();
386  }
387  if (!Val.canDistributeOver(NUW, NSW))
388  return Val;
389 
390  // While we can distribute over trunc, we cannot preserve nowrap flags
391  // in that case.
392  if (Val.TruncBits)
393  NUW = NSW = false;
394 
395  LinearExpression E(Val);
396  switch (BOp->getOpcode()) {
397  default:
398  // We don't understand this instruction, so we can't decompose it any
399  // further.
400  return Val;
401  case Instruction::Or:
402  // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
403  // analyze it.
404  if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
405  BOp, DT))
406  return Val;
407 
407 
408       LLVM_FALLTHROUGH;
409     case Instruction::Add: {
410  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
411  Depth + 1, AC, DT);
412  E.Offset += RHS;
413  E.IsNSW &= NSW;
414  break;
415  }
416  case Instruction::Sub: {
417  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
418  Depth + 1, AC, DT);
419  E.Offset -= RHS;
420  E.IsNSW &= NSW;
421  break;
422  }
423  case Instruction::Mul: {
424  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
425  Depth + 1, AC, DT);
426  E.Offset *= RHS;
427  E.Scale *= RHS;
428  E.IsNSW &= NSW;
429  break;
430  }
431  case Instruction::Shl:
432  // We're trying to linearize an expression of the kind:
433  // shl i8 -128, 36
434  // where the shift count exceeds the bitwidth of the type.
435  // We can't decompose this further (the expression would return
436  // a poison value).
437  if (RHS.getLimitedValue() > Val.getBitWidth())
438  return Val;
439 
440  E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
441  Depth + 1, AC, DT);
442  E.Offset <<= RHS.getLimitedValue();
443  E.Scale <<= RHS.getLimitedValue();
444  E.IsNSW &= NSW;
445  break;
446  }
447  return E;
448  }
449  }
450 
451  if (isa<ZExtInst>(Val.V))
452  return GetLinearExpression(
453  Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
454  DL, Depth + 1, AC, DT);
455 
456  if (isa<SExtInst>(Val.V))
457  return GetLinearExpression(
458  Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
459  DL, Depth + 1, AC, DT);
460 
461  return Val;
462 }
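// For illustration only (an assumed input): starting from
//   %x = shl nsw i64 %a, 2
//   %y = add nsw i64 %x, 12
// GetLinearExpression(%y) recurses through the add and the shl and returns
// Val = %a, Scale = 4, Offset = 12 with IsNSW = true, i.e. %y == 4*%a + 12.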
463 
464 /// To ensure a pointer offset fits in an integer of size PointerSize
465 /// (in bits) when that size is smaller than the maximum pointer size. This is
466 /// an issue, for example, in particular for 32b pointers with negative indices
467 /// that rely on two's complement wrap-arounds for precise alias information
468 /// where the maximum pointer size is 64b.
469 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
470   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
471  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
472  return (Offset << ShiftBits).ashr(ShiftBits);
473 }
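// For illustration only (numbers assumed): with a 64-bit Offset of
// 0x00000000FFFFFFFF and PointerSize = 32, ShiftBits = 32, so the offset is
// shifted left and arithmetically shifted back, yielding
// 0xFFFFFFFFFFFFFFFF, i.e. it is reinterpreted as the 32-bit two's
// complement value -1.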
474 
475 namespace {
476 // A linear transformation of a Value; this class represents
477 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
478 struct VariableGEPIndex {
479  CastedValue Val;
480  APInt Scale;
481 
482  // Context instruction to use when querying information about this index.
483  const Instruction *CxtI;
484 
485  /// True if all operations in this expression are NSW.
486  bool IsNSW;
487 
488  void dump() const {
489  print(dbgs());
490  dbgs() << "\n";
491  }
492  void print(raw_ostream &OS) const {
493  OS << "(V=" << Val.V->getName()
494  << ", zextbits=" << Val.ZExtBits
495  << ", sextbits=" << Val.SExtBits
496  << ", truncbits=" << Val.TruncBits
497  << ", scale=" << Scale << ")";
498  }
499 };
500 }
501 
502 // Represents the internal structure of a GEP, decomposed into a base pointer,
503 // constant offsets, and variable scaled indices.
504 struct BasicAAResult::DecomposedGEP {
505   // Base pointer of the GEP
506   const Value *Base;
507   // Total constant offset from base.
508   APInt Offset;
509   // Scaled variable (non-constant) indices.
510   SmallVector<VariableGEPIndex, 4> VarIndices;
511   // Are all operations inbounds GEPs or non-indexing operations?
512   // (None iff expression doesn't involve any geps)
513   Optional<bool> InBounds;
514 
515  void dump() const {
516  print(dbgs());
517  dbgs() << "\n";
518  }
519  void print(raw_ostream &OS) const {
520  OS << "(DecomposedGEP Base=" << Base->getName()
521  << ", Offset=" << Offset
522  << ", VarIndices=[";
523  for (size_t i = 0; i < VarIndices.size(); i++) {
524  if (i != 0)
525  OS << ", ";
526  VarIndices[i].print(OS);
527  }
528  OS << "])";
529  }
530 };
531 
532 
533 /// If V is a symbolic pointer expression, decompose it into a base pointer
534 /// with a constant offset and a number of scaled symbolic offsets.
535 ///
536 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
537 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
538 /// specified amount, but which may have other unrepresented high bits. As
539 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
540 BasicAAResult::DecomposedGEP
541 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
542  AssumptionCache *AC, DominatorTree *DT) {
543  // Limit recursion depth to limit compile time in crazy cases.
544  unsigned MaxLookup = MaxLookupSearchDepth;
545  SearchTimes++;
546  const Instruction *CxtI = dyn_cast<Instruction>(V);
547 
548  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
549  DecomposedGEP Decomposed;
550  Decomposed.Offset = APInt(MaxPointerSize, 0);
551  do {
552  // See if this is a bitcast or GEP.
553  const Operator *Op = dyn_cast<Operator>(V);
554  if (!Op) {
555  // The only non-operator case we can handle are GlobalAliases.
556  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
557  if (!GA->isInterposable()) {
558  V = GA->getAliasee();
559  continue;
560  }
561  }
562  Decomposed.Base = V;
563  return Decomposed;
564  }
565 
566  if (Op->getOpcode() == Instruction::BitCast ||
567  Op->getOpcode() == Instruction::AddrSpaceCast) {
568  V = Op->getOperand(0);
569  continue;
570  }
571 
572  const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
573  if (!GEPOp) {
574  if (const auto *PHI = dyn_cast<PHINode>(V)) {
575  // Look through single-arg phi nodes created by LCSSA.
576  if (PHI->getNumIncomingValues() == 1) {
577  V = PHI->getIncomingValue(0);
578  continue;
579  }
580  } else if (const auto *Call = dyn_cast<CallBase>(V)) {
581       // CaptureTracking can know about special capturing properties of some
582       // intrinsics like launder.invariant.group that can't be expressed with
583       // the attributes, but have properties like returning an aliasing pointer.
584       // Because some analyses may assume that a nocaptured pointer is not
585       // returned from some special intrinsic (because the function would have to
586       // be marked with the returns attribute), it is crucial to use this function
587       // because it should be in sync with CaptureTracking. Not using it may
588       // cause weird miscompilations where 2 aliasing pointers are assumed to
589       // be noalias.
590  if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
591  V = RP;
592  continue;
593  }
594  }
595 
596  Decomposed.Base = V;
597  return Decomposed;
598  }
599 
600  // Track whether we've seen at least one in bounds gep, and if so, whether
601  // all geps parsed were in bounds.
602  if (Decomposed.InBounds == None)
603  Decomposed.InBounds = GEPOp->isInBounds();
604  else if (!GEPOp->isInBounds())
605  Decomposed.InBounds = false;
606 
607  assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
608 
609  // Don't attempt to analyze GEPs if index scale is not a compile-time
610  // constant.
611  if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
612  Decomposed.Base = V;
613  return Decomposed;
614  }
615 
616  unsigned AS = GEPOp->getPointerAddressSpace();
617  // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
618  gep_type_iterator GTI = gep_type_begin(GEPOp);
619  unsigned PointerSize = DL.getPointerSizeInBits(AS);
620  // Assume all GEP operands are constants until proven otherwise.
621  bool GepHasConstantOffset = true;
622  for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
623  I != E; ++I, ++GTI) {
624  const Value *Index = *I;
625  // Compute the (potentially symbolic) offset in bytes for this index.
626  if (StructType *STy = GTI.getStructTypeOrNull()) {
627  // For a struct, add the member offset.
628  unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
629  if (FieldNo == 0)
630  continue;
631 
632  Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
633  continue;
634  }
635 
636  // For an array/pointer, add the element offset, explicitly scaled.
637  if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
638  if (CIdx->isZero())
639  continue;
640  Decomposed.Offset +=
641  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
642  CIdx->getValue().sextOrTrunc(MaxPointerSize);
643  continue;
644  }
645 
646  GepHasConstantOffset = false;
647 
648  APInt Scale(MaxPointerSize,
649  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
650  // If the integer type is smaller than the pointer size, it is implicitly
651  // sign extended to pointer size.
652  unsigned Width = Index->getType()->getIntegerBitWidth();
653  unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
654  unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0;
655  LinearExpression LE = GetLinearExpression(
656  CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
657 
658  // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
659  // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
660  Decomposed.Offset += LE.Offset.sextOrTrunc(MaxPointerSize) * Scale;
661  Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
662 
663  // If we already had an occurrence of this index variable, merge this
664  // scale into it. For example, we want to handle:
665  // A[x][x] -> x*16 + x*4 -> x*20
666  // This also ensures that 'x' only appears in the index list once.
667  for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
668  if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
669  Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
670  Scale += Decomposed.VarIndices[i].Scale;
671  Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
672  break;
673  }
674  }
675 
676  // Make sure that we have a scale that makes sense for this target's
677  // pointer size.
678  Scale = adjustToPointerSize(Scale, PointerSize);
679 
680  if (!!Scale) {
681  VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
682  Decomposed.VarIndices.push_back(Entry);
683  }
684  }
685 
686  // Take care of wrap-arounds
687  if (GepHasConstantOffset)
688  Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
689 
690  // Analyze the base pointer next.
691  V = GEPOp->getOperand(0);
692  } while (--MaxLookup);
693 
694  // If the chain of expressions is too deep, just return early.
695  Decomposed.Base = V;
696  SearchLimitReached++;
697  return Decomposed;
698 }
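// For illustration only (an assumed GEP, not from a real test case):
// decomposing
//   %p = getelementptr inbounds { i32, [8 x i16] }, ptr %base, i64 %i, i32 1, i64 %j
// on a target with 64-bit pointers yields Base = %base, InBounds = true,
// Offset = 4 (the offset of field 1) and two variable indices,
// (%i, scale 20) from the 20-byte struct stride and (%j, scale 2) from the
// i16 array element, i.e. the address is %base + 20*%i + 2*%j + 4.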
699 
700 /// Returns whether the given pointer value points to memory that is local to
701 /// the function, with global constants being considered local to all
702 /// functions.
703 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
704                                            AAQueryInfo &AAQI, bool OrLocal) {
705  assert(Visited.empty() && "Visited must be cleared after use!");
706 
707  unsigned MaxLookup = 8;
708   SmallVector<const Value *, 16> Worklist;
709   Worklist.push_back(Loc.Ptr);
710  do {
711  const Value *V = getUnderlyingObject(Worklist.pop_back_val());
712  if (!Visited.insert(V).second) {
713  Visited.clear();
714  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
715  }
716 
717  // An alloca instruction defines local memory.
718  if (OrLocal && isa<AllocaInst>(V))
719  continue;
720 
721  // A global constant counts as local memory for our purposes.
722  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
723  // Note: this doesn't require GV to be "ODR" because it isn't legal for a
724  // global to be marked constant in some modules and non-constant in
725  // others. GV may even be a declaration, not a definition.
726  if (!GV->isConstant()) {
727  Visited.clear();
728  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
729  }
730  continue;
731  }
732 
733  // If both select values point to local memory, then so does the select.
734  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
735  Worklist.push_back(SI->getTrueValue());
736  Worklist.push_back(SI->getFalseValue());
737  continue;
738  }
739 
740  // If all values incoming to a phi node point to local memory, then so does
741  // the phi.
742  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
743  // Don't bother inspecting phi nodes with many operands.
744  if (PN->getNumIncomingValues() > MaxLookup) {
745  Visited.clear();
746  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
747  }
748  append_range(Worklist, PN->incoming_values());
749  continue;
750  }
751 
752  // Otherwise be conservative.
753  Visited.clear();
754  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
755  } while (!Worklist.empty() && --MaxLookup);
756 
757  Visited.clear();
758  return Worklist.empty();
759 }
760 
761 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
762  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
763  return II && II->getIntrinsicID() == IID;
764 }
765 
766 /// Returns the behavior when calling the given call site.
767 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
768   if (Call->doesNotAccessMemory())
769     // Can't do better than this.
770     return FMRB_DoesNotAccessMemory;
771 
772   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
773 
774  // If the callsite knows it only reads memory, don't return worse
775  // than that.
776  if (Call->onlyReadsMemory())
777  Min = FMRB_OnlyReadsMemory;
778  else if (Call->doesNotReadMemory())
779  Min = FMRB_OnlyWritesMemory;
780 
781   if (Call->onlyAccessesArgMemory())
782     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
783   else if (Call->onlyAccessesInaccessibleMemory())
784     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
785   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
786     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
787 
788  // If the call has operand bundles then aliasing attributes from the function
789  // it calls do not directly apply to the call. This can be made more precise
790  // in the future.
791  if (!Call->hasOperandBundles())
792  if (const Function *F = Call->getCalledFunction())
793       Min =
794           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
795 
796  return Min;
797 }
798 
799 /// Returns the behavior when calling the given function. For use when the call
800 /// site is not known.
801 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
802   // If the function declares it doesn't access memory, we can't do better.
803   if (F->doesNotAccessMemory())
804     return FMRB_DoesNotAccessMemory;
805 
806   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
807 
808  // If the function declares it only reads memory, go with that.
809  if (F->onlyReadsMemory())
810  Min = FMRB_OnlyReadsMemory;
811  else if (F->doesNotReadMemory())
812  Min = FMRB_OnlyWritesMemory;
813 
814   if (F->onlyAccessesArgMemory())
815     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
816   else if (F->onlyAccessesInaccessibleMemory())
817     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
818   else if (F->onlyAccessesInaccessibleMemOrArgMem())
819     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
820 
821  return Min;
822 }
823 
824 /// Returns true if this is a writeonly (i.e., Mod-only) parameter.
825 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
826  const TargetLibraryInfo &TLI) {
827  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
828  return true;
829 
830  // We can bound the aliasing properties of memset_pattern16 just as we can
831  // for memcpy/memset. This is particularly important because the
832  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
833  // whenever possible.
834  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
835  // attributes.
836  LibFunc F;
837  if (Call->getCalledFunction() &&
838  TLI.getLibFunc(*Call->getCalledFunction(), F) &&
839  F == LibFunc_memset_pattern16 && TLI.has(F))
840  if (ArgIdx == 0)
841  return true;
842 
843  // TODO: memset_pattern4, memset_pattern8
844  // TODO: _chk variants
845  // TODO: strcmp, strcpy
846 
847  return false;
848 }
849 
850 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
851                                            unsigned ArgIdx) {
852  // Checking for known builtin intrinsics and target library functions.
853  if (isWriteOnlyParam(Call, ArgIdx, TLI))
854  return ModRefInfo::Mod;
855 
856  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
857  return ModRefInfo::Ref;
858 
859  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
860  return ModRefInfo::NoModRef;
861 
862  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
863 }
864 
865 #ifndef NDEBUG
866 static const Function *getParent(const Value *V) {
867  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
868  if (!inst->getParent())
869  return nullptr;
870  return inst->getParent()->getParent();
871  }
872 
873  if (const Argument *arg = dyn_cast<Argument>(V))
874  return arg->getParent();
875 
876  return nullptr;
877 }
878 
879 static bool notDifferentParent(const Value *O1, const Value *O2) {
880 
881  const Function *F1 = getParent(O1);
882  const Function *F2 = getParent(O2);
883 
884  return !F1 || !F2 || F1 == F2;
885 }
886 #endif
887 
888 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
889                                  const MemoryLocation &LocB,
890  AAQueryInfo &AAQI) {
891  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
892  "BasicAliasAnalysis doesn't support interprocedural queries.");
893  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
894 }
895 
896 /// Checks to see if the specified callsite can clobber the specified memory
897 /// object.
898 ///
899 /// Since we only look at local properties of this function, we really can't
900 /// say much about this query. We do, however, use simple "address taken"
901 /// analysis on local objects.
902 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
903                                         const MemoryLocation &Loc,
904  AAQueryInfo &AAQI) {
905  assert(notDifferentParent(Call, Loc.Ptr) &&
906  "AliasAnalysis query involving multiple functions!");
907 
908  const Value *Object = getUnderlyingObject(Loc.Ptr);
909 
910  // Calls marked 'tail' cannot read or write allocas from the current frame
911  // because the current frame might be destroyed by the time they run. However,
912  // a tail call may use an alloca with byval. Calling with byval copies the
913  // contents of the alloca into argument registers or stack slots, so there is
914  // no lifetime issue.
915  if (isa<AllocaInst>(Object))
916  if (const CallInst *CI = dyn_cast<CallInst>(Call))
917  if (CI->isTailCall() &&
918  !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
919  return ModRefInfo::NoModRef;
920 
921  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
922  // modify them even though the alloca is not escaped.
923  if (auto *AI = dyn_cast<AllocaInst>(Object))
924  if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
925  return ModRefInfo::Mod;
926 
927  // If the pointer is to a locally allocated object that does not escape,
928   // then the call cannot mod/ref the pointer unless the call takes the pointer
929  // as an argument, and itself doesn't capture it.
930  if (!isa<Constant>(Object) && Call != Object &&
931  AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
932 
933  // Optimistically assume that call doesn't touch Object and check this
934  // assumption in the following loop.
935     ModRefInfo Result = ModRefInfo::NoModRef;
936     bool IsMustAlias = true;
937 
938  unsigned OperandNo = 0;
939  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
940  CI != CE; ++CI, ++OperandNo) {
941  // Only look at the no-capture or byval pointer arguments. If this
942  // pointer were passed to arguments that were neither of these, then it
943  // couldn't be no-capture.
944  if (!(*CI)->getType()->isPointerTy() ||
945  (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
946  !Call->isByValArgument(OperandNo)))
947  continue;
948 
949  // Call doesn't access memory through this operand, so we don't care
950  // if it aliases with Object.
951  if (Call->doesNotAccessMemory(OperandNo))
952  continue;
953 
954  // If this is a no-capture pointer argument, see if we can tell that it
955  // is impossible to alias the pointer we're checking.
956  AliasResult AR = getBestAAResults().alias(
957           MemoryLocation::getBeforeOrAfter(*CI),
958           MemoryLocation::getBeforeOrAfter(Object), AAQI);
959       if (AR != AliasResult::MustAlias)
960  IsMustAlias = false;
961  // Operand doesn't alias 'Object', continue looking for other aliases
962  if (AR == AliasResult::NoAlias)
963  continue;
964  // Operand aliases 'Object', but call doesn't modify it. Strengthen
965       // initial assumption and keep looking in case there are more aliases.
966  if (Call->onlyReadsMemory(OperandNo)) {
967  Result = setRef(Result);
968  continue;
969  }
970  // Operand aliases 'Object' but call only writes into it.
971  if (Call->doesNotReadMemory(OperandNo)) {
972  Result = setMod(Result);
973  continue;
974  }
975  // This operand aliases 'Object' and call reads and writes into it.
976  // Setting ModRef will not yield an early return below, MustAlias is not
977  // used further.
978  Result = ModRefInfo::ModRef;
979  break;
980  }
981 
982  // No operand aliases, reset Must bit. Add below if at least one aliases
983  // and all aliases found are MustAlias.
984  if (isNoModRef(Result))
985  IsMustAlias = false;
986 
987  // Early return if we improved mod ref information
988  if (!isModAndRefSet(Result)) {
989  if (isNoModRef(Result))
990  return ModRefInfo::NoModRef;
991  return IsMustAlias ? setMust(Result) : clearMust(Result);
992  }
993  }
994 
995  // If the call is malloc/calloc like, we can assume that it doesn't
996  // modify any IR visible value. This is only valid because we assume these
997  // routines do not read values visible in the IR. TODO: Consider special
998  // casing realloc and strdup routines which access only their arguments as
999  // well. Or alternatively, replace all of this with inaccessiblememonly once
1000  // that's implemented fully.
1001  if (isMallocOrCallocLikeFn(Call, &TLI)) {
1002  // Be conservative if the accessed pointer may alias the allocation -
1003  // fallback to the generic handling below.
1004     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
1005                                  AAQI) == AliasResult::NoAlias)
1006  return ModRefInfo::NoModRef;
1007  }
1008 
1009  // The semantics of memcpy intrinsics either exactly overlap or do not
1010  // overlap, i.e., source and destination of any given memcpy are either
1011  // no-alias or must-alias.
1012  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
1013  AliasResult SrcAA =
1014  getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
1015  AliasResult DestAA =
1016  getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
1017  // It's also possible for Loc to alias both src and dest, or neither.
1018     ModRefInfo rv = ModRefInfo::NoModRef;
1019     if (SrcAA != AliasResult::NoAlias)
1020  rv = setRef(rv);
1021  if (DestAA != AliasResult::NoAlias)
1022  rv = setMod(rv);
1023  return rv;
1024  }
1025 
1026  // Guard intrinsics are marked as arbitrarily writing so that proper control
1027   // dependencies are maintained but they never mod any particular memory
1028  // location.
1029  //
1030  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1031  // heap state at the point the guard is issued needs to be consistent in case
1032  // the guard invokes the "deopt" continuation.
1033  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1034  return ModRefInfo::Ref;
1035  // The same applies to deoptimize which is essentially a guard(false).
1036  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
1037  return ModRefInfo::Ref;
1038 
1039  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1040  // writing so that proper control dependencies are maintained but they never
1041  // mod any particular memory location visible to the IR.
1042  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1043  // intrinsic is now modeled as reading memory. This prevents hoisting the
1044  // invariant.start intrinsic over stores. Consider:
1045  // *ptr = 40;
1046  // *ptr = 50;
1047  // invariant_start(ptr)
1048  // int val = *ptr;
1049  // print(val);
1050  //
1051  // This cannot be transformed to:
1052  //
1053  // *ptr = 40;
1054  // invariant_start(ptr)
1055  // *ptr = 50;
1056  // int val = *ptr;
1057  // print(val);
1058  //
1059  // The transformation will cause the second store to be ignored (based on
1060  // rules of invariant.start) and print 40, while the first program always
1061  // prints 50.
1062  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1063  return ModRefInfo::Ref;
1064 
1065  // The AAResultBase base class has some smarts, lets use them.
1066  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1067 }
1068 
1069 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1070                                         const CallBase *Call2,
1071  AAQueryInfo &AAQI) {
1072  // Guard intrinsics are marked as arbitrarily writing so that proper control
1073   // dependencies are maintained but they never mod any particular memory
1074  // location.
1075  //
1076  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1077  // heap state at the point the guard is issued needs to be consistent in case
1078  // the guard invokes the "deopt" continuation.
1079 
1080  // NB! This function is *not* commutative, so we special case two
1081  // possibilities for guard intrinsics.
1082 
1083  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1084     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1085                ? ModRefInfo::Ref
1086                : ModRefInfo::NoModRef;
1087 
1088   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1089     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1090                ? ModRefInfo::Mod
1091                : ModRefInfo::NoModRef;
1092 
1093  // The AAResultBase base class has some smarts, lets use them.
1094  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1095 }
1096 
1097 /// Return true if we know V to be the base address of the corresponding memory
1098 /// object. This implies that any address less than V must be out of bounds
1099 /// for the underlying object. Note that just being isIdentifiedObject() is
1100 /// not enough - For example, a negative offset from a noalias argument or call
1101 /// can be inbounds w.r.t the actual underlying object.
1102 static bool isBaseOfObject(const Value *V) {
1103  // TODO: We can handle other cases here
1104  // 1) For GC languages, arguments to functions are often required to be
1105  // base pointers.
1106  // 2) Result of allocation routines are often base pointers. Leverage TLI.
1107  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1108 }
1109 
1110 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1111 /// another pointer.
1112 ///
1113 /// We know that V1 is a GEP, but we don't know anything about V2.
1114 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1115 /// V2.
1116 AliasResult BasicAAResult::aliasGEP(
1117  const GEPOperator *GEP1, LocationSize V1Size,
1118  const Value *V2, LocationSize V2Size,
1119  const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1120  if (!V1Size.hasValue() && !V2Size.hasValue()) {
1121  // TODO: This limitation exists for compile-time reasons. Relax it if we
1122  // can avoid exponential pathological cases.
1123  if (!isa<GEPOperator>(V2))
1124  return AliasResult::MayAlias;
1125 
1126  // If both accesses have unknown size, we can only check whether the base
1127  // objects don't alias.
1128  AliasResult BaseAlias = getBestAAResults().alias(
1129  MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1130  MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1131  return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1132                                            : AliasResult::MayAlias;
1133   }
1134 
1135  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1136  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1137 
1138  // Bail if we were not able to decompose anything.
1139  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1140  return AliasResult::MayAlias;
1141 
1142  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1143  // symbolic difference.
1144  subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1145 
1146  // If an inbounds GEP would have to start from an out of bounds address
1147  // for the two to alias, then we can assume noalias.
1148  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1149  V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1150  isBaseOfObject(DecompGEP2.Base))
1151  return AliasResult::NoAlias;
1152 
1153  if (isa<GEPOperator>(V2)) {
1154  // Symmetric case to above.
1155  if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1156  V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1157  isBaseOfObject(DecompGEP1.Base))
1158  return AliasResult::NoAlias;
1159  }
1160 
1161  // For GEPs with identical offsets, we can preserve the size and AAInfo
1162  // when performing the alias check on the underlying objects.
1163  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1164  return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
1165  MemoryLocation(DecompGEP2.Base, V2Size),
1166  AAQI);
1167 
1168  // Do the base pointers alias?
1169  AliasResult BaseAlias = getBestAAResults().alias(
1170  MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1171  MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1172 
1173  // If we get a No or May, then return it immediately, no amount of analysis
1174  // will improve this situation.
1175  if (BaseAlias != AliasResult::MustAlias) {
1176  assert(BaseAlias == AliasResult::NoAlias ||
1177  BaseAlias == AliasResult::MayAlias);
1178  return BaseAlias;
1179  }
1180 
1181  // If there is a constant difference between the pointers, but the difference
1182  // is less than the size of the associated memory object, then we know
1183  // that the objects are partially overlapping. If the difference is
1184  // greater, we know they do not overlap.
1185  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1186  APInt &Off = DecompGEP1.Offset;
1187 
1188  // Initialize for Off >= 0 (V2 <= GEP1) case.
1189  const Value *LeftPtr = V2;
1190  const Value *RightPtr = GEP1;
1191  LocationSize VLeftSize = V2Size;
1192  LocationSize VRightSize = V1Size;
1193  const bool Swapped = Off.isNegative();
1194 
1195  if (Swapped) {
1196  // Swap if we have the situation where:
1197  // + +
1198  // | BaseOffset |
1199  // ---------------->|
1200  // |-->V1Size |-------> V2Size
1201  // GEP1 V2
1202  std::swap(LeftPtr, RightPtr);
1203  std::swap(VLeftSize, VRightSize);
1204  Off = -Off;
1205  }
1206 
1207  if (VLeftSize.hasValue()) {
1208  const uint64_t LSize = VLeftSize.getValue();
1209  if (Off.ult(LSize)) {
1210  // Conservatively drop processing if a phi was visited and/or offset is
1211  // too big.
1212       AliasResult AR = AliasResult::PartialAlias;
1213       if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1214  (Off + VRightSize.getValue()).ule(LSize)) {
1215  // Memory referenced by right pointer is nested. Save the offset in
1216  // cache. Note that originally offset estimated as GEP1-V2, but
1217  // AliasResult contains the shift that represents GEP1+Offset=V2.
1218  AR.setOffset(-Off.getSExtValue());
1219  AR.swap(Swapped);
1220  }
1221  return AR;
1222  }
1223  return AliasResult::NoAlias;
1224  }
1225  }
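  // For illustration only (numbers assumed): if GEP1 == V2 + 2 (Off = 2) with
  // V1Size = 4 and V2Size = 8, the right access [2,6) is nested inside the
  // left access [0,8), so the block above returns PartialAlias carrying the
  // offset -2; if instead Off = 6 and V2Size = 4, Off is no longer below the
  // left size, the accesses [6,10) and [0,4) cannot overlap, and the result
  // is NoAlias.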
1226 
1227  if (!DecompGEP1.VarIndices.empty()) {
1228  APInt GCD;
1229  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1230  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1231  const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1232  const APInt &Scale = Index.Scale;
1233  APInt ScaleForGCD = Scale;
1234  if (!Index.IsNSW)
1235  ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1236  Scale.countTrailingZeros());
1237 
1238  if (i == 0)
1239  GCD = ScaleForGCD.abs();
1240  else
1241  GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1242 
1243  ConstantRange CR =
1244  computeConstantRange(Index.Val.V, true, &AC, Index.CxtI);
1245  KnownBits Known =
1246  computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1247  CR = CR.intersectWith(
1248  ConstantRange::fromKnownBits(Known, /* Signed */ true),
1249           ConstantRange::Signed);
1250 
1251  assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1252  "Bit widths are normalized to MaxPointerSize");
1253  OffsetRange = OffsetRange.add(
1254  Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth())
1255  .smul_fast(ConstantRange(Scale)));
1256  }
1257 
1258  // We now have accesses at two offsets from the same base:
1259  // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1260  // 2. 0 with size V2Size
1261  // Using arithmetic modulo GCD, the accesses are at
1262  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1263  // into the range [V2Size..GCD), then we know they cannot overlap.
1264  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1265  if (ModOffset.isNegative())
1266  ModOffset += GCD; // We want mod, not rem.
1267  if (V1Size.hasValue() && V2Size.hasValue() &&
1268  ModOffset.uge(V2Size.getValue()) &&
1269  (GCD - ModOffset).uge(V1Size.getValue()))
1270  return AliasResult::NoAlias;
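    // For illustration only (numbers assumed): with a single NSW index of
    // scale 8, GCD = 8. If DecompGEP1.Offset = 4, V1Size = 4 and V2Size = 4,
    // the first access lies at [4,8) modulo 8 while the second lies at [0,4),
    // so ModOffset >= V2Size and GCD - ModOffset >= V1Size, proving NoAlias
    // regardless of the runtime value of the index.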
1271 
1272  if (V1Size.hasValue() && V2Size.hasValue()) {
1273  // Compute ranges of potentially accessed bytes for both accesses. If the
1274     // intersection is empty, there can be no overlap.
1275  unsigned BW = OffsetRange.getBitWidth();
1276  ConstantRange Range1 = OffsetRange.add(
1277  ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1278  ConstantRange Range2 =
1279  ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1280  if (Range1.intersectWith(Range2).isEmptySet())
1281  return AliasResult::NoAlias;
1282  }
1283 
1284  if (V1Size.hasValue() && V2Size.hasValue()) {
1285  // Try to determine the range of values for VarIndex such that
1286  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1287  Optional<APInt> MinAbsVarIndex;
1288  if (DecompGEP1.VarIndices.size() == 1) {
1289  // VarIndex = Scale*V.
1290  const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1291  if (Var.Val.TruncBits == 0 &&
1292  isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1293  // If V != 0 then abs(VarIndex) >= abs(Scale).
1294  MinAbsVarIndex = Var.Scale.abs();
1295  }
1296  } else if (DecompGEP1.VarIndices.size() == 2) {
1297  // VarIndex = Scale*V0 + (-Scale)*V1.
1298  // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1299  // Check that VisitedPhiBBs is empty, to avoid reasoning about
1300  // inequality of values across loop iterations.
1301  const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1302  const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1303  if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1304  Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
1305  isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1306  DT))
1307  MinAbsVarIndex = Var0.Scale.abs();
1308  }
1309 
1310  if (MinAbsVarIndex) {
1311  // The constant offset will have added at least +/-MinAbsVarIndex to it.
1312  APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1313  APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1314  // We know that Offset <= OffsetLo || Offset >= OffsetHi
1315  if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1316  OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1317  return AliasResult::NoAlias;
1318  }
1319  }
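    // For illustration only (numbers assumed): with a single variable index
    // VarIndex = 4*%i where %i is known non-zero and not truncated,
    // MinAbsVarIndex = 4. With DecompGEP1.Offset = 0, V1Size = 4 and
    // V2Size = 4, the real offset is <= -4 or >= 4; in the first case the
    // GEP1 access of 4 bytes ends at or before V2 begins, in the second it
    // starts at or after V2's 4 bytes end, so the locations cannot overlap
    // (NoAlias).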
1320 
1321  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1322  return AliasResult::NoAlias;
1323  }
1324 
1325  // Statically, we can see that the base objects are the same, but the
1326  // pointers have dynamic offsets which we can't resolve. And none of our
1327  // little tricks above worked.
1328  return AliasResult::MayAlias;
1329 }
1330 
1331 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1332   // If the results agree, take it.
1333   if (A == B)
1334     return A;
1335   // A mix of PartialAlias and MustAlias is PartialAlias.
1336   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1337       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1338     return AliasResult::PartialAlias;
1339   // Otherwise, we don't know anything.
1340   return AliasResult::MayAlias;
1341 }
1342 
1343 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1344 /// against another.
1345 AliasResult
1346 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1347  const Value *V2, LocationSize V2Size,
1348  AAQueryInfo &AAQI) {
1349  // If the values are Selects with the same condition, we can do a more precise
1350  // check: just check for aliases between the values on corresponding arms.
1351  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1352  if (SI->getCondition() == SI2->getCondition()) {
1353  AliasResult Alias = getBestAAResults().alias(
1354  MemoryLocation(SI->getTrueValue(), SISize),
1355  MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1356  if (Alias == AliasResult::MayAlias)
1357  return AliasResult::MayAlias;
1358  AliasResult ThisAlias = getBestAAResults().alias(
1359  MemoryLocation(SI->getFalseValue(), SISize),
1360  MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1361  return MergeAliasResults(ThisAlias, Alias);
1362  }
1363 
1364  // If both arms of the Select node NoAlias or MustAlias V2, then returns
1365  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1366  AliasResult Alias = getBestAAResults().alias(
1367  MemoryLocation(V2, V2Size),
1368  MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1369  if (Alias == AliasResult::MayAlias)
1370  return AliasResult::MayAlias;
1371 
1372  AliasResult ThisAlias = getBestAAResults().alias(
1373  MemoryLocation(V2, V2Size),
1374  MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1375  return MergeAliasResults(ThisAlias, Alias);
1376 }
1377 
1378 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1379 /// another.
1380 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1381  const Value *V2, LocationSize V2Size,
1382  AAQueryInfo &AAQI) {
1383  if (!PN->getNumIncomingValues())
1384  return AliasResult::NoAlias;
1385  // If the values are PHIs in the same block, we can do a more precise
1386  // as well as efficient check: just check for aliases between the values
1387  // on corresponding edges.
1388  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1389  if (PN2->getParent() == PN->getParent()) {
1390  Optional<AliasResult> Alias;
1391  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1392  AliasResult ThisAlias = getBestAAResults().alias(
1393  MemoryLocation(PN->getIncomingValue(i), PNSize),
1394           MemoryLocation(
1395               PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1396  AAQI);
1397  if (Alias)
1398  *Alias = MergeAliasResults(*Alias, ThisAlias);
1399  else
1400  Alias = ThisAlias;
1401  if (*Alias == AliasResult::MayAlias)
1402  break;
1403  }
1404  return *Alias;
1405  }
1406 
1407  SmallVector<Value *, 4> V1Srcs;
1408  // If a phi operand recurses back to the phi, we can still determine NoAlias
1409  // if we don't alias the underlying objects of the other phi operands, as we
1410  // know that the recursive phi needs to be based on them in some way.
1411  bool isRecursive = false;
1412  auto CheckForRecPhi = [&](Value *PV) {
1413  if (!EnableRecPhiAnalysis)
1414  return false;
1415  if (getUnderlyingObject(PV) == PN) {
1416  isRecursive = true;
1417  return true;
1418  }
1419  return false;
1420  };
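  // For illustration only (an assumed pointer-induction pattern): in a loop
  // such as
  //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, ptr %p, i64 4
  // the incoming value %p.next strips back to the phi itself, so
  // CheckForRecPhi marks the phi as recursive and only the remaining
  // underlying value, %base, is compared against the other pointer below.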
1421 
1422  if (PV) {
1423  // If we have PhiValues then use it to get the underlying phi values.
1424  const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1425  // If we have more phi values than the search depth then return MayAlias
1426  // conservatively to avoid compile time explosion. The worst possible case
1427   // is if both sides are PHI nodes, in which case this is O(m x n) time,
1428   // where 'm' and 'n' are the number of PHI sources.
1429  if (PhiValueSet.size() > MaxLookupSearchDepth)
1430  return AliasResult::MayAlias;
1431  // Add the values to V1Srcs
1432  for (Value *PV1 : PhiValueSet) {
1433  if (CheckForRecPhi(PV1))
1434  continue;
1435  V1Srcs.push_back(PV1);
1436  }
1437  } else {
1438  // If we don't have PhiInfo then just look at the operands of the phi itself
1439  // FIXME: Remove this once we can guarantee that we have PhiInfo always
1440  SmallPtrSet<Value *, 4> UniqueSrc;
1441  Value *OnePhi = nullptr;
1442  for (Value *PV1 : PN->incoming_values()) {
1443  if (isa<PHINode>(PV1)) {
1444  if (OnePhi && OnePhi != PV1) {
1445  // To control potential compile time explosion, we choose to be
1446         // conservative when we have more than one Phi input. It is important
1447  // that we handle the single phi case as that lets us handle LCSSA
1448  // phi nodes and (combined with the recursive phi handling) simple
1449  // pointer induction variable patterns.
1450  return AliasResult::MayAlias;
1451  }
1452  OnePhi = PV1;
1453  }
1454 
1455  if (CheckForRecPhi(PV1))
1456  continue;
1457 
1458  if (UniqueSrc.insert(PV1).second)
1459  V1Srcs.push_back(PV1);
1460  }
1461 
1462  if (OnePhi && UniqueSrc.size() > 1)
1463  // Out of an abundance of caution, allow only the trivial lcssa and
1464  // recursive phi cases.
1465  return AliasResult::MayAlias;
1466  }
1467 
1468  // If V1Srcs is empty then that means that the phi has no underlying non-phi
1469  // value. This should only be possible in blocks unreachable from the entry
1470  // block, but return MayAlias just in case.
1471  if (V1Srcs.empty())
1472  return AliasResult::MayAlias;
1473 
1474  // If this PHI node is recursive, indicate that the pointer may be moved
1475  // across iterations. We can only prove NoAlias if different underlying
1476  // objects are involved.
1477  if (isRecursive)
1478     PNSize = LocationSize::beforeOrAfterPointer();
1479 
1480  // In the recursive alias queries below, we may compare values from two
1481  // different loop iterations. Keep track of visited phi blocks, which will
1482  // be used when determining value equivalence.
1483  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1484  auto _ = make_scope_exit([&]() {
1485  if (BlockInserted)
1486  VisitedPhiBBs.erase(PN->getParent());
1487  });
1488 
1489  // If we inserted a block into VisitedPhiBBs, alias analysis results that
1490  // have been cached earlier may no longer be valid. Perform recursive queries
1491  // with a new AAQueryInfo.
1492  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1493  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1494 
1495  AliasResult Alias = getBestAAResults().alias(
1496  MemoryLocation(V2, V2Size),
1497  MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1498 
1499  // Early exit if the check of the first PHI source against V2 is MayAlias.
1500  // Other results are not possible.
1501  if (Alias == AliasResult::MayAlias)
1502  return AliasResult::MayAlias;
1503   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1504   // remain valid for all elements, so we conservatively return MayAlias.
1505  if (isRecursive && Alias != AliasResult::NoAlias)
1506  return AliasResult::MayAlias;
1507 
1508  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1509  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1510  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1511  Value *V = V1Srcs[i];
1512 
1513  AliasResult ThisAlias = getBestAAResults().alias(
1514  MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1515  Alias = MergeAliasResults(ThisAlias, Alias);
1516  if (Alias == AliasResult::MayAlias)
1517  break;
1518  }
1519 
1520  return Alias;
1521 }
1522 
1523 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1524 /// array references.
1525 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1526  const Value *V2, LocationSize V2Size,
1527  AAQueryInfo &AAQI) {
1528  // If either of the memory references is empty, it doesn't matter what the
1529  // pointer values are.
1530  if (V1Size.isZero() || V2Size.isZero())
1531  return AliasResult::NoAlias;
1532 
1533  // Strip off any casts if they exist.
1534  V1 = V1->stripPointerCastsForAliasAnalysis();
1535  V2 = V2->stripPointerCastsForAliasAnalysis();
1536 
1537  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1538  // value for undef that aliases nothing in the program.
1539  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1540  return AliasResult::NoAlias;
1541 
1542  // Are we checking for alias of the same value?
1543  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1544  // different iterations. We must therefore make sure that this is not the
1545  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1546  // happen by looking at the visited phi nodes and making sure they cannot
1547  // reach the value.
1548  if (isValueEqualInPotentialCycles(V1, V2))
1549  return AliasResult::MustAlias;
1550 
1551  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1552  return AliasResult::NoAlias; // Scalars cannot alias each other
1553 
1554  // Figure out what objects these things are pointing to if we can.
1555  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1556  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1557 
1558  // Null values in the default address space don't point to any object, so they
1559  // don't alias any other pointer.
1560  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1561  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1562  return AliasResult::NoAlias;
1563  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1564  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1565  return AliasResult::NoAlias;
1566 
1567  if (O1 != O2) {
1568  // If V1/V2 point to two different objects, we know that we have no alias.
1569  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1570  return AliasResult::NoAlias;
1571 
1572  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1573  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1574  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1575  return AliasResult::NoAlias;
1576 
1577  // Function arguments can't alias with things that are known to be
1578  // unambiguously identified at the function level.
1579  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1580  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1581  return AliasResult::NoAlias;
1582 
1583  // If one pointer is the result of a call/invoke or load and the other is a
1584  // non-escaping local object within the same function, then we know the
1585  // object couldn't escape to a point where the call could return it.
1586  //
1587  // Note that if the pointers are in different functions, there are a
1588  // variety of complications. A call with a nocapture argument may still
1589  // temporarily store the nocapture argument's value in a temporary memory
1590  // location if that memory location doesn't escape. Or it may pass a
1591  // nocapture value to other functions as long as they don't capture it.
1592  if (isEscapeSource(O1) &&
1593  AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1594  return AliasResult::NoAlias;
1595  if (isEscapeSource(O2) &&
1596  AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1597  return AliasResult::NoAlias;
1598  }
1599 
1600  // If the size of one access is larger than the entire object on the other
1601  // side, then we know such behavior is undefined and can assume no alias.
1602  bool NullIsValidLocation = NullPointerIsDefined(&F);
1603  if ((isObjectSmallerThan(
1604  O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1605  TLI, NullIsValidLocation)) ||
1606  (isObjectSmallerThan(
1607  O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1608  TLI, NullIsValidLocation)))
1609  return AliasResult::NoAlias;
1610 
1611  // If one of the accesses may be before the accessed pointer, canonicalize
1612  // by using unknown after-pointer sizes for both accesses. This is
1613  // equivalent, because regardless of which pointer is lower, one of them
1614  // will always come after the other, as long as the underlying objects aren't
1615  // disjoint. We do this so that the rest of BasicAA does not have to deal
1616  // with accesses before the base pointer, and to improve cache utilization by
1617  // merging equivalent states.
1618  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1619  V1Size = LocationSize::afterPointer();
1620  V2Size = LocationSize::afterPointer();
1621  }
1622 
1623  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1624  // for recursive queries. For this reason, this limit is chosen to be large
1625  // enough to be very rarely hit, while still being small enough to avoid
1626  // stack overflows.
1627  if (AAQI.Depth >= 512)
1628  return AliasResult::MayAlias;
1629 
1630  // Check the cache before climbing up use-def chains. This also terminates
1631  // otherwise infinitely recursive queries.
1632  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1633  const bool Swapped = V1 > V2;
1634  if (Swapped)
1635  std::swap(Locs.first, Locs.second);
1636  const auto &Pair = AAQI.AliasCache.try_emplace(
1637  Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1638  if (!Pair.second) {
1639  auto &Entry = Pair.first->second;
1640  if (!Entry.isDefinitive()) {
1641  // Remember that we used an assumption.
1642  ++Entry.NumAssumptionUses;
1643  ++AAQI.NumAssumptionUses;
1644  }
1645  // Cache contains sorted {V1,V2} pairs but we should return original order.
1646  auto Result = Entry.Result;
1647  Result.swap(Swapped);
1648  return Result;
1649  }
1650 
1651  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1652  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1653  AliasResult Result =
1654  aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1655 
1656  auto It = AAQI.AliasCache.find(Locs);
1657  assert(It != AAQI.AliasCache.end() && "Must be in cache");
1658  auto &Entry = It->second;
1659 
1660  // Check whether a NoAlias assumption has been used, but disproven.
1661  bool AssumptionDisproven =
1662  Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1663  if (AssumptionDisproven)
1664  Result = AliasResult::MayAlias;
1665 
1666  // This is a definitive result now, when considered as a root query.
1667  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1668  Entry.Result = Result;
1669  // Cache contains sorted {V1,V2} pairs.
1670  Entry.Result.swap(Swapped);
1671  Entry.NumAssumptionUses = -1;
1672 
1673  // If the assumption has been disproven, remove any results that may have
1674  // been based on this assumption. Do this after the Entry updates above to
1675  // avoid iterator invalidation.
1676  if (AssumptionDisproven)
1677  while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1678  AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1679 
1680  // The result may still be based on assumptions higher up in the chain.
1681  // Remember it, so it can be purged from the cache later.
1682  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1683  Result != AliasResult::MayAlias)
1684  AAQI.AssumptionBasedResults.push_back(Locs);
1685  return Result;
1686 }
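
The cache lookup in aliasCheck above relies on a symmetric key: the {V1, V2} pair is ordered by pointer identity before it is used, so a query and its swapped form share one cache entry, and the stored result is re-oriented on the way out. The following is a minimal sketch of that pattern with illustrative, non-LLVM names (SymmetricCache, Result) and a plain std::map; it is not the LLVM implementation.

#include <map>
#include <utility>

enum class Result { No, May, Partial, Must };

struct SymmetricCache {
  std::map<std::pair<const void *, const void *>, Result> Cache;

  // Store the result under the canonically ordered key.
  void insert(const void *A, const void *B, Result R) {
    if (A > B)
      std::swap(A, B);
    Cache[{A, B}] = R;
  }

  // Look the pair up in either order. A real AliasResult would also swap
  // its PartialAlias offset when the query order differs from the key order.
  bool lookup(const void *A, const void *B, Result &R) const {
    if (A > B)
      std::swap(A, B);
    auto It = Cache.find({A, B});
    if (It == Cache.end())
      return false;
    R = It->second;
    return true;
  }
};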
1687 
1688 AliasResult BasicAAResult::aliasCheckRecursive(
1689  const Value *V1, LocationSize V1Size,
1690  const Value *V2, LocationSize V2Size,
1691  AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1692  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1693  AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1694  if (Result != AliasResult::MayAlias)
1695  return Result;
1696  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1697  AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1698  if (Result != AliasResult::MayAlias)
1699  return Result;
1700  }
1701 
1702  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1703  AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1704  if (Result != AliasResult::MayAlias)
1705  return Result;
1706  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1707  AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1708  if (Result != AliasResult::MayAlias)
1709  return Result;
1710  }
1711 
1712  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1713  AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1714  if (Result != AliasResult::MayAlias)
1715  return Result;
1716  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1717  AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1718  if (Result != AliasResult::MayAlias)
1719  return Result;
1720  }
1721 
1722  // If both pointers are pointing into the same object and one of them
1723  // accesses the entire object, then the accesses must overlap in some way.
1724  if (O1 == O2) {
1725  bool NullIsValidLocation = NullPointerIsDefined(&F);
1726  if (V1Size.isPrecise() && V2Size.isPrecise() &&
1727  (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1728  isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1729  return AliasResult::PartialAlias;
1730  }
1731 
1732  return AliasResult::MayAlias;
1733 }
1734 
1735 /// Check whether two Values can be considered equivalent.
1736 ///
1737 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1738 /// they cannot be part of a cycle in the value graph by looking at all
1739 /// visited phi nodes and making sure that the phis cannot reach the value. We
1740 /// have to do this because we are looking through phi nodes (that is, we say
1741 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1742 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1743  const Value *V2) {
1744  if (V != V2)
1745  return false;
1746 
1747  const Instruction *Inst = dyn_cast<Instruction>(V);
1748  if (!Inst)
1749  return true;
1750 
1751  if (VisitedPhiBBs.empty())
1752  return true;
1753 
1754  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1755  return false;
1756 
1757  // Make sure that the visited phis cannot reach the Value. This ensures that
1758  // the Values cannot come from different iterations of a potential cycle the
1759  // phi nodes could be involved in.
1760  for (auto *P : VisitedPhiBBs)
1761  if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1762  return false;
1763 
1764  return true;
1765 }
1766 
1767 /// Computes the symbolic difference between two de-composed GEPs.
1768 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1769  const DecomposedGEP &SrcGEP) {
1770  DestGEP.Offset -= SrcGEP.Offset;
1771  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1772  // Find V in Dest. This is N^2, but pointer indices almost never have more
1773  // than a few variable indexes.
1774  bool Found = false;
1775  for (auto I : enumerate(DestGEP.VarIndices)) {
1776  VariableGEPIndex &Dest = I.value();
1777  if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1778  !Dest.Val.hasSameCastsAs(Src.Val))
1779  continue;
1780 
1781  // If we found it, subtract off Scale V's from the entry in Dest. If it
1782  // goes to zero, remove the entry.
1783  if (Dest.Scale != Src.Scale) {
1784  Dest.Scale -= Src.Scale;
1785  Dest.IsNSW = false;
1786  } else {
1787  DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1788  }
1789  Found = true;
1790  break;
1791  }
1792 
1793  // If we didn't consume this entry, add it to the end of the Dest list.
1794  if (!Found) {
1795  VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1796  DestGEP.VarIndices.push_back(Entry);
1797  }
1798  }
1799 }
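
To make the cancellation concrete: subtracting the decomposition of (Base + 4*i + 2) from that of (Base + 4*i + 8) leaves only the constant offset 6, because the matching 4*i terms erase each other. Below is a simplified, self-contained sketch of the same bookkeeping using illustrative types (Term, Decomposed), not the LLVM ones.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Term {
  const void *Var; // Stand-in for the variable index Value.
  int64_t Scale;
};

struct Decomposed {
  int64_t Offset = 0;
  std::vector<Term> Terms;
};

// Dest -= Src: matching variable terms have their scales subtracted and are
// dropped when they cancel completely; unmatched Src terms are appended with
// a negated scale.
void subtract(Decomposed &Dest, const Decomposed &Src) {
  Dest.Offset -= Src.Offset;
  for (const Term &S : Src.Terms) {
    bool Found = false;
    for (std::size_t I = 0; I != Dest.Terms.size(); ++I) {
      if (Dest.Terms[I].Var != S.Var)
        continue;
      if (Dest.Terms[I].Scale != S.Scale)
        Dest.Terms[I].Scale -= S.Scale;           // Partial cancellation.
      else
        Dest.Terms.erase(Dest.Terms.begin() + I); // Full cancellation.
      Found = true;
      break;
    }
    if (!Found)
      Dest.Terms.push_back({S.Var, -S.Scale});
  }
}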
1800 
1801 bool BasicAAResult::constantOffsetHeuristic(
1802  const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1803  LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1804  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1805  !MaybeV2Size.hasValue())
1806  return false;
1807 
1808  const uint64_t V1Size = MaybeV1Size.getValue();
1809  const uint64_t V2Size = MaybeV2Size.getValue();
1810 
1811  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1812 
1813  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1814  Var0.Scale != -Var1.Scale ||
1815  Var0.Val.V->getType() != Var1.Val.V->getType())
1816  return false;
1817 
1818  // We'll strip off the Extensions of Var0 and Var1 and do another round
1819  // of GetLinearExpression decomposition. In the example above, if Var0
1820  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1821 
1822  LinearExpression E0 =
1823  GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1824  LinearExpression E1 =
1825  GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1826  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1827  !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1828  return false;
1829 
1830  // We have a hit - Var0 and Var1 only differ by a constant offset!
1831 
1832  // If we've been sext'ed then zext'd the maximum difference between Var0 and
1833  // Var1 is possible to calculate, but we're just interested in the absolute
1834  // minimum difference between the two. The minimum distance may occur due to
1835  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1836  // the minimum distance between %i and %i + 5 is 3.
1837  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1838  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1839  APInt MinDiffBytes =
1840  MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1841 
1842  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1843  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1844  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1845  // V2Size can fit in the MinDiffBytes gap.
1846  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1847  MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1848 }
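
The wrapped-minimum computation above is plain modular arithmetic on APInt. The sketch below reproduces the "add i3 %i, 5" example from the comment; minWrappedDistance is an illustrative helper (not LLVM API), and the snippet assumes it is linked against LLVM's Support library for APInt.

#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

// Minimum distance between A and B in N-bit modular arithmetic: the smaller
// of (A - B) and (B - A), compared as unsigned values.
APInt minWrappedDistance(const APInt &A, const APInt &B) {
  APInt Diff = A - B;
  return APIntOps::umin(Diff, -Diff);
}

int main() {
  APInt I(3, 7);        // %i == 7 in a 3-bit integer.
  APInt IPlus5 = I + 5; // 7 + 5 wraps to 4 modulo 8.
  assert(minWrappedDistance(IPlus5, I) == 3);
  return 0;
}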
1849 
1850 //===----------------------------------------------------------------------===//
1851 // BasicAliasAnalysis Pass
1852 //===----------------------------------------------------------------------===//
1853 
1854 AnalysisKey BasicAA::Key;
1855 
1856 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1857  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1858  auto &AC = AM.getResult<AssumptionAnalysis>(F);
1859  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1860  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1861  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1862 }
1863 
1864 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1865  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1866 }
1867 
1868 char BasicAAWrapperPass::ID = 0;
1869 
1870 void BasicAAWrapperPass::anchor() {}
1871 
1873  "Basic Alias Analysis (stateless AA impl)", true, true)
1879  "Basic Alias Analysis (stateless AA impl)", true, true)
1880 
1881 FunctionPass *llvm::createBasicAAWrapperPass() {
1882  return new BasicAAWrapperPass();
1883 }
1884 
1885 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1886  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1887  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1888  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1889  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1890 
1891  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1892  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1893  &DTWP.getDomTree(),
1894  PVWP ? &PVWP->getResult() : nullptr));
1895 
1896  return false;
1897 }
1898 
1899 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1900  AU.setPreservesAll();
1901  AU.addRequiredTransitive<AssumptionCacheTracker>();
1902  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1903  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1904  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1905 }
1906 
1907 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1908  return BasicAAResult(
1909  F.getParent()->getDataLayout(), F,
1910  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1911  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1912 }