LLVM  16.0.0git
BasicAliasAnalysis.cpp
Go to the documentation of this file.
1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CFG.h"
30 #include "llvm/IR/Argument.h"
31 #include "llvm/IR/Attributes.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/ConstantRange.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Operator.h"
48 #include "llvm/IR/Type.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/KnownBits.h"
57 #include <cassert>
58 #include <cstdint>
59 #include <cstdlib>
60 #include <utility>
61 
62 #define DEBUG_TYPE "basicaa"
63 
64 using namespace llvm;
65 
66 /// Enable analysis of recursive PHI nodes.
67 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
68  cl::init(true));
69 
/// SearchLimitReached / SearchTimes show how often the limit on the number of
/// GEP decomposition steps is reached. Hitting the limit reduces the precision
/// of basic alias analysis.
73 STATISTIC(SearchLimitReached, "Number of times the limit to "
74  "decompose GEPs is reached");
75 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
76 
77 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
78 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
79 /// careful with value equivalence. We use reachability to make sure a value
80 /// cannot be involved in a cycle.
82 
83 // The max limit of the search depth in DecomposeGEPExpression() and
84 // getUnderlyingObject().
85 static const unsigned MaxLookupSearchDepth = 6;
86 
89  // We don't care if this analysis itself is preserved, it has no state. But
90  // we need to check that the analyses it depends on have been. Note that we
91  // may be created without handles to some analyses and in that case don't
92  // depend on them.
93  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
94  (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
95  (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
96  return true;
97 
98  // Otherwise this analysis result remains valid.
99  return false;
100 }
101 
102 //===----------------------------------------------------------------------===//
103 // Useful predicates
104 //===----------------------------------------------------------------------===//
105 
106 /// Returns the size of the object specified by V or UnknownSize if unknown.
107 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
108  const TargetLibraryInfo &TLI,
109  bool NullIsValidLoc,
110  bool RoundToAlign = false) {
111  uint64_t Size;
112  ObjectSizeOpts Opts;
113  Opts.RoundToAlign = RoundToAlign;
114  Opts.NullIsUnknownSize = NullIsValidLoc;
115  if (getObjectSize(V, Size, DL, &TLI, Opts))
116  return Size;
118 }
119 
120 /// Returns true if we can prove that the object specified by V is smaller than
121 /// Size.
122 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
123  const DataLayout &DL,
124  const TargetLibraryInfo &TLI,
125  bool NullIsValidLoc) {
126  // Note that the meanings of the "object" are slightly different in the
127  // following contexts:
128  // c1: llvm::getObjectSize()
129  // c2: llvm.objectsize() intrinsic
130  // c3: isObjectSmallerThan()
131  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
132  // refers to the "entire object".
133  //
134  // Consider this example:
135  // char *p = (char*)malloc(100)
136  // char *q = p+80;
137  //
138  // In the context of c1 and c2, the "object" pointed by q refers to the
139  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
140  //
141  // However, in the context of c3, the "object" refers to the chunk of memory
142  // being allocated. So, the "object" has 100 bytes, and q points to the middle
143  // the "object". In case q is passed to isObjectSmallerThan() as the 1st
144  // parameter, before the llvm::getObjectSize() is called to get the size of
145  // entire object, we should:
146  // - either rewind the pointer q to the base-address of the object in
147  // question (in this case rewind to p), or
148  // - just give up. It is up to caller to make sure the pointer is pointing
149  // to the base address the object.
150  //
151  // We go for 2nd option for simplicity.
152  if (!isIdentifiedObject(V))
153  return false;
154 
155  // This function needs to use the aligned object size because we allow
156  // reads a bit past the end given sufficient alignment.
157  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
158  /*RoundToAlign*/ true);
159 
160  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
161 }
162 
163 /// Return the minimal extent from \p V to the end of the underlying object,
164 /// assuming the result is used in an aliasing query. E.g., we do use the query
165 /// location size and the fact that null pointers cannot alias here.
167  const LocationSize &LocSize,
168  const DataLayout &DL,
169  bool NullIsValidLoc) {
170  // If we have dereferenceability information we know a lower bound for the
171  // extent as accesses for a lower offset would be valid. We need to exclude
172  // the "or null" part if null is a valid pointer. We can ignore frees, as an
173  // access after free would be undefined behavior.
174  bool CanBeNull, CanBeFreed;
175  uint64_t DerefBytes =
176  V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
177  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
178  // If queried with a precise location size, we assume that location size to be
179  // accessed, thus valid.
180  if (LocSize.isPrecise())
181  DerefBytes = std::max(DerefBytes, LocSize.getValue());
182  return DerefBytes;
183 }
184 
185 /// Returns true if we can prove that the object specified by V has size Size.
186 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
187  const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
188  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
189  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
190 }
191 
192 //===----------------------------------------------------------------------===//
193 // CaptureInfo implementations
194 //===----------------------------------------------------------------------===//
195 
// Out-of-line defaulted destructor for the CaptureInfo interface declared in
// the header; presumably kept out-of-line to anchor the vtable — confirm
// against BasicAliasAnalysis.h.
CaptureInfo::~CaptureInfo() = default;
197 
199  const Instruction *I) {
200  return isNonEscapingLocalObject(Object, &IsCapturedCache);
201 }
202 
204  const Instruction *I) {
206  return false;
207 
208  auto Iter = EarliestEscapes.insert({Object, nullptr});
209  if (Iter.second) {
210  Instruction *EarliestCapture = FindEarliestCapture(
211  Object, *const_cast<Function *>(I->getFunction()),
212  /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
213  if (EarliestCapture) {
214  auto Ins = Inst2Obj.insert({EarliestCapture, {}});
215  Ins.first->second.push_back(Object);
216  }
217  Iter.first->second = EarliestCapture;
218  }
219 
220  // No capturing instruction.
221  if (!Iter.first->second)
222  return true;
223 
224  return I != Iter.first->second &&
225  !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
226 }
227 
229  auto Iter = Inst2Obj.find(I);
230  if (Iter != Inst2Obj.end()) {
231  for (const Value *Obj : Iter->second)
232  EarliestEscapes.erase(Obj);
233  Inst2Obj.erase(I);
234  }
235 }
236 
237 //===----------------------------------------------------------------------===//
238 // GetElementPtr Instruction Decomposition and Analysis
239 //===----------------------------------------------------------------------===//
240 
241 namespace {
242 /// Represents zext(sext(trunc(V))).
243 struct CastedValue {
244  const Value *V;
245  unsigned ZExtBits = 0;
246  unsigned SExtBits = 0;
247  unsigned TruncBits = 0;
248 
249  explicit CastedValue(const Value *V) : V(V) {}
250  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
251  unsigned TruncBits)
252  : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
253 
254  unsigned getBitWidth() const {
255  return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
256  SExtBits;
257  }
258 
259  CastedValue withValue(const Value *NewV) const {
260  return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
261  }
262 
263  /// Replace V with zext(NewV)
264  CastedValue withZExtOfValue(const Value *NewV) const {
265  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
266  NewV->getType()->getPrimitiveSizeInBits();
267  if (ExtendBy <= TruncBits)
268  return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
269 
270  // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
271  ExtendBy -= TruncBits;
272  return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
273  }
274 
275  /// Replace V with sext(NewV)
276  CastedValue withSExtOfValue(const Value *NewV) const {
277  unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
278  NewV->getType()->getPrimitiveSizeInBits();
279  if (ExtendBy <= TruncBits)
280  return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
281 
282  // zext(sext(sext(NewV)))
283  ExtendBy -= TruncBits;
284  return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
285  }
286 
287  APInt evaluateWith(APInt N) const {
288  assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
289  "Incompatible bit width");
290  if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
291  if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
292  if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
293  return N;
294  }
295 
296  ConstantRange evaluateWith(ConstantRange N) const {
297  assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
298  "Incompatible bit width");
299  if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
300  if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
301  if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
302  return N;
303  }
304 
305  bool canDistributeOver(bool NUW, bool NSW) const {
306  // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
307  // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
308  // trunc(x op y) == trunc(x) op trunc(y)
309  return (!ZExtBits || NUW) && (!SExtBits || NSW);
310  }
311 
312  bool hasSameCastsAs(const CastedValue &Other) const {
313  return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
314  TruncBits == Other.TruncBits;
315  }
316 };
317 
318 /// Represents zext(sext(trunc(V))) * Scale + Offset.
319 struct LinearExpression {
320  CastedValue Val;
321  APInt Scale;
322  APInt Offset;
323 
324  /// True if all operations in this expression are NSW.
325  bool IsNSW;
326 
327  LinearExpression(const CastedValue &Val, const APInt &Scale,
328  const APInt &Offset, bool IsNSW)
329  : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
330 
331  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
332  unsigned BitWidth = Val.getBitWidth();
333  Scale = APInt(BitWidth, 1);
334  Offset = APInt(BitWidth, 0);
335  }
336 
337  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
338  // The check for zero offset is necessary, because generally
339  // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
340  bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
341  return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
342  }
343 };
344 }
345 
/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Recurses through adds/subs/muls/shifts with a constant RHS, and through
/// zext/sext casts, accumulating the scale and offset. Returns the input value
/// unchanged (as the identity expression V*1 + 0) whenever decomposition is
/// not possible or not provably overflow-safe.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  // A constant folds to scale 0 with the (cast-adjusted) constant as offset.
  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // Evaluate the constant RHS through the cast stack so it matches the
      // bit width of the overall expression.
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      // The casts only distribute over the op if the relevant nowrap flags
      // are present (zext needs nuw, sext needs nsw).
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        // An analyzable Or is treated exactly like an Add.
        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        // shl by C multiplies both scale and offset by 2^C.
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  // Look through zext/sext by folding the extension into the cast stack and
  // recursing on the operand.
  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}
442 
443 /// To ensure a pointer offset fits in an integer of size IndexSize
444 /// (in bits) when that size is smaller than the maximum index size. This is
445 /// an issue, for example, in particular for 32b pointers with negative indices
446 /// that rely on two's complement wrap-arounds for precise alias information
447 /// where the maximum index size is 64b.
448 static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
449  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
450  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
451  return (Offset << ShiftBits).ashr(ShiftBits);
452 }
453 
454 namespace {
455 // A linear transformation of a Value; this class represents
456 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  // The index value, together with its zext/sext/trunc cast stack.
  CastedValue Val;
  // Multiplier applied to the index; this term contributes Val * Scale to the
  // overall offset (see the class comment above).
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  // Print to dbgs() with a trailing newline (debugging aid).
  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  // Render the index as "(V=<name>, zextbits=.., sextbits=.., truncbits=..,
  // scale=..)".
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale << ")";
  }
};
479 }
480 
481 // Represents the internal structure of a GEP, decomposed into a base pointer,
482 // constant offsets, and variable scaled indices.
484  // Base pointer of the GEP
485  const Value *Base;
486  // Total constant offset from base.
488  // Scaled variable (non-constant) indices.
490  // Are all operations inbounds GEPs or non-indexing operations?
491  // (None iff expression doesn't involve any geps)
493 
494  void dump() const {
495  print(dbgs());
496  dbgs() << "\n";
497  }
498  void print(raw_ostream &OS) const {
499  OS << "(DecomposedGEP Base=" << Base->getName()
500  << ", Offset=" << Offset
501  << ", VarIndices=[";
502  for (size_t i = 0; i < VarIndices.size(); i++) {
503  if (i != 0)
504  OS << ", ";
505  VarIndices[i].print(OS);
506  }
507  OS << "])";
508  }
509 };
510 
511 
512 /// If V is a symbolic pointer expression, decompose it into a base pointer
513 /// with a constant offset and a number of scaled symbolic offsets.
514 ///
515 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
516 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
517 /// specified amount, but which may have other unrepresented high bits. As
518 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
520 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
521  AssumptionCache *AC, DominatorTree *DT) {
522  // Limit recursion depth to limit compile time in crazy cases.
523  unsigned MaxLookup = MaxLookupSearchDepth;
524  SearchTimes++;
525  const Instruction *CxtI = dyn_cast<Instruction>(V);
526 
527  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
528  DecomposedGEP Decomposed;
529  Decomposed.Offset = APInt(MaxIndexSize, 0);
530  do {
531  // See if this is a bitcast or GEP.
532  const Operator *Op = dyn_cast<Operator>(V);
533  if (!Op) {
534  // The only non-operator case we can handle are GlobalAliases.
535  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
536  if (!GA->isInterposable()) {
537  V = GA->getAliasee();
538  continue;
539  }
540  }
541  Decomposed.Base = V;
542  return Decomposed;
543  }
544 
545  if (Op->getOpcode() == Instruction::BitCast ||
546  Op->getOpcode() == Instruction::AddrSpaceCast) {
547  V = Op->getOperand(0);
548  continue;
549  }
550 
551  const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
552  if (!GEPOp) {
553  if (const auto *PHI = dyn_cast<PHINode>(V)) {
554  // Look through single-arg phi nodes created by LCSSA.
555  if (PHI->getNumIncomingValues() == 1) {
556  V = PHI->getIncomingValue(0);
557  continue;
558  }
559  } else if (const auto *Call = dyn_cast<CallBase>(V)) {
560  // CaptureTracking can know about special capturing properties of some
561  // intrinsics like launder.invariant.group, that can't be expressed with
562  // the attributes, but have properties like returning aliasing pointer.
563  // Because some analysis may assume that nocaptured pointer is not
564  // returned from some special intrinsic (because function would have to
565  // be marked with returns attribute), it is crucial to use this function
566  // because it should be in sync with CaptureTracking. Not using it may
567  // cause weird miscompilations where 2 aliasing pointers are assumed to
568  // noalias.
569  if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
570  V = RP;
571  continue;
572  }
573  }
574 
575  Decomposed.Base = V;
576  return Decomposed;
577  }
578 
579  // Track whether we've seen at least one in bounds gep, and if so, whether
580  // all geps parsed were in bounds.
581  if (Decomposed.InBounds == None)
582  Decomposed.InBounds = GEPOp->isInBounds();
583  else if (!GEPOp->isInBounds())
584  Decomposed.InBounds = false;
585 
586  assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
587 
588  unsigned AS = GEPOp->getPointerAddressSpace();
589  // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
590  gep_type_iterator GTI = gep_type_begin(GEPOp);
591  unsigned IndexSize = DL.getIndexSizeInBits(AS);
592  // Assume all GEP operands are constants until proven otherwise.
593  bool GepHasConstantOffset = true;
594  for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
595  I != E; ++I, ++GTI) {
596  const Value *Index = *I;
597  // Compute the (potentially symbolic) offset in bytes for this index.
598  if (StructType *STy = GTI.getStructTypeOrNull()) {
599  // For a struct, add the member offset.
600  unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
601  if (FieldNo == 0)
602  continue;
603 
604  Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
605  continue;
606  }
607 
608  // For an array/pointer, add the element offset, explicitly scaled.
609  if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
610  if (CIdx->isZero())
611  continue;
612 
613  // Don't attempt to analyze GEPs if the scalable index is not zero.
614  TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
615  if (AllocTypeSize.isScalable()) {
616  Decomposed.Base = V;
617  return Decomposed;
618  }
619 
620  Decomposed.Offset += AllocTypeSize.getFixedSize() *
621  CIdx->getValue().sextOrTrunc(MaxIndexSize);
622  continue;
623  }
624 
625  TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
626  if (AllocTypeSize.isScalable()) {
627  Decomposed.Base = V;
628  return Decomposed;
629  }
630 
631  GepHasConstantOffset = false;
632 
633  // If the integer type is smaller than the index size, it is implicitly
634  // sign extended or truncated to index size.
635  unsigned Width = Index->getType()->getIntegerBitWidth();
636  unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
637  unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
638  LinearExpression LE = GetLinearExpression(
639  CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
640 
641  // Scale by the type size.
642  unsigned TypeSize = AllocTypeSize.getFixedSize();
643  LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
644  Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
645  APInt Scale = LE.Scale.sext(MaxIndexSize);
646 
647  // If we already had an occurrence of this index variable, merge this
648  // scale into it. For example, we want to handle:
649  // A[x][x] -> x*16 + x*4 -> x*20
650  // This also ensures that 'x' only appears in the index list once.
651  for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
652  if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
653  Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
654  Scale += Decomposed.VarIndices[i].Scale;
655  Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
656  break;
657  }
658  }
659 
660  // Make sure that we have a scale that makes sense for this target's
661  // index size.
662  Scale = adjustToIndexSize(Scale, IndexSize);
663 
664  if (!!Scale) {
665  VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
666  Decomposed.VarIndices.push_back(Entry);
667  }
668  }
669 
670  // Take care of wrap-arounds
671  if (GepHasConstantOffset)
672  Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);
673 
674  // Analyze the base pointer next.
675  V = GEPOp->getOperand(0);
676  } while (--MaxLookup);
677 
678  // If the chain of expressions is too deep, just return early.
679  Decomposed.Base = V;
680  SearchLimitReached++;
681  return Decomposed;
682 }
683 
684 /// Returns whether the given pointer value points to memory that is local to
685 /// the function, with global constants being considered local to all
686 /// functions.
688  AAQueryInfo &AAQI, bool OrLocal) {
689  assert(Visited.empty() && "Visited must be cleared after use!");
690  auto _ = make_scope_exit([&]{ Visited.clear(); });
691 
692  unsigned MaxLookup = 8;
694  Worklist.push_back(Loc.Ptr);
695  do {
696  const Value *V = getUnderlyingObject(Worklist.pop_back_val());
697  if (!Visited.insert(V).second)
698  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
699 
700  // An alloca instruction defines local memory.
701  if (OrLocal && isa<AllocaInst>(V))
702  continue;
703 
704  // A global constant counts as local memory for our purposes.
705  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
706  // Note: this doesn't require GV to be "ODR" because it isn't legal for a
707  // global to be marked constant in some modules and non-constant in
708  // others. GV may even be a declaration, not a definition.
709  if (!GV->isConstant())
710  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
711  continue;
712  }
713 
714  // If both select values point to local memory, then so does the select.
715  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
716  Worklist.push_back(SI->getTrueValue());
717  Worklist.push_back(SI->getFalseValue());
718  continue;
719  }
720 
721  // If all values incoming to a phi node point to local memory, then so does
722  // the phi.
723  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
724  // Don't bother inspecting phi nodes with many operands.
725  if (PN->getNumIncomingValues() > MaxLookup)
726  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
727  append_range(Worklist, PN->incoming_values());
728  continue;
729  }
730 
731  // Otherwise be conservative.
732  return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
733  } while (!Worklist.empty() && --MaxLookup);
734 
735  return Worklist.empty();
736 }
737 
738 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
739  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
740  return II && II->getIntrinsicID() == IID;
741 }
742 
744  if (Attrs.hasAttribute(Attribute::ReadNone))
746 
748  if (Attrs.hasAttribute(Attribute::ReadOnly))
749  MR = ModRefInfo::Ref;
750  else if (Attrs.hasAttribute(Attribute::WriteOnly))
751  MR = ModRefInfo::Mod;
752 
753  if (Attrs.hasAttribute(Attribute::ArgMemOnly))
755  if (Attrs.hasAttribute(Attribute::InaccessibleMemOnly))
757  if (Attrs.hasAttribute(Attribute::InaccessibleMemOrArgMemOnly))
759  return FunctionModRefBehavior(MR);
760 }
761 
762 /// Returns the behavior when calling the given call site.
765  getModRefBehaviorFromAttrs(Call->getAttributes().getFnAttrs());
766 
767  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
768  FunctionModRefBehavior FMRB = getBestAAResults().getModRefBehavior(F);
769  // Operand bundles on the call may also read or write memory, in addition
770  // to the behavior of the called function.
771  if (Call->hasReadingOperandBundles())
773  if (Call->hasClobberingOperandBundles())
775  Min &= FMRB;
776  }
777 
778  return Min;
779 }
780 
781 /// Returns the behavior when calling the given function. For use when the call
782 /// site is not known.
784  switch (F->getIntrinsicID()) {
785  case Intrinsic::experimental_guard:
786  case Intrinsic::experimental_deoptimize:
787  // These intrinsics can read arbitrary memory, and additionally modref
788  // inaccessible memory to model control dependence.
791  }
792 
793  return getModRefBehaviorFromAttrs(F->getAttributes().getFnAttrs());
794 }
795 
796 /// Returns true if this is a writeonly (i.e Mod only) parameter.
797 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
798  const TargetLibraryInfo &TLI) {
799  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
800  return true;
801 
802  // We can bound the aliasing properties of memset_pattern16 just as we can
803  // for memcpy/memset. This is particularly important because the
804  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
805  // whenever possible.
806  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
807  // attributes.
808  LibFunc F;
809  if (Call->getCalledFunction() &&
810  TLI.getLibFunc(*Call->getCalledFunction(), F) &&
811  F == LibFunc_memset_pattern16 && TLI.has(F))
812  if (ArgIdx == 0)
813  return true;
814 
815  // TODO: memset_pattern4, memset_pattern8
816  // TODO: _chk variants
817  // TODO: strcmp, strcpy
818 
819  return false;
820 }
821 
823  unsigned ArgIdx) {
824  // Checking for known builtin intrinsics and target library functions.
825  if (isWriteOnlyParam(Call, ArgIdx, TLI))
826  return ModRefInfo::Mod;
827 
828  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
829  return ModRefInfo::Ref;
830 
831  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
832  return ModRefInfo::NoModRef;
833 
834  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
835 }
836 
837 #ifndef NDEBUG
838 static const Function *getParent(const Value *V) {
839  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
840  if (!inst->getParent())
841  return nullptr;
842  return inst->getParent()->getParent();
843  }
844 
845  if (const Argument *arg = dyn_cast<Argument>(V))
846  return arg->getParent();
847 
848  return nullptr;
849 }
850 
851 static bool notDifferentParent(const Value *O1, const Value *O2) {
852 
853  const Function *F1 = getParent(O1);
854  const Function *F2 = getParent(O2);
855 
856  return !F1 || !F2 || F1 == F2;
857 }
858 #endif
859 
861  const MemoryLocation &LocB,
862  AAQueryInfo &AAQI) {
863  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
864  "BasicAliasAnalysis doesn't support interprocedural queries.");
865  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
866 }
867 
868 /// Checks to see if the specified callsite can clobber the specified memory
869 /// object.
870 ///
871 /// Since we only look at local properties of this function, we really can't
872 /// say much about this query. We do, however, use simple "address taken"
873 /// analysis on local objects.
875  const MemoryLocation &Loc,
876  AAQueryInfo &AAQI) {
877  assert(notDifferentParent(Call, Loc.Ptr) &&
878  "AliasAnalysis query involving multiple functions!");
879 
880  const Value *Object = getUnderlyingObject(Loc.Ptr);
881 
882  // Calls marked 'tail' cannot read or write allocas from the current frame
883  // because the current frame might be destroyed by the time they run. However,
884  // a tail call may use an alloca with byval. Calling with byval copies the
885  // contents of the alloca into argument registers or stack slots, so there is
886  // no lifetime issue.
887  if (isa<AllocaInst>(Object))
888  if (const CallInst *CI = dyn_cast<CallInst>(Call))
889  if (CI->isTailCall() &&
890  !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
891  return ModRefInfo::NoModRef;
892 
893  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
894  // modify them even though the alloca is not escaped.
895  if (auto *AI = dyn_cast<AllocaInst>(Object))
896  if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
897  return ModRefInfo::Mod;
898 
899  // If the pointer is to a locally allocated object that does not escape,
900  // then the call can not mod/ref the pointer unless the call takes the pointer
901  // as an argument, and itself doesn't capture it.
902  if (!isa<Constant>(Object) && Call != Object &&
903  AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
904 
905  // Optimistically assume that call doesn't touch Object and check this
906  // assumption in the following loop.
908 
909  unsigned OperandNo = 0;
910  for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
911  CI != CE; ++CI, ++OperandNo) {
912  // Only look at the no-capture or byval pointer arguments. If this
913  // pointer were passed to arguments that were neither of these, then it
914  // couldn't be no-capture.
915  if (!(*CI)->getType()->isPointerTy() ||
916  (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
917  !Call->isByValArgument(OperandNo)))
918  continue;
919 
920  // Call doesn't access memory through this operand, so we don't care
921  // if it aliases with Object.
922  if (Call->doesNotAccessMemory(OperandNo))
923  continue;
924 
925  // If this is a no-capture pointer argument, see if we can tell that it
926  // is impossible to alias the pointer we're checking.
927  AliasResult AR = getBestAAResults().alias(
930  // Operand doesn't alias 'Object', continue looking for other aliases
931  if (AR == AliasResult::NoAlias)
932  continue;
933  // Operand aliases 'Object', but call doesn't modify it. Strengthen
934  // initial assumption and keep looking in case if there are more aliases.
935  if (Call->onlyReadsMemory(OperandNo)) {
936  Result |= ModRefInfo::Ref;
937  continue;
938  }
939  // Operand aliases 'Object' but call only writes into it.
940  if (Call->onlyWritesMemory(OperandNo)) {
941  Result |= ModRefInfo::Mod;
942  continue;
943  }
944  // This operand aliases 'Object' and call reads and writes into it.
945  // Setting ModRef will not yield an early return below, MustAlias is not
946  // used further.
947  Result = ModRefInfo::ModRef;
948  break;
949  }
950 
951  // Early return if we improved mod ref information
952  if (!isModAndRefSet(Result))
953  return Result;
954  }
955 
956  // If the call is malloc/calloc like, we can assume that it doesn't
957  // modify any IR visible value. This is only valid because we assume these
958  // routines do not read values visible in the IR. TODO: Consider special
959  // casing realloc and strdup routines which access only their arguments as
960  // well. Or alternatively, replace all of this with inaccessiblememonly once
961  // that's implemented fully.
962  if (isMallocOrCallocLikeFn(Call, &TLI)) {
963  // Be conservative if the accessed pointer may alias the allocation -
964  // fallback to the generic handling below.
966  AAQI) == AliasResult::NoAlias)
967  return ModRefInfo::NoModRef;
968  }
969 
970  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
971  // writing so that proper control dependencies are maintained but they never
972  // mod any particular memory location visible to the IR.
973  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
974  // intrinsic is now modeled as reading memory. This prevents hoisting the
975  // invariant.start intrinsic over stores. Consider:
976  // *ptr = 40;
977  // *ptr = 50;
978  // invariant_start(ptr)
979  // int val = *ptr;
980  // print(val);
981  //
982  // This cannot be transformed to:
983  //
984  // *ptr = 40;
985  // invariant_start(ptr)
986  // *ptr = 50;
987  // int val = *ptr;
988  // print(val);
989  //
990  // The transformation will cause the second store to be ignored (based on
991  // rules of invariant.start) and print 40, while the first program always
992  // prints 50.
993  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
994  return ModRefInfo::Ref;
995 
996  // The AAResultBase base class has some smarts, lets use them.
997  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
998 }
999 
1001  const CallBase *Call2,
1002  AAQueryInfo &AAQI) {
1003  // Guard intrinsics are marked as arbitrarily writing so that proper control
1004  // dependencies are maintained but they never mods any particular memory
1005  // location.
1006  //
1007  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1008  // heap state at the point the guard is issued needs to be consistent in case
1009  // the guard invokes the "deopt" continuation.
1010 
1011  // NB! This function is *not* commutative, so we special case two
1012  // possibilities for guard intrinsics.
1013 
1014  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1015  return isModSet(getModRefBehavior(Call2).getModRef())
1016  ? ModRefInfo::Ref
1018 
1019  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1020  return isModSet(getModRefBehavior(Call1).getModRef())
1021  ? ModRefInfo::Mod
1023 
1024  // The AAResultBase base class has some smarts, lets use them.
1025  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1026 }
1027 
1028 /// Return true if we know V to the base address of the corresponding memory
1029 /// object. This implies that any address less than V must be out of bounds
1030 /// for the underlying object. Note that just being isIdentifiedObject() is
1031 /// not enough - For example, a negative offset from a noalias argument or call
1032 /// can be inbounds w.r.t the actual underlying object.
1033 static bool isBaseOfObject(const Value *V) {
1034  // TODO: We can handle other cases here
1035  // 1) For GC languages, arguments to functions are often required to be
1036  // base pointers.
1037  // 2) Result of allocation routines are often base pointers. Leverage TLI.
1038  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1039 }
1040 
1041 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1042 /// another pointer.
1043 ///
1044 /// We know that V1 is a GEP, but we don't know anything about V2.
1045 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1046 /// V2.
1047 AliasResult BasicAAResult::aliasGEP(
1048  const GEPOperator *GEP1, LocationSize V1Size,
1049  const Value *V2, LocationSize V2Size,
1050  const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1051  if (!V1Size.hasValue() && !V2Size.hasValue()) {
1052  // TODO: This limitation exists for compile-time reasons. Relax it if we
1053  // can avoid exponential pathological cases.
1054  if (!isa<GEPOperator>(V2))
1055  return AliasResult::MayAlias;
1056 
1057  // If both accesses have unknown size, we can only check whether the base
1058  // objects don't alias.
1059  AliasResult BaseAlias = getBestAAResults().alias(
1060  MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1061  MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1062  return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1064  }
1065 
1066  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1067  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1068 
1069  // Bail if we were not able to decompose anything.
1070  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1071  return AliasResult::MayAlias;
1072 
1073  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1074  // symbolic difference.
1075  subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1076 
1077  // If an inbounds GEP would have to start from an out of bounds address
1078  // for the two to alias, then we can assume noalias.
1079  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1080  V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1081  isBaseOfObject(DecompGEP2.Base))
1082  return AliasResult::NoAlias;
1083 
1084  if (isa<GEPOperator>(V2)) {
1085  // Symmetric case to above.
1086  if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1087  V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1088  isBaseOfObject(DecompGEP1.Base))
1089  return AliasResult::NoAlias;
1090  }
1091 
1092  // For GEPs with identical offsets, we can preserve the size and AAInfo
1093  // when performing the alias check on the underlying objects.
1094  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1095  return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
1096  MemoryLocation(DecompGEP2.Base, V2Size),
1097  AAQI);
1098 
1099  // Do the base pointers alias?
1100  AliasResult BaseAlias = getBestAAResults().alias(
1101  MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1102  MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1103 
1104  // If we get a No or May, then return it immediately, no amount of analysis
1105  // will improve this situation.
1106  if (BaseAlias != AliasResult::MustAlias) {
1107  assert(BaseAlias == AliasResult::NoAlias ||
1108  BaseAlias == AliasResult::MayAlias);
1109  return BaseAlias;
1110  }
1111 
1112  // If there is a constant difference between the pointers, but the difference
1113  // is less than the size of the associated memory object, then we know
1114  // that the objects are partially overlapping. If the difference is
1115  // greater, we know they do not overlap.
1116  if (DecompGEP1.VarIndices.empty()) {
1117  APInt &Off = DecompGEP1.Offset;
1118 
1119  // Initialize for Off >= 0 (V2 <= GEP1) case.
1120  const Value *LeftPtr = V2;
1121  const Value *RightPtr = GEP1;
1122  LocationSize VLeftSize = V2Size;
1123  LocationSize VRightSize = V1Size;
1124  const bool Swapped = Off.isNegative();
1125 
1126  if (Swapped) {
1127  // Swap if we have the situation where:
1128  // + +
1129  // | BaseOffset |
1130  // ---------------->|
1131  // |-->V1Size |-------> V2Size
1132  // GEP1 V2
1133  std::swap(LeftPtr, RightPtr);
1134  std::swap(VLeftSize, VRightSize);
1135  Off = -Off;
1136  }
1137 
1138  if (!VLeftSize.hasValue())
1139  return AliasResult::MayAlias;
1140 
1141  const uint64_t LSize = VLeftSize.getValue();
1142  if (Off.ult(LSize)) {
1143  // Conservatively drop processing if a phi was visited and/or offset is
1144  // too big.
1146  if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1147  (Off + VRightSize.getValue()).ule(LSize)) {
1148  // Memory referenced by right pointer is nested. Save the offset in
1149  // cache. Note that originally offset estimated as GEP1-V2, but
1150  // AliasResult contains the shift that represents GEP1+Offset=V2.
1151  AR.setOffset(-Off.getSExtValue());
1152  AR.swap(Swapped);
1153  }
1154  return AR;
1155  }
1156  return AliasResult::NoAlias;
1157  }
1158 
1159  // We need to know both acess sizes for all the following heuristics.
1160  if (!V1Size.hasValue() || !V2Size.hasValue())
1161  return AliasResult::MayAlias;
1162 
1163  APInt GCD;
1164  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1165  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1166  const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1167  const APInt &Scale = Index.Scale;
1168  APInt ScaleForGCD = Scale;
1169  if (!Index.IsNSW)
1170  ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1171  Scale.countTrailingZeros());
1172 
1173  if (i == 0)
1174  GCD = ScaleForGCD.abs();
1175  else
1176  GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1177 
1178  ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1179  true, &AC, Index.CxtI);
1180  KnownBits Known =
1181  computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1182  CR = CR.intersectWith(
1183  ConstantRange::fromKnownBits(Known, /* Signed */ true),
1185  CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1186 
1187  assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1188  "Bit widths are normalized to MaxIndexSize");
1189  if (Index.IsNSW)
1190  OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
1191  else
1192  OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale)));
1193  }
1194 
1195  // We now have accesses at two offsets from the same base:
1196  // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1197  // 2. 0 with size V2Size
1198  // Using arithmetic modulo GCD, the accesses are at
1199  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1200  // into the range [V2Size..GCD), then we know they cannot overlap.
1201  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1202  if (ModOffset.isNegative())
1203  ModOffset += GCD; // We want mod, not rem.
1204  if (ModOffset.uge(V2Size.getValue()) &&
1205  (GCD - ModOffset).uge(V1Size.getValue()))
1206  return AliasResult::NoAlias;
1207 
1208  // Compute ranges of potentially accessed bytes for both accesses. If the
1209  // interseciton is empty, there can be no overlap.
1210  unsigned BW = OffsetRange.getBitWidth();
1211  ConstantRange Range1 = OffsetRange.add(
1212  ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1213  ConstantRange Range2 =
1214  ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1215  if (Range1.intersectWith(Range2).isEmptySet())
1216  return AliasResult::NoAlias;
1217 
1218  // Try to determine the range of values for VarIndex such that
1219  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1220  Optional<APInt> MinAbsVarIndex;
1221  if (DecompGEP1.VarIndices.size() == 1) {
1222  // VarIndex = Scale*V.
1223  const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1224  if (Var.Val.TruncBits == 0 &&
1225  isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1226  // If V != 0, then abs(VarIndex) > 0.
1227  MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1);
1228 
1229  // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1230  // potentially wrapping math.
1231  auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1232  if (Var.IsNSW)
1233  return true;
1234 
1235  int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1236  // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1237  // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1238  // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1239  int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1240  if (MaxScaleValueBW <= 0)
1241  return false;
1242  return Var.Scale.ule(
1243  APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1244  };
1245  // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1246  // presence of potentially wrapping math.
1247  if (MultiplyByScaleNoWrap(Var)) {
1248  // If V != 0 then abs(VarIndex) >= abs(Scale).
1249  MinAbsVarIndex = Var.Scale.abs();
1250  }
1251  }
1252  } else if (DecompGEP1.VarIndices.size() == 2) {
1253  // VarIndex = Scale*V0 + (-Scale)*V1.
1254  // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1255  // Check that VisitedPhiBBs is empty, to avoid reasoning about
1256  // inequality of values across loop iterations.
1257  const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1258  const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1259  if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1260  Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
1261  isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1262  DT))
1263  MinAbsVarIndex = Var0.Scale.abs();
1264  }
1265 
1266  if (MinAbsVarIndex) {
1267  // The constant offset will have added at least +/-MinAbsVarIndex to it.
1268  APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1269  APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1270  // We know that Offset <= OffsetLo || Offset >= OffsetHi
1271  if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1272  OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1273  return AliasResult::NoAlias;
1274  }
1275 
1276  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1277  return AliasResult::NoAlias;
1278 
1279  // Statically, we can see that the base objects are the same, but the
1280  // pointers have dynamic offsets which we can't resolve. And none of our
1281  // little tricks above worked.
1282  return AliasResult::MayAlias;
1283 }
1284 
1286  // If the results agree, take it.
1287  if (A == B)
1288  return A;
1289  // A mix of PartialAlias and MustAlias is PartialAlias.
1293  // Otherwise, we don't know anything.
1294  return AliasResult::MayAlias;
1295 }
1296 
1297 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1298 /// against another.
1300 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1301  const Value *V2, LocationSize V2Size,
1302  AAQueryInfo &AAQI) {
1303  // If the values are Selects with the same condition, we can do a more precise
1304  // check: just check for aliases between the values on corresponding arms.
1305  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1306  if (SI->getCondition() == SI2->getCondition()) {
1307  AliasResult Alias = getBestAAResults().alias(
1308  MemoryLocation(SI->getTrueValue(), SISize),
1309  MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1310  if (Alias == AliasResult::MayAlias)
1311  return AliasResult::MayAlias;
1312  AliasResult ThisAlias = getBestAAResults().alias(
1313  MemoryLocation(SI->getFalseValue(), SISize),
1314  MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1315  return MergeAliasResults(ThisAlias, Alias);
1316  }
1317 
1318  // If both arms of the Select node NoAlias or MustAlias V2, then returns
1319  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1320  AliasResult Alias =
1321  getBestAAResults().alias(MemoryLocation(SI->getTrueValue(), SISize),
1322  MemoryLocation(V2, V2Size), AAQI);
1323  if (Alias == AliasResult::MayAlias)
1324  return AliasResult::MayAlias;
1325 
1326  AliasResult ThisAlias =
1327  getBestAAResults().alias(MemoryLocation(SI->getFalseValue(), SISize),
1328  MemoryLocation(V2, V2Size), AAQI);
1329  return MergeAliasResults(ThisAlias, Alias);
1330 }
1331 
1332 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1333 /// another.
1334 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1335  const Value *V2, LocationSize V2Size,
1336  AAQueryInfo &AAQI) {
1337  if (!PN->getNumIncomingValues())
1338  return AliasResult::NoAlias;
1339  // If the values are PHIs in the same block, we can do a more precise
1340  // as well as efficient check: just check for aliases between the values
1341  // on corresponding edges.
1342  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1343  if (PN2->getParent() == PN->getParent()) {
1344  Optional<AliasResult> Alias;
1345  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1346  AliasResult ThisAlias = getBestAAResults().alias(
1347  MemoryLocation(PN->getIncomingValue(i), PNSize),
1349  PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1350  AAQI);
1351  if (Alias)
1352  *Alias = MergeAliasResults(*Alias, ThisAlias);
1353  else
1354  Alias = ThisAlias;
1355  if (*Alias == AliasResult::MayAlias)
1356  break;
1357  }
1358  return *Alias;
1359  }
1360 
1361  SmallVector<Value *, 4> V1Srcs;
1362  // If a phi operand recurses back to the phi, we can still determine NoAlias
1363  // if we don't alias the underlying objects of the other phi operands, as we
1364  // know that the recursive phi needs to be based on them in some way.
1365  bool isRecursive = false;
1366  auto CheckForRecPhi = [&](Value *PV) {
1367  if (!EnableRecPhiAnalysis)
1368  return false;
1369  if (getUnderlyingObject(PV) == PN) {
1370  isRecursive = true;
1371  return true;
1372  }
1373  return false;
1374  };
1375 
1376  if (PV) {
1377  // If we have PhiValues then use it to get the underlying phi values.
1378  const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1379  // If we have more phi values than the search depth then return MayAlias
1380  // conservatively to avoid compile time explosion. The worst possible case
1381  // is if both sides are PHI nodes. In which case, this is O(m x n) time
1382  // where 'm' and 'n' are the number of PHI sources.
1383  if (PhiValueSet.size() > MaxLookupSearchDepth)
1384  return AliasResult::MayAlias;
1385  // Add the values to V1Srcs
1386  for (Value *PV1 : PhiValueSet) {
1387  if (CheckForRecPhi(PV1))
1388  continue;
1389  V1Srcs.push_back(PV1);
1390  }
1391  } else {
1392  // If we don't have PhiInfo then just look at the operands of the phi itself
1393  // FIXME: Remove this once we can guarantee that we have PhiInfo always
1394  SmallPtrSet<Value *, 4> UniqueSrc;
1395  Value *OnePhi = nullptr;
1396  for (Value *PV1 : PN->incoming_values()) {
1397  if (isa<PHINode>(PV1)) {
1398  if (OnePhi && OnePhi != PV1) {
1399  // To control potential compile time explosion, we choose to be
1400  // conserviate when we have more than one Phi input. It is important
1401  // that we handle the single phi case as that lets us handle LCSSA
1402  // phi nodes and (combined with the recursive phi handling) simple
1403  // pointer induction variable patterns.
1404  return AliasResult::MayAlias;
1405  }
1406  OnePhi = PV1;
1407  }
1408 
1409  if (CheckForRecPhi(PV1))
1410  continue;
1411 
1412  if (UniqueSrc.insert(PV1).second)
1413  V1Srcs.push_back(PV1);
1414  }
1415 
1416  if (OnePhi && UniqueSrc.size() > 1)
1417  // Out of an abundance of caution, allow only the trivial lcssa and
1418  // recursive phi cases.
1419  return AliasResult::MayAlias;
1420  }
1421 
1422  // If V1Srcs is empty then that means that the phi has no underlying non-phi
1423  // value. This should only be possible in blocks unreachable from the entry
1424  // block, but return MayAlias just in case.
1425  if (V1Srcs.empty())
1426  return AliasResult::MayAlias;
1427 
1428  // If this PHI node is recursive, indicate that the pointer may be moved
1429  // across iterations. We can only prove NoAlias if different underlying
1430  // objects are involved.
1431  if (isRecursive)
1433 
1434  // In the recursive alias queries below, we may compare values from two
1435  // different loop iterations. Keep track of visited phi blocks, which will
1436  // be used when determining value equivalence.
1437  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1438  auto _ = make_scope_exit([&]() {
1439  if (BlockInserted)
1440  VisitedPhiBBs.erase(PN->getParent());
1441  });
1442 
1443  // If we inserted a block into VisitedPhiBBs, alias analysis results that
1444  // have been cached earlier may no longer be valid. Perform recursive queries
1445  // with a new AAQueryInfo.
1446  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1447  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1448 
1449  AliasResult Alias = getBestAAResults().alias(
1450  MemoryLocation(V1Srcs[0], PNSize), MemoryLocation(V2, V2Size), *UseAAQI);
1451 
1452  // Early exit if the check of the first PHI source against V2 is MayAlias.
1453  // Other results are not possible.
1454  if (Alias == AliasResult::MayAlias)
1455  return AliasResult::MayAlias;
1456  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1457  // remain valid to all elements and needs to conservatively return MayAlias.
1458  if (isRecursive && Alias != AliasResult::NoAlias)
1459  return AliasResult::MayAlias;
1460 
1461  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1462  // NoAlias / MustAlias. Otherwise, returns MayAlias.
1463  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1464  Value *V = V1Srcs[i];
1465 
1466  AliasResult ThisAlias = getBestAAResults().alias(
1467  MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), *UseAAQI);
1468  Alias = MergeAliasResults(ThisAlias, Alias);
1469  if (Alias == AliasResult::MayAlias)
1470  break;
1471  }
1472 
1473  return Alias;
1474 }
1475 
1476 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1477 /// array references.
1478 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1479  const Value *V2, LocationSize V2Size,
1480  AAQueryInfo &AAQI) {
1481  // If either of the memory references is empty, it doesn't matter what the
1482  // pointer values are.
1483  if (V1Size.isZero() || V2Size.isZero())
1484  return AliasResult::NoAlias;
1485 
1486  // Strip off any casts if they exist.
1488  V2 = V2->stripPointerCastsForAliasAnalysis();
1489 
1490  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1491  // value for undef that aliases nothing in the program.
1492  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1493  return AliasResult::NoAlias;
1494 
1495  // Are we checking for alias of the same value?
1496  // Because we look 'through' phi nodes, we could look at "Value" pointers from
1497  // different iterations. We must therefore make sure that this is not the
1498  // case. The function isValueEqualInPotentialCycles ensures that this cannot
1499  // happen by looking at the visited phi nodes and making sure they cannot
1500  // reach the value.
1501  if (isValueEqualInPotentialCycles(V1, V2))
1502  return AliasResult::MustAlias;
1503 
1504  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1505  return AliasResult::NoAlias; // Scalars cannot alias each other
1506 
1507  // Figure out what objects these things are pointing to if we can.
1510 
1511  // Null values in the default address space don't point to any object, so they
1512  // don't alias any other pointer.
1513  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1514  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1515  return AliasResult::NoAlias;
1516  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1517  if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1518  return AliasResult::NoAlias;
1519 
1520  if (O1 != O2) {
1521  // If V1/V2 point to two different objects, we know that we have no alias.
1522  if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1523  return AliasResult::NoAlias;
1524 
1525  // Constant pointers can't alias with non-const isIdentifiedObject objects.
1526  if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1527  (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1528  return AliasResult::NoAlias;
1529 
1530  // Function arguments can't alias with things that are known to be
1531  // unambigously identified at the function level.
1532  if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1533  (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1534  return AliasResult::NoAlias;
1535 
1536  // If one pointer is the result of a call/invoke or load and the other is a
1537  // non-escaping local object within the same function, then we know the
1538  // object couldn't escape to a point where the call could return it.
1539  //
1540  // Note that if the pointers are in different functions, there are a
1541  // variety of complications. A call with a nocapture argument may still
1542  // temporary store the nocapture argument's value in a temporary memory
1543  // location if that memory location doesn't escape. Or it may pass a
1544  // nocapture value to other functions as long as they don't capture it.
1545  if (isEscapeSource(O1) &&
1546  AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1547  return AliasResult::NoAlias;
1548  if (isEscapeSource(O2) &&
1549  AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1550  return AliasResult::NoAlias;
1551  }
1552 
1553  // If the size of one access is larger than the entire object on the other
1554  // side, then we know such behavior is undefined and can assume no alias.
1555  bool NullIsValidLocation = NullPointerIsDefined(&F);
1556  if ((isObjectSmallerThan(
1557  O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1558  TLI, NullIsValidLocation)) ||
1560  O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1561  TLI, NullIsValidLocation)))
1562  return AliasResult::NoAlias;
1563 
1564  // If one the accesses may be before the accessed pointer, canonicalize this
1565  // by using unknown after-pointer sizes for both accesses. This is
1566  // equivalent, because regardless of which pointer is lower, one of them
1567  // will always came after the other, as long as the underlying objects aren't
1568  // disjoint. We do this so that the rest of BasicAA does not have to deal
1569  // with accesses before the base pointer, and to improve cache utilization by
1570  // merging equivalent states.
1571  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1572  V1Size = LocationSize::afterPointer();
1573  V2Size = LocationSize::afterPointer();
1574  }
1575 
1576  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1577  // for recursive queries. For this reason, this limit is chosen to be large
1578  // enough to be very rarely hit, while still being small enough to avoid
1579  // stack overflows.
1580  if (AAQI.Depth >= 512)
1581  return AliasResult::MayAlias;
1582 
1583  // Check the cache before climbing up use-def chains. This also terminates
1584  // otherwise infinitely recursive queries.
1585  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1586  const bool Swapped = V1 > V2;
1587  if (Swapped)
1588  std::swap(Locs.first, Locs.second);
1589  const auto &Pair = AAQI.AliasCache.try_emplace(
1591  if (!Pair.second) {
1592  auto &Entry = Pair.first->second;
1593  if (!Entry.isDefinitive()) {
1594  // Remember that we used an assumption.
1595  ++Entry.NumAssumptionUses;
1596  ++AAQI.NumAssumptionUses;
1597  }
1598  // Cache contains sorted {V1,V2} pairs but we should return original order.
1599  auto Result = Entry.Result;
1600  Result.swap(Swapped);
1601  return Result;
1602  }
1603 
1604  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1605  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1607  aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1608 
1609  auto It = AAQI.AliasCache.find(Locs);
1610  assert(It != AAQI.AliasCache.end() && "Must be in cache");
1611  auto &Entry = It->second;
1612 
1613  // Check whether a NoAlias assumption has been used, but disproven.
1614  bool AssumptionDisproven =
1615  Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1616  if (AssumptionDisproven)
1618 
1619  // This is a definitive result now, when considered as a root query.
1620  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1621  Entry.Result = Result;
1622  // Cache contains sorted {V1,V2} pairs.
1623  Entry.Result.swap(Swapped);
1624  Entry.NumAssumptionUses = -1;
1625 
1626  // If the assumption has been disproven, remove any results that may have
1627  // been based on this assumption. Do this after the Entry updates above to
1628  // avoid iterator invalidation.
1629  if (AssumptionDisproven)
1630  while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1632 
1633  // The result may still be based on assumptions higher up in the chain.
1634  // Remember it, so it can be purged from the cache later.
1635  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1636  Result != AliasResult::MayAlias)
1637  AAQI.AssumptionBasedResults.push_back(Locs);
1638  return Result;
1639 }
1640 
1641 AliasResult BasicAAResult::aliasCheckRecursive(
1642  const Value *V1, LocationSize V1Size,
1643  const Value *V2, LocationSize V2Size,
1644  AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1645  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1646  AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1647  if (Result != AliasResult::MayAlias)
1648  return Result;
1649  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1650  AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1651  Result.swap();
1652  if (Result != AliasResult::MayAlias)
1653  return Result;
1654  }
1655 
1656  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1657  AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1658  if (Result != AliasResult::MayAlias)
1659  return Result;
1660  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1661  AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1662  Result.swap();
1663  if (Result != AliasResult::MayAlias)
1664  return Result;
1665  }
1666 
1667  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1668  AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1669  if (Result != AliasResult::MayAlias)
1670  return Result;
1671  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1672  AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1673  Result.swap();
1674  if (Result != AliasResult::MayAlias)
1675  return Result;
1676  }
1677 
1678  // If both pointers are pointing into the same object and one of them
1679  // accesses the entire object, then the accesses must overlap in some way.
1680  if (O1 == O2) {
1681  bool NullIsValidLocation = NullPointerIsDefined(&F);
1682  if (V1Size.isPrecise() && V2Size.isPrecise() &&
1683  (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1684  isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1686  }
1687 
1688  return AliasResult::MayAlias;
1689 }
1690 
1691 /// Check whether two Values can be considered equivalent.
1692 ///
1693 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1694 /// they can not be part of a cycle in the value graph by looking at all
1695 /// visited phi nodes an making sure that the phis cannot reach the value. We
1696 /// have to do this because we are looking through phi nodes (That is we say
1697 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1698 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1699  const Value *V2) {
1700  if (V != V2)
1701  return false;
1702 
1703  const Instruction *Inst = dyn_cast<Instruction>(V);
1704  if (!Inst)
1705  return true;
1706 
1707  if (VisitedPhiBBs.empty())
1708  return true;
1709 
1710  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1711  return false;
1712 
1713  // Make sure that the visited phis cannot reach the Value. This ensures that
1714  // the Values cannot come from different iterations of a potential cycle the
1715  // phi nodes could be involved in.
1716  for (const auto *P : VisitedPhiBBs)
1717  if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1718  return false;
1719 
1720  return true;
1721 }
1722 
1723 /// Computes the symbolic difference between two de-composed GEPs.
1724 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1725  const DecomposedGEP &SrcGEP) {
1726  DestGEP.Offset -= SrcGEP.Offset;
1727  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1728  // Find V in Dest. This is N^2, but pointer indices almost never have more
1729  // than a few variable indexes.
1730  bool Found = false;
1731  for (auto I : enumerate(DestGEP.VarIndices)) {
1732  VariableGEPIndex &Dest = I.value();
1733  if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1734  !Dest.Val.hasSameCastsAs(Src.Val))
1735  continue;
1736 
1737  // If we found it, subtract off Scale V's from the entry in Dest. If it
1738  // goes to zero, remove the entry.
1739  if (Dest.Scale != Src.Scale) {
1740  Dest.Scale -= Src.Scale;
1741  Dest.IsNSW = false;
1742  } else {
1743  DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1744  }
1745  Found = true;
1746  break;
1747  }
1748 
1749  // If we didn't consume this entry, add it to the end of the Dest list.
1750  if (!Found) {
1751  VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1752  DestGEP.VarIndices.push_back(Entry);
1753  }
1754  }
1755 }
1756 
1757 bool BasicAAResult::constantOffsetHeuristic(
1758  const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1759  LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1760  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1761  !MaybeV2Size.hasValue())
1762  return false;
1763 
1764  const uint64_t V1Size = MaybeV1Size.getValue();
1765  const uint64_t V2Size = MaybeV2Size.getValue();
1766 
1767  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1768 
1769  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1770  Var0.Scale != -Var1.Scale ||
1771  Var0.Val.V->getType() != Var1.Val.V->getType())
1772  return false;
1773 
1774  // We'll strip off the Extensions of Var0 and Var1 and do another round
1775  // of GetLinearExpression decomposition. In the example above, if Var0
1776  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1777 
1778  LinearExpression E0 =
1779  GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1780  LinearExpression E1 =
1781  GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1782  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1783  !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1784  return false;
1785 
1786  // We have a hit - Var0 and Var1 only differ by a constant offset!
1787 
1788  // If we've been sext'ed then zext'd the maximum difference between Var0 and
1789  // Var1 is possible to calculate, but we're just interested in the absolute
1790  // minimum difference between the two. The minimum distance may occur due to
1791  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1792  // the minimum distance between %i and %i + 5 is 3.
1793  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1794  MinDiff = APIntOps::umin(MinDiff, Wrapped);
1795  APInt MinDiffBytes =
1796  MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1797 
1798  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1799  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1800  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1801  // V2Size can fit in the MinDiffBytes gap.
1802  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1803  MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1804 }
1805 
1806 //===----------------------------------------------------------------------===//
1807 // BasicAliasAnalysis Pass
1808 //===----------------------------------------------------------------------===//
1809 
1810 AnalysisKey BasicAA::Key;
1811 
1813  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1814  auto &AC = AM.getResult<AssumptionAnalysis>(F);
1815  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1816  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1817  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1818 }
1819 
1822 }
1823 
1824 char BasicAAWrapperPass::ID = 0;
1825 
1826 void BasicAAWrapperPass::anchor() {}
1827 
1829  "Basic Alias Analysis (stateless AA impl)", true, true)
1835  "Basic Alias Analysis (stateless AA impl)", true, true)
1836 
1838  return new BasicAAWrapperPass();
1839 }
1840 
1842  auto &ACT = getAnalysis<AssumptionCacheTracker>();
1843  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1844  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1845  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1846 
1847  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1848  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1849  &DTWP.getDomTree(),
1850  PVWP ? &PVWP->getResult() : nullptr));
1851 
1852  return false;
1853 }
1854 
1856  AU.setPreservesAll();
1861 }
1862 
1864  return BasicAAResult(
1865  F.getParent()->getDataLayout(), F,
1866  P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1867  P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1868 }
i
i
Definition: README.txt:29
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
llvm::ModRefInfo::NoModRef
@ NoModRef
The access neither references nor modifies the value stored in memory.
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
AssumptionCache.h
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:269
llvm::AliasResult::MayAlias
@ MayAlias
The two locations may or may not alias.
Definition: AliasAnalysis.h:103
llvm::EarliestEscapeInfo::removeInstruction
void removeInstruction(Instruction *I)
Definition: BasicAliasAnalysis.cpp:228
llvm::AliasResult::PartialAlias
@ PartialAlias
The two locations alias, but only due to a partial overlap.
Definition: AliasAnalysis.h:105
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
llvm::Operator
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:32
llvm::SmallPtrSetImpl::erase
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:379
llvm::MaskedValueIsZero
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
Definition: ValueTracking.cpp:391
llvm::PHINode::incoming_values
op_range incoming_values()
Definition: Instructions.h:2785
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
llvm::BasicAA::run
BasicAAResult run(Function &F, FunctionAnalysisManager &AM)
Definition: BasicAliasAnalysis.cpp:1812
llvm::generic_gep_type_iterator
Definition: GetElementPtrTypeIterator.h:31
llvm::LocationSize::afterPointer
constexpr static LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:124
print
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
Definition: ArchiveWriter.cpp:189
PHI
Rewrite undef for PHI
Definition: AMDGPURewriteUndefForPHI.cpp:101
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:223
llvm::isNonEscapingLocalObject
bool isNonEscapingLocalObject(const Value *V, SmallDenseMap< const Value *, bool, 8 > *IsCapturedCache=nullptr)
Returns true if the pointer is to a function-local object that never escapes from the function.
Definition: CaptureTracking.cpp:488
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
llvm::MemoryLocation::Ptr
const Value * Ptr
The address of the start of the location.
Definition: MemoryLocation.h:218
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:444
llvm::Function
Definition: Function.h:60
llvm::AnalysisManager::Invalidator::invalidate
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:679
getMinimalExtentFrom
static uint64_t getMinimalExtentFrom(const Value &V, const LocationSize &LocSize, const DataLayout &DL, bool NullIsValidLoc)
Return the minimal extent from V to the end of the underlying object, assuming the result is used in ...
Definition: BasicAliasAnalysis.cpp:166
P
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Definition: README-SSE.txt:411
Pass.h
llvm::IntrinsicInst::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
Definition: IntrinsicInst.h:53
llvm::BasicAAResult::DecomposedGEP::print
void print(raw_ostream &OS) const
Definition: BasicAliasAnalysis.cpp:498
llvm::SetVector::size
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:77
GetElementPtrTypeIterator.h
llvm::FunctionModRefBehavior::readOnly
static FunctionModRefBehavior readOnly()
Create FunctionModRefBehavior that can read any memory.
Definition: AliasAnalysis.h:267
llvm::FunctionModRefBehavior::none
static FunctionModRefBehavior none()
Create FunctionModRefBehavior that cannot read or write any memory.
Definition: AliasAnalysis.h:262
llvm::SmallVector< VariableGEPIndex, 4 >
Statistic.h
CaptureTracking.h
llvm::enumerate
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are are pair (A,B) such that A is the 0-based ...
Definition: STLExtras.h:2068
llvm::initializeBasicAAWrapperPassPass
void initializeBasicAAWrapperPassPass(PassRegistry &)
llvm::APInt::getMaxValue
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition: APInt.h:186
llvm::GlobalVariable
Definition: GlobalVariable.h:39
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:994
llvm::isModSet
bool isModSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:168
llvm::BasicAAWrapperPass::runOnFunction
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition: BasicAliasAnalysis.cpp:1841
llvm::GlobalAlias
Definition: GlobalAlias.h:28
ValueTracking.h
MaxLookupSearchDepth
static const unsigned MaxLookupSearchDepth
Definition: BasicAliasAnalysis.cpp:85
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:139
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition: DenseMap.h:302
llvm::isKnownNonZero
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
Definition: ValueTracking.cpp:340
llvm::PhiValuesWrapperPass
Wrapper pass for the legacy pass manager.
Definition: PhiValues.h:138
APInt.h
llvm::Depth
@ Depth
Definition: SIMachineScheduler.h:36
true
basic Basic Alias true
Definition: BasicAliasAnalysis.cpp:1835
llvm::MemoryLocation::Size
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
Definition: MemoryLocation.h:227
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1411
MemoryBuiltins.h
llvm::ObjectSizeOpts::NullIsUnknownSize
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Definition: MemoryBuiltins.h:162
GetLinearExpression
static LinearExpression GetLinearExpression(const CastedValue &Val, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT)
Analyzes the specified value as a linear expression: "A*V + B", where A and B are constant integers.
Definition: BasicAliasAnalysis.cpp:348
isIntrinsicCall
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
Definition: BasicAliasAnalysis.cpp:738
llvm::BasicAAResult::DecomposedGEP::dump
void dump() const
Definition: BasicAliasAnalysis.cpp:494
llvm::Optional< bool >
llvm::AliasResult
The possible results of an alias query.
Definition: AliasAnalysis.h:82
llvm::SmallPtrSet< Value *, 4 >
llvm::ObjectSizeOpts::RoundToAlign
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
Definition: MemoryBuiltins.h:158
impl
place backedge safepoints impl
Definition: PlaceSafepoints.cpp:613
Operator.h
getBitWidth
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Definition: ValueTracking.cpp:98
llvm::max
Expected< ExpressionValue > max(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:337
llvm::dump
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
Definition: SparseBitVector.h:877
RHS
Value * RHS
Definition: X86PartialReduction.cpp:76
llvm::gep_type_begin
gep_type_iterator gep_type_begin(const User *GEP)
Definition: GetElementPtrTypeIterator.h:123
llvm::LocationSize::isPrecise
bool isPrecise() const
Definition: MemoryLocation.h:166
BasicAliasAnalysis.h
llvm::BasicAAWrapperPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: BasicAliasAnalysis.cpp:1855
llvm::LinearPolySize::isScalable
bool isScalable() const
Returns whether the size is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:298
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition: APInt.h:2137
llvm::BasicAAResult::DecomposedGEP::VarIndices
SmallVector< VariableGEPIndex, 4 > VarIndices
Definition: BasicAliasAnalysis.cpp:489
F
#define F(x, y, z)
Definition: MD5.cpp:55
KnownBits.h
AliasAnalysis.h
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
llvm::ConstantRange::Signed
@ Signed
Definition: ConstantRange.h:319
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
Definition: AliasAnalysis.cpp:968
llvm::ConstantPointerNull
A constant pointer value that points to null.
Definition: Constants.h:535
llvm::AAQueryInfo
This class stores info we want to provide to or retain within an alias query.
Definition: AliasAnalysis.h:469
isObjectSmallerThan
static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V is smaller than Size.
Definition: BasicAliasAnalysis.cpp:122
Instruction.h
CommandLine.h
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1171
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
llvm::LocationSize::mayBeBeforePointer
bool mayBeBeforePointer() const
Whether accesses before the base pointer are possible.
Definition: MemoryLocation.h:174
llvm::APInt::isNonNegative
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:317
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:312
llvm::CaptureInfo::~CaptureInfo
virtual ~CaptureInfo()=0
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:24
llvm::ModRefInfo::Ref
@ Ref
The access may reference the value stored in memory.
Constants.h
llvm::BasicAAWrapperPass::ID
static char ID
Definition: BasicAliasAnalysis.h:179
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Definition: Instructions.h:2795
llvm::BasicAAResult::DecomposedGEP::Base
const Value * Base
Definition: BasicAliasAnalysis.cpp:485
llvm::AArch64::RP
@ RP
Definition: AArch64ISelLowering.h:477
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::LocationSize
Definition: MemoryLocation.h:66
llvm::FunctionModRefBehavior::argMemOnly
static FunctionModRefBehavior argMemOnly(ModRefInfo MR)
Create FunctionModRefBehavior that can only access argument memory.
Definition: AliasAnalysis.h:277
llvm::LibFunc
LibFunc
Definition: TargetLibraryInfo.h:35
Intrinsics.h
InstrTypes.h
llvm::EarliestEscapeInfo::isNotCapturedBeforeOrAt
bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I) override
Definition: BasicAliasAnalysis.cpp:203
llvm::FunctionModRefBehavior::inaccessibleMemOnly
static FunctionModRefBehavior inaccessibleMemOnly(ModRefInfo MR)
Create FunctionModRefBehavior that can only access inaccessible memory.
Definition: AliasAnalysis.h:282
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::LocationSize::getValue
uint64_t getValue() const
Definition: MemoryLocation.h:159
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::AAQueryInfo::CI
CaptureInfo * CI
Definition: AliasAnalysis.h:483
TargetLibraryInfo.h
llvm::dwarf::Index
Index
Definition: Dwarf.h:472
llvm::isPotentiallyReachable
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition: CFG.cpp:232
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
llvm::TargetLibraryInfo::getLibFunc
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Definition: TargetLibraryInfo.h:294
llvm::Instruction
Definition: Instruction.h:42
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:302
llvm::STATISTIC
STATISTIC(NumFunctions, "Total number of functions")
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
llvm::AArch64CC::LE
@ LE
Definition: AArch64BaseInfo.h:268
llvm::AAQueryInfo::AssumptionBasedResults
SmallVector< AAQueryInfo::LocPair, 4 > AssumptionBasedResults
Location pairs for which an assumption based result is currently stored.
Definition: AliasAnalysis.h:494
llvm::getArgumentAliasingToReturnedPointer
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
Definition: ValueTracking.cpp:4432
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Definition: ValueTracking.cpp:4487
SmallPtrSet.h
MaxNumPhiBBsValueReachabilityCheck
const unsigned MaxNumPhiBBsValueReachabilityCheck
Cutoff after which to stop analysing a set of phi nodes potentially involved in a cycle.
Definition: BasicAliasAnalysis.cpp:81
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", "Basic Alias Analysis (stateless AA impl)", true, true) INITIALIZE_PASS_END(BasicAAWrapperPass
llvm::AnalysisManager::Invalidator
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:661
isObjectSize
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V has size Size.
Definition: BasicAliasAnalysis.cpp:186
llvm::ConstantRange::add
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
Definition: ConstantRange.cpp:989
llvm::APInt::countTrailingZeros
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Definition: APInt.h:1543
llvm::generic_gep_type_iterator::getStructTypeOrNull
StructType * getStructTypeOrNull() const
Definition: GetElementPtrTypeIterator.h:116
llvm::ConstantRange::smul_fast
ConstantRange smul_fast(const ConstantRange &Other) const
Return range of possible values for a signed multiplication of this and Other.
Definition: ConstantRange.cpp:1139
llvm::getObjectSize
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Definition: MemoryBuiltins.cpp:614
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition: Instructions.h:2791
llvm::None
const NoneType None
Definition: None.h:24
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
Type.h
llvm::APInt::srem
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition: APInt.cpp:1734
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:58
llvm::PhiValues::getValuesForPhi
const ValueSet & getValuesForPhi(const PHINode *PN)
Get the underlying values of a phi.
Definition: PhiValues.cpp:125
llvm::BasicAAResult::pointsToConstantMemory
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Chases pointers until we find a (constant global) or not.
Definition: BasicAliasAnalysis.cpp:687
llvm::OutputFileType::Object
@ Object
llvm::APInt::getOneBitSet
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:222
llvm::AAQueryInfo::LocPair
std::pair< AACacheLoc, AACacheLoc > LocPair
Definition: AliasAnalysis.h:471
llvm::SmallPtrSetImplBase::empty
bool empty() const
Definition: SmallPtrSet.h:92
llvm::BasicAAResult::alias
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI)
Definition: BasicAliasAnalysis.cpp:860
llvm::cl::opt< bool >
llvm::CaptureInfo::isNotCapturedBeforeOrAt
virtual bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I)=0
llvm::FindEarliestCapture
Instruction * FindEarliestCapture(const Value *V, Function &F, bool ReturnCaptures, bool StoreCaptures, const DominatorTree &DT, const SmallPtrSetImpl< const Value * > &EphValues, unsigned MaxUsesToExplore=0)
Definition: CaptureTracking.cpp:294
llvm::BasicAAResult::invalidate
bool invalidate(Function &Fn, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
Definition: BasicAliasAnalysis.cpp:87
llvm::ConstantRange::getBitWidth
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Definition: ConstantRange.h:204
llvm::Value::getPointerDereferenceableBytes
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
Definition: Value.cpp:843
llvm::AMDGPU::Hwreg::Offset
Offset
Definition: SIDefines.h:416
llvm::TargetLibraryInfoWrapperPass
Definition: TargetLibraryInfo.h:468
uint64_t
llvm::LocationSize::hasValue
bool hasValue() const
Definition: MemoryLocation.h:156
llvm::LocationSize::isZero
bool isZero() const
Definition: MemoryLocation.h:171
llvm::isModAndRefSet
bool isModAndRefSet(const ModRefInfo MRI)
Definition: AliasAnalysis.h:165
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:644
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition: AssumptionCache.h:173
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::BasicAAWrapperPass
Legacy wrapper pass to provide the BasicAAResult object.
Definition: BasicAliasAnalysis.h:173
llvm::AAQueryInfo::NumAssumptionUses
int NumAssumptionUses
How many active NoAlias assumption uses there are.
Definition: AliasAnalysis.h:489
llvm::FunctionModRefBehavior::inaccessibleOrArgMemOnly
static FunctionModRefBehavior inaccessibleOrArgMemOnly(ModRefInfo MR)
Create FunctionModRefBehavior that can only access inaccessible or argument memory.
Definition: AliasAnalysis.h:288
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:53
MemoryLocation.h
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: PassManager.h:69
llvm::FunctionModRefBehavior::writeOnly
static FunctionModRefBehavior writeOnly()
Create FunctionModRefBehavior that can write any memory.
Definition: AliasAnalysis.h:272
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:439
llvm::PhiValuesAnalysis
The analysis pass which yields a PhiValues.
Definition: PhiValues.h:115
llvm::isKnownNonEqual
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
Definition: ValueTracking.cpp:379
adjustToIndexSize
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize)
To ensure a pointer offset fits in an integer of size IndexSize (in bits) when that size is smaller t...
Definition: BasicAliasAnalysis.cpp:448
llvm::ConstantRange::smul_sat
ConstantRange smul_sat(const ConstantRange &Other) const
Perform a signed saturating multiplication of two constant ranges.
Definition: ConstantRange.cpp:1582
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
llvm::computeKnownBits
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Definition: ValueTracking.cpp:222
llvm::ModRefInfo
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition: AliasAnalysis.h:147
llvm::AAQueryInfo::CacheEntry
Definition: AliasAnalysis.h:472
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:853
llvm::AliasResult::setOffset
void setOffset(int32_t NewOffset)
Definition: AliasAnalysis.h:123
SI
StandardInstrumentations SI(Debug, VerifyEach)
llvm::generic_gep_type_iterator::getIndexedType
Type * getIndexedType() const
Definition: GetElementPtrTypeIterator.h:70
llvm::Value::stripPointerCastsForAliasAnalysis
const Value * stripPointerCastsForAliasAnalysis() const
Strip off pointer casts, all-zero GEPs, single-argument phi nodes and invariant group info.
Definition: Value.cpp:701
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition: Instructions.h:1737
llvm::TargetLibraryInfo::has
bool has(LibFunc F) const
Tests whether a library function is available.
Definition: TargetLibraryInfo.h:328
llvm::BasicAAResult::DecomposedGEP::Offset
APInt Offset
Definition: BasicAliasAnalysis.cpp:487
llvm::AAResultBase::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Definition: AliasAnalysis.h:1189
llvm::isEscapeSource
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
Definition: AliasAnalysis.cpp:972
llvm::AMDGPU::IsaInfo::TargetIDSetting::Off
@ Off
llvm::GEPOperator
Definition: Operator.h:375
_
#define _
Definition: HexagonMCCodeEmitter.cpp:47
llvm::APInt
Class for arbitrary precision integers.
Definition: APInt.h:75
CFG.h
llvm::AliasResult::swap
void swap(bool DoSwap=true)
Helper for processing AliasResult for swapped memory location pairs.
Definition: AliasAnalysis.h:131
llvm::BasicAAResult::DecomposedGEP::InBounds
Optional< bool > InBounds
Definition: BasicAliasAnalysis.cpp:492
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition: AssumptionCache.h:202
llvm::BinaryOperator
Definition: InstrTypes.h:188
Mul
BinaryOperator * Mul
Definition: X86PartialReduction.cpp:70
llvm::BasicAAResult::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Checks to see if the specified callsite can clobber the specified memory object.
Definition: BasicAliasAnalysis.cpp:874
DataLayout.h
llvm::StructType
Class to represent struct types.
Definition: DerivedTypes.h:213
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:42
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::BasicAAResult::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Get the location associated with a pointer argument of a callsite.
Definition: BasicAliasAnalysis.cpp:822
getParent
static const Function * getParent(const Value *V)
Definition: BasicAliasAnalysis.cpp:838
llvm::AliasResult::NoAlias
@ NoAlias
The two locations do not alias at all.
Definition: AliasAnalysis.h:100
Compiler.h
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:1818
llvm::ModRefInfo::Mod
@ Mod
The access may modify the value stored in memory.
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::computeConstantRange
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Definition: ValueTracking.cpp:7253
ConstantRange.h
llvm::AliasResult::MustAlias
@ MustAlias
The two locations precisely alias each other.
Definition: AliasAnalysis.h:107
llvm::TargetLibraryInfoWrapperPass::getTLI
TargetLibraryInfo & getTLI(const Function &F)
Definition: TargetLibraryInfo.h:480
llvm::ObjectSizeOpts
Various options to control the behavior of getObjectSize.
Definition: MemoryBuiltins.h:138
llvm::BasicAAResult
This is the AA result object for the basic, local, and stateless alias analysis.
Definition: BasicAliasAnalysis.h:44
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:308
llvm::SimpleCaptureInfo::isNotCapturedBeforeOrAt
bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I) override
Definition: BasicAliasAnalysis.cpp:198
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition: SmallPtrSet.h:93
llvm::make_scope_exit
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
llvm::FunctionModRefBehavior
Summary of how a function affects memory in the program.
Definition: AliasAnalysis.h:208
llvm::AAResultBase::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Definition: AliasAnalysis.h:1201
Argument.h
llvm::APInt::zext
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:973
llvm::GEPOperator::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Method to return the address space of the pointer operand.
Definition: Operator.h:435
PhiValues.h
llvm::BasicAAWrapperPass::BasicAAWrapperPass
BasicAAWrapperPass()
Definition: BasicAliasAnalysis.cpp:1820
llvm::createBasicAAWrapperPass
FunctionPass * createBasicAAWrapperPass()
Definition: BasicAliasAnalysis.cpp:1837
Attributes.h
Constant.h
llvm::NVPTX::PTXLdStInstCode::V2
@ V2
Definition: NVPTX.h:123
getModRefBehaviorFromAttrs
static FunctionModRefBehavior getModRefBehaviorFromAttrs(AttributeSet Attrs)
Definition: BasicAliasAnalysis.cpp:743
llvm::KnownBits
Definition: KnownBits.h:23
llvm::isIdentifiedObject
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
Definition: AliasAnalysis.cpp:956
EnableRecPhiAnalysis
static cl::opt< bool > EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, cl::init(true))
Enable analysis of recursive PHI nodes.
llvm::AssumptionCacheTracker::getAssumptionCache
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
Definition: AssumptionCache.cpp:285
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition: PassAnalysisSupport.h:130
llvm::LocationSize::beforeOrAfterPointer
constexpr static LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
Definition: MemoryLocation.h:130
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:84
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
llvm::GEPOperator::isInBounds
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Definition: Operator.h:392
GlobalVariable.h
llvm::TypeSize
Definition: TypeSize.h:435
llvm::AnalysisManager::getCachedResult
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:793
Casting.h
Function.h
llvm::AAQueryInfo::AliasCache
AliasCacheT AliasCache
Definition: AliasAnalysis.h:481
llvm::BitWidth
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:147
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition: TargetLibraryInfo.h:222
isWriteOnlyParam
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo &TLI)
Returns true if this is a writeonly (i.e Mod only) parameter.
Definition: BasicAliasAnalysis.cpp:797
llvm::AAResultBase::pointsToConstantMemory
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Definition: AliasAnalysis.h:1184
llvm::MCID::Add
@ Add
Definition: MCInstrDesc.h:185
GlobalAlias.h
llvm::AMDGPU::Hwreg::Width
Width
Definition: SIDefines.h:439
llvm::ConstantRange
This class represents a range of values.
Definition: ConstantRange.h:47
llvm::AAQueryInfo::withEmptyCache
AAQueryInfo withEmptyCache()
Create a new AAQueryInfo based on this one, but with the cache cleared.
Definition: AliasAnalysis.h:501
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:46
llvm::BasicAAResult::DecomposedGEP
Definition: BasicAliasAnalysis.cpp:483
llvm::APInt::abs
APInt abs() const
Get the absolute value.
Definition: APInt.h:1686
AA
llvm::createLegacyPMBasicAAResult
BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F)
A helper for the legacy pass manager to create a BasicAAResult object populated to the best of our ab...
Definition: BasicAliasAnalysis.cpp:1863
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:267
llvm::AAResultBase< BasicAAResult >::getBestAAResults
AAResultsProxy getBestAAResults()
Get a proxy for the best AA result set to query at this time.
Definition: AliasAnalysis.h:1176
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
aa
basic aa
Definition: BasicAliasAnalysis.cpp:1834
Instructions.h
llvm::MemoryLocation::UnknownSize
@ UnknownSize
Definition: MemoryLocation.h:215
llvm::GEPOperator::getSourceElementType
Type * getSourceElementType() const
Definition: Operator.cpp:47
llvm::AttributeSet
Definition: Attributes.h:290
llvm::ConstantRange::intersectWith
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
Definition: ConstantRange.cpp:523
SmallVector.h
User.h
llvm::ConstantRange::fromKnownBits
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
Definition: ConstantRange.cpp:58
llvm::User::op_begin
op_iterator op_begin()
Definition: User.h:234
Dominators.h
llvm::APIntOps::GreatestCommonDivisor
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
Definition: APInt.cpp:759
N
#define N
llvm::DenseMapBase::try_emplace
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
Definition: DenseMap.h:222
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:91
llvm::SmallVectorImpl::pop_back_val
T pop_back_val()
Definition: SmallVector.h:659
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition: Instructions.h:2815
llvm::PHINode
Definition: Instructions.h:2699
ScopeExit.h
llvm::MipsISD::Ins
@ Ins
Definition: MipsISelLowering.h:160
llvm::MemoryLocation::getBeforeOrAfter
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
Definition: MemoryLocation.h:278
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1174
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition: PassAnalysisSupport.h:81
DerivedTypes.h
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:42
llvm::AnalysisUsage::addUsedIfAvailable
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
Definition: PassAnalysisSupport.h:117
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:308
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1474
GEP
Hexagon Common GEP
Definition: HexagonCommonGEP.cpp:171
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::ConstantRange::isEmptySet
bool isEmptySet() const
Return true if this set contains no members.
Definition: ConstantRange.cpp:370
llvm::ConstantRange::sextOrTrunc
ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
Definition: ConstantRange.cpp:869
llvm::NullPointerIsDefined
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2049
Value.h
InitializePasses.h
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition: TargetLibraryInfo.h:443
llvm::BasicAAResult::getModRefBehavior
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Returns the behavior when calling the given call site.
Definition: BasicAliasAnalysis.cpp:763
llvm::ModRefInfo::ModRef
@ ModRef
The access may reference and may modify the value stored in memory.
llvm::MemoryLocation
Representation for a specific memory location.
Definition: MemoryLocation.h:210
llvm::isMallocOrCallocLikeFn
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory similar to malloc or...
Definition: MemoryBuiltins.cpp:332
MergeAliasResults
static AliasResult MergeAliasResults(AliasResult A, AliasResult B)
Definition: BasicAliasAnalysis.cpp:1285
llvm::AAQueryInfo::Depth
unsigned Depth
Query depth used to distinguish recursive queries.
Definition: AliasAnalysis.h:486
Other
Optional< std::vector< StOtherPiece > > Other
Definition: ELFYAML.cpp:1247
notDifferentParent
static bool notDifferentParent(const Value *O1, const Value *O2)
Definition: BasicAliasAnalysis.cpp:851
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:164
isBaseOfObject
static bool isBaseOfObject(const Value *V)
Return true if we know V to the base address of the corresponding memory object.
Definition: BasicAliasAnalysis.cpp:1033
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38
llvm::User::op_end
op_iterator op_end()
Definition: User.h:236