//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;
84
87 // We don't care if this analysis itself is preserved, it has no state. But
88 // we need to check that the analyses it depends on have been. Note that we
89 // may be created without handles to some analyses and in that case don't
90 // depend on them.
91 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
92 (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
93 return true;
94
95 // Otherwise this analysis result remains valid.
96 return false;
97}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //    char *p = (char*)malloc(100)
  //    char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
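  // (Illustrative: with RoundToAlign, a 10-byte object with 4-byte alignment
  // is treated as 12 bytes for this comparison.)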
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
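/// (Illustrative: a dereferenceable(16) pointer queried with a precise 4-byte
/// location yields max(16, 4) = 16 bytes.)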
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
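/// E.g., for an i32 value V, TruncBits=16, SExtBits=8 and ZExtBits=8 denote
/// zext(sext(trunc(V to i16) to i24) to i32).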
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
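    // (The inner zext makes the value non-negative, so the following sext is
    // equivalent to a zext.)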
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
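    // E.g., in i8 with X=100, Y=-50, Z=2: (X +nsw Y) *nsw Z == 100 does not
    // wrap, but the distributed term X *nsw Z == 200 would.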
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, in particular, for 32b pointers with negative indices
/// that rely on two's complement wrap-arounds for precise alias information
/// where the maximum index size is 64b.
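/// E.g., truncating a 64-bit offset of 0x00000000FFFFFFFF to a 32-bit index
/// size yields -1, matching the 32-bit two's complement wrap-around.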
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
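/// For example (illustrative), "getelementptr inbounds i32, ptr %p, i64 %i"
/// decomposes into Base = %p, Offset = 0 and one VarIndex {V = %i, Scale = 4}.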
DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator cases we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with a returns attribute), it is crucial to
        // use this function because it should be in sync with CaptureTracking.
        // Not using it may cause weird miscompilations where two aliasing
        // pointers are assumed to not alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
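      // (E.g., an i32 index under a 64-bit index size gets SExtBits = 32.)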
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      Scale = adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);

  return Result;
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding memory
/// object.  This implies that any address less than V must be out of bounds
/// for the underlying object.  Note that just being isIdentifiedObject() is
/// not enough - for example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers.  Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const uint64_t LSize = VLeftSize.getValue();
    if (Off.ult(LSize)) {
      // Conservatively drop processing if a phi was visited and/or offset is
      // too big.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
          (Off + VRightSize.getValue()).ule(LSize)) {
        // Memory referenced by right pointer is nested. Save the offset in
        // cache. Note that originally offset estimated as GEP1-V2, but
        // AliasResult contains the shift that represents GEP1+Offset=V2.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }
    return AliasResult::NoAlias;
  }

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
    else
      OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale)));
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
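  // (E.g., with GCD = 8, ModOffset = 2, V1Size = 4 and V2Size = 2, the two
  // accesses cover [2..6) and [0..2) modulo 8 and cannot overlap.)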
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
      // If V != 0, then abs(VarIndex) > 0.
      MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1);

      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
        // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input.  It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid to all elements and needs to conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  if (CtxI && EnableSeparateStorageAnalysis) {
    for (auto &AssumeVH : AC.assumptions()) {
      if (!AssumeVH)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(AssumeVH);

      for (unsigned Idx = 0; Idx < Assume->getNumOperandBundles(); Idx++) {
        OperandBundleUse OBU = Assume->getOperandBundleAt(Idx);
        if (OBU.getTagName() == "separate_storage") {
          assert(OBU.Inputs.size() == 2);
          const Value *Hint1 = OBU.Inputs[0].get();
          const Value *Hint2 = OBU.Inputs[1].get();
          // This is often a no-op; instcombine rewrites this for us. No-op
          // getUnderlyingObject calls are fast, though.
          const Value *HintO1 = getUnderlyingObject(Hint1);
          const Value *HintO2 = getUnderlyingObject(Hint2);

          if (((O1 == HintO1 && O2 == HintO2) ||
               (O1 == HintO2 && O2 == HintO1)) &&
              isValidAssumeForContext(Assume, CtxI, DT))
            return AliasResult::NoAlias;
        }
      }
    }
  }

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }
1544
1545 // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1546 // for recursive queries. For this reason, this limit is chosen to be large
1547 // enough to be very rarely hit, while still being small enough to avoid
1548 // stack overflows.
1549 if (AAQI.Depth >= 512)
1550 return AliasResult::MayAlias;
1551
1552 // Check the cache before climbing up use-def chains. This also terminates
1553 // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1554 // cache key, because a query that returns MustAlias or NoAlias under
1555 // MayBeCrossIteration==false may become MayAlias under MayBeCrossIteration==true.
1556 AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1557 {V2, V2Size, AAQI.MayBeCrossIteration});
1558 const bool Swapped = V1 > V2;
1559 if (Swapped)
1560 std::swap(Locs.first, Locs.second);
1561 const auto &Pair = AAQI.AliasCache.try_emplace(
1562 Locs, AAQueryInfo::CacheEntry{AliasResult::MayAlias, 0});
1563 if (!Pair.second) {
1564 auto &Entry = Pair.first->second;
1565 if (!Entry.isDefinitive()) {
1566 // Remember that we used an assumption.
1567 ++Entry.NumAssumptionUses;
1568 ++AAQI.NumAssumptionUses;
1569 }
1570 // Cache contains sorted {V1,V2} pairs but we should return original order.
1571 auto Result = Entry.Result;
1572 Result.swap(Swapped);
1573 return Result;
1574 }
1575
1576 int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1577 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1578 AliasResult Result =
1579 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1580
1581 auto It = AAQI.AliasCache.find(Locs);
1582 assert(It != AAQI.AliasCache.end() && "Must be in cache");
1583 auto &Entry = It->second;
1584
1585 // Check whether a NoAlias assumption has been used, but disproven.
1586 bool AssumptionDisproven =
1587 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1588 if (AssumptionDisproven)
1589 Result = AliasResult::MayAlias;
1590
1591 // This is a definitive result now, when considered as a root query.
1592 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1593 Entry.Result = Result;
1594 // Cache contains sorted {V1,V2} pairs.
1595 Entry.Result.swap(Swapped);
1596 Entry.NumAssumptionUses = -1;
1597
1598 // If the assumption has been disproven, remove any results that may have
1599 // been based on this assumption. Do this after the Entry updates above to
1600 // avoid iterator invalidation.
1601 if (AssumptionDisproven)
1602 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1603 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1604
1605 // The result may still be based on assumptions higher up in the chain.
1606 // Remember it, so it can be purged from the cache later.
1607 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1608 Result != AliasResult::MayAlias)
1609 AAQI.AssumptionBasedResults.push_back(Locs);
1610 return Result;
1611}
1612
1613AliasResult BasicAAResult::aliasCheckRecursive(
1614 const Value *V1, LocationSize V1Size,
1615 const Value *V2, LocationSize V2Size,
1616 AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1617 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1618 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1619 if (Result != AliasResult::MayAlias)
1620 return Result;
1621 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1622 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1623 Result.swap();
1624 if (Result != AliasResult::MayAlias)
1625 return Result;
1626 }
1627
1628 if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1629 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1630 if (Result != AliasResult::MayAlias)
1631 return Result;
1632 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1633 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1634 Result.swap();
1635 if (Result != AliasResult::MayAlias)
1636 return Result;
1637 }
1638
1639 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1640 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1641 if (Result != AliasResult::MayAlias)
1642 return Result;
1643 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1644 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1645 Result.swap();
1646 if (Result != AliasResult::MayAlias)
1647 return Result;
1648 }
1649
1650 // If both pointers are pointing into the same object and one of them
1651 // accesses the entire object, then the accesses must overlap in some way.
1652 if (O1 == O2) {
1653 bool NullIsValidLocation = NullPointerIsDefined(&F);
1654 if (V1Size.isPrecise() && V2Size.isPrecise() &&
1655 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1656 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1657 return AliasResult::PartialAlias;
1658 }
1659
1660 return AliasResult::MayAlias;
1661}
1662
1663/// Check whether two Values can be considered equivalent.
1664///
1665/// If the values may come from different cycle iterations, this will also
1666/// check that the values are not part of a cycle. We have to do this because
1667/// we are looking through phi nodes, that is, we say
1668/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
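/// For example, a pointer %p defined inside a loop is not considered equal to
/// "itself" when MayBeCrossIteration is set, since the two occurrences of %p
/// may hold values from different iterations.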
1669bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1670 const Value *V2,
1671 const AAQueryInfo &AAQI) {
1672 if (V != V2)
1673 return false;
1674
1675 if (!AAQI.MayBeCrossIteration)
1676 return true;
1677
1678 // Non-instructions and instructions in the entry block cannot be part of
1679 // a loop.
1680 const Instruction *Inst = dyn_cast<Instruction>(V);
1681 if (!Inst || Inst->getParent()->isEntryBlock())
1682 return true;
1683
1684 // Check whether the instruction is part of a cycle, by checking whether the
1685 // block can (non-trivially) reach itself.
1686 BasicBlock *BB = const_cast<BasicBlock *>(Inst->getParent());
1687 SmallVector<BasicBlock *> Succs(successors(BB));
1688 return !Succs.empty() &&
1689 !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT);
1690}
1691
1692/// Computes the symbolic difference between two de-composed GEPs.
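/// For example (sketch): subtracting Src = (Offset: 4, VarIndices: [2*%x])
/// from Dest = (Offset: 12, VarIndices: [2*%x]) leaves Dest as
/// (Offset: 8, VarIndices: []); the matching scaled index cancels and only
/// the constant difference remains.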
1693void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1694 const DecomposedGEP &SrcGEP,
1695 const AAQueryInfo &AAQI) {
1696 DestGEP.Offset -= SrcGEP.Offset;
1697 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1698 // Find Src's value in Dest. This is N^2, but decomposed GEPs almost never
1699 // have more than a few variable indices.
1700 bool Found = false;
1701 for (auto I : enumerate(DestGEP.VarIndices)) {
1702 VariableGEPIndex &Dest = I.value();
1703 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
1704 !Dest.Val.hasSameCastsAs(Src.Val))
1705 continue;
1706
1707 // If we found it, subtract Src's scale from the matching entry in Dest.
1708 // If the difference goes to zero, remove the entry entirely.
1709 if (Dest.Scale != Src.Scale) {
1710 Dest.Scale -= Src.Scale;
1711 Dest.IsNSW = false;
1712 } else {
1713 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1714 }
1715 Found = true;
1716 break;
1717 }
1718
1719 // If we didn't consume this entry, add it to the end of the Dest list.
1720 if (!Found) {
1721 VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1722 DestGEP.VarIndices.push_back(Entry);
1723 }
1724 }
1725}
1726
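/// Heuristic for the case where a GEP's two variable indices cancel up to a
/// constant, e.g. (sketch) indices zext(%x + 1) and -zext(%x) with scales of
/// equal magnitude: the two pointers are then a fixed number of bytes apart,
/// and if both access sizes fit inside that gap the locations cannot overlap.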
1727bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1728 LocationSize MaybeV1Size,
1729 LocationSize MaybeV2Size,
1730 AssumptionCache *AC,
1731 DominatorTree *DT,
1732 const AAQueryInfo &AAQI) {
1733 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1734 !MaybeV2Size.hasValue())
1735 return false;
1736
1737 const uint64_t V1Size = MaybeV1Size.getValue();
1738 const uint64_t V2Size = MaybeV2Size.getValue();
1739
1740 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1741
1742 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1743 Var0.Scale != -Var1.Scale ||
1744 Var0.Val.V->getType() != Var1.Val.V->getType())
1745 return false;
1746
1747 // We'll strip off the extensions of Var0 and Var1 and do another round
1748 // of GetLinearExpression decomposition. For example, if Var0 is
1749 // zext(%x + 1), we should get E0.Val == %x and E0.Offset == 1.
1750
1751 LinearExpression E0 =
1752 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1753 LinearExpression E1 =
1754 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1755 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1756 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1757 return false;
1758
1759 // We have a hit - Var0 and Var1 only differ by a constant offset!
1760
1761 // Even if the values have been sext'ed and then zext'ed, the maximum
1762 // difference between Var0 and Var1 is computable; but we're just interested
1763 // in the absolute minimum difference between the two. The minimum distance
1764 // may occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
1765 // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
1766 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1767 MinDiff = APIntOps::umin(MinDiff, Wrapped);
1768 APInt MinDiffBytes =
1769 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1770
1771 // We can't say definitively whether GEP1 is before or after V2 due to
1772 // wrapping arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and
1773 // for other values GEP1 > V2). We'll therefore only declare NoAlias if both
1774 // V1Size and V2Size can fit in the MinDiffBytes gap.
1775 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1776 MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1777}
1778
1779//===----------------------------------------------------------------------===//
1780// BasicAliasAnalysis Pass
1781//===----------------------------------------------------------------------===//
1782
1783AnalysisKey BasicAA::Key;
1784
1785BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1786 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1787 auto &AC = AM.getResult<AssumptionAnalysis>(F);
1788 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1789 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
1790}
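// Usage note (illustrative command, not from this file): this result is
// normally consumed via the AAManager aggregation, e.g.
//   opt -aa-pipeline=basic-aa -passes=aa-eval -disable-output foo.ll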
1791
1792BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1793 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1794}
1795
1796char BasicAAWrapperPass::ID = 0;
1797
1798void BasicAAWrapperPass::anchor() {}
1799
1800INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1801 "Basic Alias Analysis (stateless AA impl)", true, true)
1802INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1803INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1804INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1805INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1806 "Basic Alias Analysis (stateless AA impl)", true, true)
1807
1808FunctionPass *llvm::createBasicAAWrapperPass() {
1809 return new BasicAAWrapperPass();
1810}
1811
1812bool BasicAAWrapperPass::runOnFunction(Function &F) {
1813 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1814 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1815 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1816
1817 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1818 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1819 &DTWP.getDomTree()));
1820
1821 return false;
1822}
1823
1824void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1825 AU.setPreservesAll();
1826 AU.addRequiredTransitive<AssumptionCacheTracker>();
1827 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1828 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1829}