63#define DEBUG_TYPE "basicaa"
77STATISTIC(SearchLimitReached,
"Number of times the limit to "
78 "decompose GEPs is reached");
79STATISTIC(SearchTimes,
"Number of times a GEP is decomposed");
107 bool RoundToAlign =
false) {
122 bool NullIsValidLoc) {
166 bool NullIsValidLoc) {
171 bool CanBeNull, CanBeFreed;
173 V.getPointerDereferenceableBytes(
DL, CanBeNull, CanBeFreed);
174 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
178 DerefBytes = std::max(DerefBytes, LocSize.
getValue());
205 auto Iter = EarliestEscapes.insert({
Object,
nullptr});
209 false,
true, DT, EphValues);
210 if (EarliestCapture) {
211 auto Ins = Inst2Obj.insert({EarliestCapture, {}});
212 Ins.first->second.push_back(
Object);
214 Iter.first->second = EarliestCapture;
218 if (!Iter.first->second)
221 return I != Iter.first->second &&
226 auto Iter = Inst2Obj.find(
I);
227 if (Iter != Inst2Obj.end()) {
228 for (
const Value *Obj : Iter->second)
229 EarliestEscapes.erase(Obj);
242 unsigned ZExtBits = 0;
243 unsigned SExtBits = 0;
244 unsigned TruncBits = 0;
246 explicit CastedValue(
const Value *V) : V(V) {}
247 explicit CastedValue(
const Value *V,
unsigned ZExtBits,
unsigned SExtBits,
249 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
252 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
256 CastedValue withValue(
const Value *NewV)
const {
257 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
261 CastedValue withZExtOfValue(
const Value *NewV)
const {
262 unsigned ExtendBy =
V->getType()->getPrimitiveSizeInBits() -
264 if (ExtendBy <= TruncBits)
265 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
268 ExtendBy -= TruncBits;
269 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
273 CastedValue withSExtOfValue(
const Value *NewV)
const {
274 unsigned ExtendBy =
V->getType()->getPrimitiveSizeInBits() -
276 if (ExtendBy <= TruncBits)
277 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
280 ExtendBy -= TruncBits;
281 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
285 assert(
N.getBitWidth() ==
V->getType()->getPrimitiveSizeInBits() &&
286 "Incompatible bit width");
287 if (TruncBits)
N =
N.trunc(
N.getBitWidth() - TruncBits);
288 if (SExtBits)
N =
N.sext(
N.getBitWidth() + SExtBits);
289 if (ZExtBits)
N =
N.zext(
N.getBitWidth() + ZExtBits);
294 assert(
N.getBitWidth() ==
V->getType()->getPrimitiveSizeInBits() &&
295 "Incompatible bit width");
296 if (TruncBits)
N =
N.truncate(
N.getBitWidth() - TruncBits);
297 if (SExtBits)
N =
N.signExtend(
N.getBitWidth() + SExtBits);
298 if (ZExtBits)
N =
N.zeroExtend(
N.getBitWidth() + ZExtBits);
302 bool canDistributeOver(
bool NUW,
bool NSW)
const {
306 return (!ZExtBits || NUW) && (!SExtBits || NSW);
309 bool hasSameCastsAs(
const CastedValue &
Other)
const {
310 return ZExtBits ==
Other.ZExtBits && SExtBits ==
Other.SExtBits &&
311 TruncBits ==
Other.TruncBits;
316struct LinearExpression {
324 LinearExpression(
const CastedValue &Val,
const APInt &Scale,
326 : Val(Val), Scale(Scale),
Offset(
Offset), IsNSW(IsNSW) {}
328 LinearExpression(
const CastedValue &Val) : Val(Val), IsNSW(
true) {
329 unsigned BitWidth = Val.getBitWidth();
334 LinearExpression mul(
const APInt &
Other,
bool MulIsNSW)
const {
337 bool NSW = IsNSW && (
Other.isOne() || (MulIsNSW &&
Offset.isZero()));
352 if (
const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
353 return LinearExpression(Val,
APInt(Val.getBitWidth(), 0),
354 Val.evaluateWith(Const->getValue()),
true);
356 if (
const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
357 if (
ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
358 APInt RHS = Val.evaluateWith(RHSC->getValue());
361 bool NUW =
true, NSW =
true;
362 if (isa<OverflowingBinaryOperator>(BOp)) {
363 NUW &= BOp->hasNoUnsignedWrap();
364 NSW &= BOp->hasNoSignedWrap();
366 if (!Val.canDistributeOver(NUW, NSW))
374 LinearExpression
E(Val);
375 switch (BOp->getOpcode()) {
380 case Instruction::Or:
388 case Instruction::Add: {
395 case Instruction::Sub: {
402 case Instruction::Mul:
407 case Instruction::Shl:
413 if (
RHS.getLimitedValue() > Val.getBitWidth())
418 E.Offset <<=
RHS.getLimitedValue();
419 E.Scale <<=
RHS.getLimitedValue();
427 if (isa<ZExtInst>(Val.V))
429 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
432 if (isa<SExtInst>(Val.V))
434 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
446 assert(IndexSize <=
Offset.getBitWidth() &&
"Invalid IndexSize!");
447 unsigned ShiftBits =
Offset.getBitWidth() - IndexSize;
448 return (
Offset << ShiftBits).ashr(ShiftBits);
454struct VariableGEPIndex {
469 OS <<
"(V=" << Val.V->getName()
470 <<
", zextbits=" << Val.ZExtBits
471 <<
", sextbits=" << Val.SExtBits
472 <<
", truncbits=" << Val.TruncBits
473 <<
", scale=" << Scale <<
")";
522 const Instruction *CxtI = dyn_cast<Instruction>(V);
524 unsigned MaxIndexSize =
DL.getMaxIndexSizeInBits();
525 DecomposedGEP Decomposed;
526 Decomposed.Offset =
APInt(MaxIndexSize, 0);
529 const Operator *Op = dyn_cast<Operator>(V);
532 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
533 if (!GA->isInterposable()) {
534 V = GA->getAliasee();
542 if (
Op->getOpcode() == Instruction::BitCast ||
543 Op->getOpcode() == Instruction::AddrSpaceCast) {
544 V =
Op->getOperand(0);
548 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
550 if (
const auto *
PHI = dyn_cast<PHINode>(V)) {
552 if (
PHI->getNumIncomingValues() == 1) {
553 V =
PHI->getIncomingValue(0);
556 }
else if (
const auto *Call = dyn_cast<CallBase>(V)) {
578 if (Decomposed.InBounds == std::nullopt)
581 Decomposed.InBounds =
false;
588 unsigned IndexSize =
DL.getIndexSizeInBits(AS);
590 bool GepHasConstantOffset =
true;
592 I !=
E; ++
I, ++GTI) {
597 unsigned FieldNo = cast<ConstantInt>(
Index)->getZExtValue();
601 Decomposed.Offset +=
DL.getStructLayout(STy)->getElementOffset(FieldNo);
618 CIdx->getValue().sextOrTrunc(MaxIndexSize);
628 GepHasConstantOffset =
false;
632 unsigned Width =
Index->getType()->getIntegerBitWidth();
633 unsigned SExtBits = IndexSize >
Width ? IndexSize -
Width : 0;
634 unsigned TruncBits = IndexSize <
Width ?
Width - IndexSize : 0;
636 CastedValue(
Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
641 Decomposed.Offset +=
LE.Offset.sext(MaxIndexSize);
642 APInt Scale =
LE.Scale.sext(MaxIndexSize);
648 for (
unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
649 if (Decomposed.VarIndices[i].Val.V ==
LE.Val.V &&
650 Decomposed.VarIndices[i].Val.hasSameCastsAs(
LE.Val)) {
651 Scale += Decomposed.VarIndices[i].Scale;
652 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
662 VariableGEPIndex Entry = {
LE.Val, Scale, CxtI,
LE.IsNSW};
663 Decomposed.VarIndices.push_back(Entry);
668 if (GepHasConstantOffset)
673 }
while (--MaxLookup);
677 SearchLimitReached++;
684 assert(Visited.empty() &&
"Visited must be cleared after use!");
687 unsigned MaxLookup = 8;
694 if (!Visited.insert(V).second)
698 if (IgnoreLocals && isa<AllocaInst>(V))
708 if (
Arg->hasNoAliasAttr() &&
Arg->onlyReadsMemory()) {
719 if (!GV->isConstant())
733 if (
const PHINode *PN = dyn_cast<PHINode>(V)) {
735 if (PN->getNumIncomingValues() > MaxLookup)
743 }
while (!Worklist.
empty() && --MaxLookup);
746 if (!Worklist.
empty())
760 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
762 if (
const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
766 if (Call->hasReadingOperandBundles())
768 if (Call->hasClobberingOperandBundles())
779 switch (
F->getIntrinsicID()) {
780 case Intrinsic::experimental_guard:
781 case Intrinsic::experimental_deoptimize:
788 return F->getMemoryEffects();
793 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
796 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
799 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
807 if (
const Instruction *inst = dyn_cast<Instruction>(V)) {
808 if (!inst->getParent())
813 if (
const Argument *arg = dyn_cast<Argument>(V))
814 return arg->getParent();
824 return !F1 || !F2 || F1 == F2;
832 "BasicAliasAnalysis doesn't support interprocedural queries.");
833 return aliasCheck(LocA.
Ptr, LocA.
Size, LocB.
Ptr, LocB.
Size, AAQI, CtxI);
846 "AliasAnalysis query involving multiple functions!");
855 if (isa<AllocaInst>(
Object))
856 if (
const CallInst *CI = dyn_cast<CallInst>(Call))
857 if (CI->isTailCall() &&
858 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
863 if (
auto *AI = dyn_cast<AllocaInst>(
Object))
864 if (!AI->isStaticAlloca() &&
isIntrinsicCall(Call, Intrinsic::stackrestore))
877 unsigned OperandNo = 0;
878 for (
auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
879 CI != CE; ++CI, ++OperandNo) {
883 if (!(*CI)->getType()->isPointerTy() ||
884 (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
885 !Call->isByValArgument(OperandNo)))
890 if (Call->doesNotAccessMemory(OperandNo))
903 if (Call->onlyReadsMemory(OperandNo)) {
908 if (Call->onlyWritesMemory(OperandNo)) {
1006 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1022 if (!isa<GEPOperator>(V2))
1034 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1035 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1038 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1043 subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
1047 if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1052 if (isa<GEPOperator>(V2)) {
1054 if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1062 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1083 if (DecompGEP1.VarIndices.empty()) {
1088 const Value *RightPtr = GEP1;
1091 const bool Swapped =
Off.isNegative();
1109 if (
Off.ult(LSize)) {
1113 if (VRightSize.
hasValue() &&
Off.ule(INT32_MAX) &&
1114 (Off + VRightSize.
getValue()).ule(LSize)) {
1132 for (
unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1133 const VariableGEPIndex &
Index = DecompGEP1.VarIndices[i];
1135 APInt ScaleForGCD = Scale;
1141 GCD = ScaleForGCD.
abs();
1146 true, &AC,
Index.CxtI);
1155 "Bit widths are normalized to MaxIndexSize");
1168 APInt ModOffset = DecompGEP1.Offset.
srem(GCD);
1172 (GCD - ModOffset).uge(V1Size.
getValue()))
1187 std::optional<APInt> MinAbsVarIndex;
1188 if (DecompGEP1.VarIndices.size() == 1) {
1190 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1191 if (Var.Val.TruncBits == 0 &&
1194 MinAbsVarIndex =
APInt(Var.Scale.getBitWidth(), 1);
1198 auto MultiplyByScaleNoWrap = [](
const VariableGEPIndex &Var) {
1202 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1206 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1207 if (MaxScaleValueBW <= 0)
1209 return Var.Scale.ule(
1214 if (MultiplyByScaleNoWrap(Var)) {
1216 MinAbsVarIndex = Var.Scale.abs();
1219 }
else if (DecompGEP1.VarIndices.size() == 2) {
1224 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1225 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1226 if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1230 MinAbsVarIndex = Var0.Scale.abs();
1233 if (MinAbsVarIndex) {
1235 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1236 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1243 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1272 if (
const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1273 if (isValueEqualInPotentialCycles(
SI->getCondition(), SI2->getCondition(),
1309 if (
const PHINode *PN2 = dyn_cast<PHINode>(V2))
1310 if (PN2->getParent() == PN->
getParent()) {
1311 std::optional<AliasResult> Alias;
1332 bool isRecursive =
false;
1333 auto CheckForRecPhi = [&](
Value *PV) {
1344 Value *OnePhi =
nullptr;
1350 if (isa<PHINode>(PV1)) {
1351 if (OnePhi && OnePhi != PV1) {
1362 if (CheckForRecPhi(PV1))
1365 if (UniqueSrc.
insert(PV1).second)
1369 if (OnePhi && UniqueSrc.
size() > 1)
1404 for (
unsigned i = 1, e = V1Srcs.
size(); i != e; ++i) {
1430 V2 =
V2->stripPointerCastsForAliasAnalysis();
1434 if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1443 if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1500 TLI, NullIsValidLocation)) ||
1503 TLI, NullIsValidLocation)))
1511 AssumeInst *Assume = cast<AssumeInst>(AssumeVH);
1515 if (OBU.
getTagName() ==
"separate_storage") {
1524 if (((O1 == HintO1 && O2 == HintO2) ||
1525 (O1 == HintO2 && O2 == HintO1)) &&
1549 if (AAQI.
Depth >= 512)
1558 const bool Swapped = V1 >
V2;
1564 auto &Entry = Pair.first->second;
1565 if (!Entry.isDefinitive()) {
1567 ++Entry.NumAssumptionUses;
1571 auto Result = Entry.Result;
1579 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1583 auto &Entry = It->second;
1586 bool AssumptionDisproven =
1588 if (AssumptionDisproven)
1595 Entry.Result.swap(Swapped);
1596 Entry.NumAssumptionUses = -1;
1601 if (AssumptionDisproven)
1617 if (
const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1621 }
else if (
const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1628 if (
const PHINode *PN = dyn_cast<PHINode>(V1)) {
1632 }
else if (
const PHINode *PN = dyn_cast<PHINode>(V2)) {
1639 if (
const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1643 }
else if (
const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1669bool BasicAAResult::isValueEqualInPotentialCycles(
const Value *V,
1680 const Instruction *Inst = dyn_cast<Instruction>(V);
1688 return !Succs.empty() &&
1693void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1694 const DecomposedGEP &SrcGEP,
1696 DestGEP.Offset -= SrcGEP.Offset;
1697 for (
const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1701 for (
auto I :
enumerate(DestGEP.VarIndices)) {
1702 VariableGEPIndex &Dest =
I.value();
1703 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
1704 !Dest.Val.hasSameCastsAs(Src.Val))
1709 if (Dest.Scale != Src.Scale) {
1710 Dest.Scale -= Src.Scale;
1713 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() +
I.index());
1721 VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1722 DestGEP.VarIndices.push_back(Entry);
1727bool BasicAAResult::constantOffsetHeuristic(
const DecomposedGEP &
GEP,
1733 if (
GEP.VarIndices.size() != 2 || !MaybeV1Size.
hasValue() ||
1740 const VariableGEPIndex &Var0 =
GEP.VarIndices[0], &Var1 =
GEP.VarIndices[1];
1742 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1743 Var0.Scale != -Var1.Scale ||
1744 Var0.Val.V->getType() != Var1.Val.V->getType())
1751 LinearExpression E0 =
1753 LinearExpression E1 =
1755 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1756 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1766 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1768 APInt MinDiffBytes =
1769 MinDiff.
zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.
abs();
1775 return MinDiffBytes.
uge(V1Size +
GEP.Offset.abs()) &&
1776 MinDiffBytes.
uge(V2Size +
GEP.Offset.abs());
1798void BasicAAWrapperPass::anchor() {}
1801 "Basic Alias Analysis (stateless AA impl)",
true,
true)
1813 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1814 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1815 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1818 TLIWP.getTLI(
F), ACT.getAssumptionCache(
F),
1819 &DTWP.getDomTree()));
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file implements a class to represent arbitrary precision integral constant values and operations...
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, cl::init(true))
Enable analysis of recursive PHI nodes.
static const Function * getParent(const Value *V)
static cl::opt< bool > EnableSeparateStorageAnalysis("basic-aa-separate-storage", cl::Hidden, cl::init(false))
static bool isBaseOfObject(const Value *V)
Return true if we know V to the base address of the corresponding memory object.
static bool notDifferentParent(const Value *O1, const Value *O2)
static LinearExpression GetLinearExpression(const CastedValue &Val, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT)
Analyzes the specified value as a linear expression: "A*V + B", where A and B are constant integers.
static AliasResult MergeAliasResults(AliasResult A, AliasResult B)
static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V is smaller than Size.
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize)
To ensure a pointer offset fits in an integer of size IndexSize (in bits) when that size is smaller t...
static uint64_t getMinimalExtentFrom(const Value &V, const LocationSize &LocSize, const DataLayout &DL, bool NullIsValidLoc)
Return the minimal extent from V to the end of the underlying object, assuming the result is used in ...
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
static const unsigned MaxLookupSearchDepth
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V has size Size.
This is the interface for LLVM's primary stateless and local alias analysis.
block Block Frequency Analysis
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::optional< std::vector< StOtherPiece > > Other
This file provides utility analysis objects describing memory locations.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
place backedge safepoints impl
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file provides utility classes that use RAII to save and restore values.
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
This class stores info we want to provide to or retain within an alias query.
SmallVector< AAQueryInfo::LocPair, 4 > AssumptionBasedResults
Location pairs for which an assumption based result is currently stored.
unsigned Depth
Query depth used to distinguish recursive queries.
int NumAssumptionUses
How many active NoAlias assumption uses there are.
std::pair< AACacheLoc, AACacheLoc > LocPair
bool MayBeCrossIteration
Tracks whether the accesses may be on different cycle iterations.
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool IgnoreLocals)
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
The main low level interface to the alias analysis implementation.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isNegative() const
Determine sign of this APInt.
unsigned countr_zero() const
Count the number of trailing zero bits.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
The possible results of an alias query.
void swap(bool DoSwap=true)
Helper for processing AliasResult for swapped memory location pairs.
@ MayAlias
The two locations may or may not alias.
@ NoAlias
The two locations do not alias at all.
@ PartialAlias
The two locations alias, but only due to a partial overlap.
@ MustAlias
The two locations precisely alias each other.
void setOffset(int32_t NewOffset)
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
This class represents an incoming formal argument to a Function.
This represents the llvm.assume intrinsic.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptions()
Access the list of assumption handles currently tracked for this function.
This is the AA result object for the basic, local, and stateless alias analysis.
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Checks to see if the specified callsite can clobber the specified memory object.
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Get the location associated with a pointer argument of a callsite.
MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI)
Returns the behavior when calling the given call site.
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
bool invalidate(Function &Fn, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI, const Instruction *CtxI)
Legacy wrapper pass to provide the BasicAAResult object.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
BasicAAResult run(Function &F, FunctionAnalysisManager &AM)
LLVM Basic Block Representation.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
A constant pointer value that points to null.
This class represents a range of values.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
ConstantRange smul_fast(const ConstantRange &Other) const
Return range of possible values for a signed multiplication of this and Other.
bool isEmptySet() const
Return true if this set contains no members.
ConstantRange smul_sat(const ConstantRange &Other) const
Perform a signed saturating multiplication of two constant ranges.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
bool erase(const KeyT &Val)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I) override
void removeInstruction(Instruction *I)
FunctionPass class - This class is used to implement most global optimizations.
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Type * getSourceElementType() const
unsigned getPointerAddressSpace() const
Method to return the address space of the pointer operand.
Module * getParent()
Get the module that this global value is contained inside of...
const BasicBlock * getParent() const
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
uint64_t getValue() const
bool mayBeBeforePointer() const
Whether accesses before the base pointer are possible.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static constexpr LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
Summary of how a function affects memory in the program.
static MemoryEffects writeOnly()
Create MemoryEffects that can write any memory.
static MemoryEffects readOnly()
Create MemoryEffects that can read any memory.
static MemoryEffects inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffects that can only access inaccessible memory.
Representation for a specific memory location.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
const Value * Ptr
The address of the start of the location.
This is a utility class that provides an abstraction for the common functionality between Instruction...
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
This class represents the LLVM 'select' instruction.
bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I) override
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
StringRef getName() const
Return a constant reference to the value's name.
const Value * stripPointerCastsForAliasAnalysis() const
Strip off pointer casts, all-zero GEPs, single-argument phi nodes and invariant group info.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
StructType * getStructTypeOrNull() const
Type * getIndexedType() const
This class implements an extremely fast bulk output stream that can only output to a stream.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are are tuples (A,...
bool isPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const BasicBlock *StopBB, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is at least one path from a block in 'Worklist' to 'StopBB' without passing t...
auto successors(const MachineBasicBlock *BB)
Instruction * FindEarliestCapture(const Value *V, Function &F, bool ReturnCaptures, bool StoreCaptures, const DominatorTree &DT, const SmallPtrSetImpl< const Value * > &EphValues, unsigned MaxUsesToExplore=0)
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
bool isNonEscapingLocalObject(const Value *V, SmallDenseMap< const Value *, bool, 8 > *IsCapturedCache=nullptr)
Returns true if the pointer is to a function-local object that never escapes from the function.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
bool isModSet(const ModRefInfo MRI)
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
FunctionPass * createBasicAAWrapperPass()
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory similar to malloc or calloc.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
@ Ref
The access may reference the value stored in memory.
@ ModRef
The access may reference and may modify the value stored in memory.
@ Mod
The access may modify the value stored in memory.
@ NoModRef
The access neither references nor modifies the value stored in memory.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
void initializeBasicAAWrapperPassPass(PassRegistry &)
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
bool isModAndRefSet(const ModRefInfo MRI)
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
constexpr unsigned BitWidth
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalObject.
gep_type_iterator gep_type_begin(const User *GEP)
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in 'ExclusionSet', returning true if uncertain.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
std::optional< bool > InBounds
SmallVector< VariableGEPIndex, 4 > VarIndices
void print(raw_ostream &OS) const
A special type used by analysis passes to provide an address that identifies that particular analysis...
virtual bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I)=0
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
A lightweight accessor for an operand bundle meant to be passed around by value.
StringRef getTagName() const
Return the tag of this operand bundle as a string.
A utility class that uses RAII to save and restore the value of a variable.