Go to the documentation of this file.
62 #define DEBUG_TYPE "basicaa"
73 STATISTIC(SearchLimitReached,
"Number of times the limit to "
74 "decompose GEPs is reached");
75 STATISTIC(SearchTimes,
"Number of times a GEP is decomposed");
109 if (isa<CallBase>(V))
115 if (isa<LoadInst>(V))
123 if (isa<IntToPtrInst>(V))
133 bool RoundToAlign =
false) {
148 bool NullIsValidLoc) {
192 bool NullIsValidLoc) {
197 bool CanBeNull, CanBeFreed;
200 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
231 auto Iter = EarliestEscapes.insert({
Object,
nullptr});
235 false,
true, DT, EphValues);
236 if (EarliestCapture) {
237 auto Ins = Inst2Obj.insert({EarliestCapture, {}});
240 Iter.first->second = EarliestCapture;
244 if (!Iter.first->second)
247 return I != Iter.first->second &&
252 auto Iter = Inst2Obj.find(
I);
253 if (Iter != Inst2Obj.end()) {
254 for (
const Value *Obj : Iter->second)
255 EarliestEscapes.erase(Obj);
268 unsigned ZExtBits = 0;
269 unsigned SExtBits = 0;
270 unsigned TruncBits = 0;
272 explicit CastedValue(
const Value *V) : V(V) {}
273 explicit CastedValue(
const Value *V,
unsigned ZExtBits,
unsigned SExtBits,
275 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
282 CastedValue withValue(
const Value *NewV)
const {
283 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
287 CastedValue withZExtOfValue(
const Value *NewV)
const {
290 if (ExtendBy <= TruncBits)
291 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
294 ExtendBy -= TruncBits;
295 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
299 CastedValue withSExtOfValue(
const Value *NewV)
const {
302 if (ExtendBy <= TruncBits)
303 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
306 ExtendBy -= TruncBits;
307 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
312 "Incompatible bit width");
313 if (TruncBits)
N =
N.trunc(
N.getBitWidth() - TruncBits);
314 if (SExtBits)
N =
N.sext(
N.getBitWidth() + SExtBits);
315 if (ZExtBits)
N =
N.zext(
N.getBitWidth() + ZExtBits);
321 "Incompatible bit width");
322 if (TruncBits)
N =
N.truncate(
N.getBitWidth() - TruncBits);
323 if (SExtBits)
N =
N.signExtend(
N.getBitWidth() + SExtBits);
324 if (ZExtBits)
N =
N.zeroExtend(
N.getBitWidth() + ZExtBits);
328 bool canDistributeOver(
bool NUW,
bool NSW)
const {
332 return (!ZExtBits || NUW) && (!SExtBits || NSW);
335 bool hasSameCastsAs(
const CastedValue &Other)
const {
336 return ZExtBits ==
Other.ZExtBits && SExtBits ==
Other.SExtBits &&
337 TruncBits ==
Other.TruncBits;
342 struct LinearExpression {
350 LinearExpression(
const CastedValue &Val,
const APInt &Scale,
351 const APInt &Offset,
bool IsNSW)
352 : Val(Val), Scale(Scale),
Offset(
Offset), IsNSW(IsNSW) {}
354 LinearExpression(
const CastedValue &Val) : Val(Val), IsNSW(
true) {
355 unsigned BitWidth = Val.getBitWidth();
360 LinearExpression mul(
const APInt &Other,
bool MulIsNSW)
const {
363 bool NSW = IsNSW && (
Other.isOne() || (MulIsNSW &&
Offset.isZero()));
364 return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
378 if (
const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
379 return LinearExpression(Val,
APInt(Val.getBitWidth(), 0),
380 Val.evaluateWith(Const->getValue()),
true);
382 if (
const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
383 if (
ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
384 APInt RHS = Val.evaluateWith(RHSC->getValue());
387 bool NUW =
true, NSW =
true;
388 if (isa<OverflowingBinaryOperator>(BOp)) {
389 NUW &= BOp->hasNoUnsignedWrap();
390 NSW &= BOp->hasNoSignedWrap();
392 if (!Val.canDistributeOver(NUW, NSW))
400 LinearExpression
E(Val);
401 switch (BOp->getOpcode()) {
406 case Instruction::Or:
421 case Instruction::Sub: {
433 case Instruction::Shl:
439 if (
RHS.getLimitedValue() > Val.getBitWidth())
444 E.Offset <<=
RHS.getLimitedValue();
445 E.Scale <<=
RHS.getLimitedValue();
453 if (isa<ZExtInst>(Val.V))
455 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
458 if (isa<SExtInst>(Val.V))
460 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
472 assert(IndexSize <= Offset.getBitWidth() &&
"Invalid IndexSize!");
473 unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
474 return (Offset << ShiftBits).ashr(ShiftBits);
480 struct VariableGEPIndex {
495 OS <<
"(V=" << Val.V->getName()
496 <<
", zextbits=" << Val.ZExtBits
497 <<
", sextbits=" << Val.SExtBits
498 <<
", truncbits=" << Val.TruncBits
499 <<
", scale=" << Scale <<
")";
548 const Instruction *CxtI = dyn_cast<Instruction>(V);
550 unsigned MaxIndexSize =
DL.getMaxIndexSizeInBits();
551 DecomposedGEP Decomposed;
552 Decomposed.Offset =
APInt(MaxIndexSize, 0);
558 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
559 if (!GA->isInterposable()) {
560 V = GA->getAliasee();
568 if (
Op->getOpcode() == Instruction::BitCast ||
569 Op->getOpcode() == Instruction::AddrSpaceCast) {
570 V =
Op->getOperand(0);
576 if (
const auto *PHI = dyn_cast<PHINode>(V)) {
578 if (PHI->getNumIncomingValues() == 1) {
579 V = PHI->getIncomingValue(0);
582 }
else if (
const auto *Call = dyn_cast<CallBase>(V)) {
604 if (Decomposed.InBounds ==
None)
607 Decomposed.InBounds =
false;
621 unsigned IndexSize =
DL.getIndexSizeInBits(AS);
623 bool GepHasConstantOffset =
true;
625 I !=
E; ++
I, ++GTI) {
630 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
634 Decomposed.Offset +=
DL.getStructLayout(STy)->getElementOffset(FieldNo);
639 if (
const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
644 CIdx->getValue().sextOrTrunc(MaxIndexSize);
648 GepHasConstantOffset =
false;
652 unsigned Width =
Index->getType()->getIntegerBitWidth();
653 unsigned SExtBits = IndexSize >
Width ? IndexSize -
Width : 0;
654 unsigned TruncBits = IndexSize <
Width ?
Width - IndexSize : 0;
656 CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
662 Decomposed.Offset +=
LE.Offset.sext(MaxIndexSize);
663 APInt Scale =
LE.Scale.sext(MaxIndexSize);
669 for (
unsigned i = 0,
e = Decomposed.VarIndices.size();
i !=
e; ++
i) {
670 if (Decomposed.VarIndices[
i].Val.V ==
LE.Val.V &&
671 Decomposed.VarIndices[
i].Val.hasSameCastsAs(
LE.Val)) {
672 Scale += Decomposed.VarIndices[
i].Scale;
673 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() +
i);
683 VariableGEPIndex Entry = {
LE.Val, Scale, CxtI,
LE.IsNSW};
684 Decomposed.VarIndices.push_back(Entry);
689 if (GepHasConstantOffset)
694 }
while (--MaxLookup);
698 SearchLimitReached++;
707 assert(Visited.empty() &&
"Visited must be cleared after use!");
709 unsigned MaxLookup = 8;
711 Worklist.push_back(Loc.
Ptr);
714 if (!Visited.insert(V).second) {
720 if (OrLocal && isa<AllocaInst>(V))
728 if (!GV->isConstant()) {
737 Worklist.push_back(
SI->getTrueValue());
738 Worklist.push_back(
SI->getFalseValue());
744 if (
const PHINode *PN = dyn_cast<PHINode>(V)) {
746 if (PN->getNumIncomingValues() > MaxLookup) {
757 }
while (!Worklist.empty() && --MaxLookup);
760 return Worklist.empty();
770 if (Call->doesNotAccessMemory())
778 if (Call->onlyReadsMemory())
780 else if (Call->onlyWritesMemory())
783 if (Call->onlyAccessesArgMemory())
785 else if (Call->onlyAccessesInaccessibleMemory())
787 else if (Call->onlyAccessesInaccessibleMemOrArgMem())
793 if (!Call->hasOperandBundles())
794 if (
const Function *F = Call->getCalledFunction())
805 if (
F->doesNotAccessMemory())
811 if (
F->onlyReadsMemory())
813 else if (
F->onlyWritesMemory())
816 if (
F->onlyAccessesArgMemory())
818 else if (
F->onlyAccessesInaccessibleMemory())
820 else if (
F->onlyAccessesInaccessibleMemOrArgMem())
829 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
839 if (Call->getCalledFunction() &&
841 F == LibFunc_memset_pattern16 && TLI.
has(
F))
858 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
861 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
869 if (
const Instruction *inst = dyn_cast<Instruction>(V)) {
870 if (!inst->getParent())
875 if (
const Argument *arg = dyn_cast<Argument>(V))
876 return arg->getParent();
886 return !F1 || !F2 || F1 == F2;
894 "BasicAliasAnalysis doesn't support interprocedural queries.");
895 return aliasCheck(LocA.
Ptr, LocA.
Size, LocB.
Ptr, LocB.
Size, AAQI);
908 "AliasAnalysis query involving multiple functions!");
917 if (isa<AllocaInst>(
Object))
918 if (
const CallInst *CI = dyn_cast<CallInst>(Call))
919 if (CI->isTailCall() &&
920 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
925 if (
auto *AI = dyn_cast<AllocaInst>(
Object))
926 if (!AI->isStaticAlloca() &&
isIntrinsicCall(Call, Intrinsic::stackrestore))
938 bool IsMustAlias =
true;
940 unsigned OperandNo = 0;
941 for (
auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
942 CI != CE; ++CI, ++OperandNo) {
946 if (!(*CI)->getType()->isPointerTy() ||
947 (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
948 !Call->isByValArgument(OperandNo)))
953 if (Call->doesNotAccessMemory(OperandNo))
968 if (Call->onlyReadsMemory(OperandNo)) {
973 if (Call->onlyWritesMemory(OperandNo)) {
1017 if (
auto *Inst = dyn_cast<AnyMemTransferInst>(Call)) {
1112 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1128 if (!isa<GEPOperator>(
V2))
1140 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1141 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(
V2, DL, &AC, DT);
1144 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base ==
V2)
1149 subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1153 if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1158 if (isa<GEPOperator>(
V2)) {
1160 if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1168 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1190 if (DecompGEP1.VarIndices.empty()) {
1195 const Value *RightPtr = GEP1;
1198 const bool Swapped =
Off.isNegative();
1216 if (
Off.ult(LSize)) {
1220 if (VRightSize.
hasValue() &&
Off.ule(INT32_MAX) &&
1221 (Off + VRightSize.
getValue()).ule(LSize)) {
1239 for (
unsigned i = 0,
e = DecompGEP1.VarIndices.size();
i !=
e; ++
i) {
1240 const VariableGEPIndex &
Index = DecompGEP1.VarIndices[
i];
1242 APInt ScaleForGCD = Scale;
1248 GCD = ScaleForGCD.
abs();
1253 true, &AC,
Index.CxtI);
1262 "Bit widths are normalized to MaxIndexSize");
1275 APInt ModOffset = DecompGEP1.Offset.
srem(GCD);
1279 (GCD - ModOffset).uge(V1Size.
getValue()))
1295 if (DecompGEP1.VarIndices.size() == 1) {
1297 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1298 if (Var.Val.TruncBits == 0 &&
1301 MinAbsVarIndex =
APInt(Var.Scale.getBitWidth(), 1);
1305 auto MultiplyByScaleNoWrap = [](
const VariableGEPIndex &Var) {
1309 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1313 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1314 if (MaxScaleValueBW <= 0)
1316 return Var.Scale.ule(
1321 if (MultiplyByScaleNoWrap(Var)) {
1323 MinAbsVarIndex = Var.Scale.abs();
1326 }
else if (DecompGEP1.VarIndices.size() == 2) {
1331 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1332 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1333 if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1334 Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.
empty() &&
1337 MinAbsVarIndex = Var0.Scale.abs();
1340 if (MinAbsVarIndex) {
1342 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1343 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1350 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1380 if (
SI->getCondition() == SI2->getCondition()) {
1416 if (
const PHINode *PN2 = dyn_cast<PHINode>(
V2))
1417 if (PN2->getParent() == PN->
getParent()) {
1439 bool isRecursive =
false;
1440 auto CheckForRecPhi = [&](
Value *PV) {
1460 for (
Value *PV1 : PhiValueSet) {
1461 if (CheckForRecPhi(PV1))
1463 V1Srcs.push_back(PV1);
1469 Value *OnePhi =
nullptr;
1471 if (isa<PHINode>(PV1)) {
1472 if (OnePhi && OnePhi != PV1) {
1483 if (CheckForRecPhi(PV1))
1486 if (UniqueSrc.
insert(PV1).second)
1487 V1Srcs.push_back(PV1);
1490 if (OnePhi && UniqueSrc.
size() > 1)
1521 AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1537 for (
unsigned i = 1,
e = V1Srcs.size();
i !=
e; ++
i) {
1562 V2 =
V2->stripPointerCastsForAliasAnalysis();
1566 if (isa<UndefValue>(V1) || isa<UndefValue>(
V2))
1575 if (isValueEqualInPotentialCycles(V1,
V2))
1632 TLI, NullIsValidLocation)) ||
1635 TLI, NullIsValidLocation)))
1654 if (AAQI.
Depth >= 512)
1660 const bool Swapped = V1 >
V2;
1666 auto &Entry = Pair.first->second;
1667 if (!Entry.isDefinitive()) {
1669 ++Entry.NumAssumptionUses;
1673 auto Result = Entry.Result;
1681 aliasCheckRecursive(V1, V1Size,
V2, V2Size, AAQI, O1, O2);
1685 auto &Entry = It->second;
1688 bool AssumptionDisproven =
1690 if (AssumptionDisproven)
1697 Entry.Result.swap(Swapped);
1698 Entry.NumAssumptionUses = -1;
1703 if (AssumptionDisproven)
1719 if (
const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1723 }
else if (
const GEPOperator *GV2 = dyn_cast<GEPOperator>(
V2)) {
1730 if (
const PHINode *PN = dyn_cast<PHINode>(V1)) {
1734 }
else if (
const PHINode *PN = dyn_cast<PHINode>(
V2)) {
1741 if (
const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1745 }
else if (
const SelectInst *S2 = dyn_cast<SelectInst>(
V2)) {
1772 bool BasicAAResult::isValueEqualInPotentialCycles(
const Value *V,
1777 const Instruction *Inst = dyn_cast<Instruction>(V);
1781 if (VisitedPhiBBs.
empty())
1790 for (
auto *
P : VisitedPhiBBs)
1798 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1799 const DecomposedGEP &SrcGEP) {
1800 DestGEP.Offset -= SrcGEP.Offset;
1801 for (
const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1805 for (
auto I :
enumerate(DestGEP.VarIndices)) {
1806 VariableGEPIndex &Dest =
I.value();
1807 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1808 !Dest.Val.hasSameCastsAs(Src.Val))
1813 if (Dest.Scale != Src.Scale) {
1814 Dest.Scale -= Src.Scale;
1817 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() +
I.index());
1825 VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1826 DestGEP.VarIndices.push_back(Entry);
1831 bool BasicAAResult::constantOffsetHeuristic(
1834 if (
GEP.VarIndices.size() != 2 || !MaybeV1Size.
hasValue() ||
1841 const VariableGEPIndex &Var0 =
GEP.VarIndices[0], &Var1 =
GEP.VarIndices[1];
1843 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1844 Var0.Scale != -Var1.Scale ||
1845 Var0.Val.V->getType() != Var1.Val.V->getType())
1852 LinearExpression E0 =
1854 LinearExpression E1 =
1856 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1857 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1867 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1869 APInt MinDiffBytes =
1870 MinDiff.
zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1876 return MinDiffBytes.
uge(V1Size +
GEP.Offset.abs()) &&
1877 MinDiffBytes.
uge(V2Size +
GEP.Offset.abs());
1891 return BasicAAResult(
F.getParent()->getDataLayout(),
F, TLI, AC, DT, PV);
1900 void BasicAAWrapperPass::anchor() {}
1903 "Basic Alias Analysis (stateless AA impl)",
true,
true)
1916 auto &ACT = getAnalysis<AssumptionCacheTracker>();
1917 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1918 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1919 auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1922 TLIWP.getTLI(
F), ACT.getAssumptionCache(
F),
1924 PVWP ? &PVWP->getResult() :
nullptr));
1939 F.getParent()->getDataLayout(),
F,
A set of analyses that are preserved following a run of a transformation pass.
@ NoModRef
The access neither references nor modifies the value stored in memory.
This class represents an incoming formal argument to a Function.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
@ MayAlias
The two locations may or may not alias.
void removeInstruction(Instruction *I)
@ PartialAlias
The two locations alias, but only due to a partial overlap.
This is an optimization pass for GlobalISel generic memory operations.
This is a utility class that provides an abstraction for the common functionality between Instruction...
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
op_range incoming_values()
A parsed version of the target data layout string in and methods for querying it.
LLVM_NODISCARD bool isModAndRefSet(const ModRefInfo MRI)
BasicAAResult run(Function &F, FunctionAnalysisManager &AM)
constexpr static LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
bool isPointerTy() const
True if this is an instance of PointerType.
bool isNonEscapingLocalObject(const Value *V, SmallDenseMap< const Value *, bool, 8 > *IsCapturedCache=nullptr)
Returns true if the pointer is to a function-local object that never escapes from the function.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
const Value * Ptr
The address of the start of the location.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
static uint64_t getMinimalExtentFrom(const Value &V, const LocationSize &LocSize, const DataLayout &DL, bool NullIsValidLoc)
Return the minimal extent from V to the end of the underlying object, assuming the result is used in ...
This currently compiles esp xmm0 movsd esp eax eax esp ret We should use not the dag combiner This is because dagcombine2 needs to be able to see through the X86ISD::Wrapper which DAGCombine can t really do The code for turning x load into a single vector load is target independent and should be moved to the dag combiner The code for turning x load into a vector load can only handle a direct load from a global or a direct load from the stack It should be generalized to handle any load from P
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
void print(raw_ostream &OS) const
size_type size() const
Determine the number of elements in the SetVector.
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are pairs (A, B) such that A is the 0-based ...
void initializeBasicAAWrapperPassPass(PassRegistry &)
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
LLVM_NODISCARD ModRefInfo createModRefInfo(const FunctionModRefBehavior FMRB)
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
static const unsigned MaxLookupSearchDepth
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool erase(const KeyT &Val)
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
Wrapper pass for the legacy pass manager.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
static LinearExpression GetLinearExpression(const CastedValue &Val, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT)
Analyzes the specified value as a linear expression: "A*V + B", where A and B are constant integers.
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
The possible results of an alias query.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
place backedge safepoints impl
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
LLVM_NODISCARD T pop_back_val()
gep_type_iterator gep_type_begin(const User *GEP)
@ FMRB_UnknownModRefBehavior
This indicates that the function could not be classified into one of the behaviors above.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
SmallVector< VariableGEPIndex, 4 > VarIndices
static bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
@ FMRB_OnlyAccessesInaccessibleMem
The only memory references in this function (if it has any) are references of memory that is otherwis...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
A constant pointer value that points to null.
This class stores info we want to provide to or retain within an alias query.
static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V is smaller than Size.
@ FMRB_OnlyAccessesInaccessibleOrArgMem
The function may perform non-volatile loads and stores of objects pointed to by its pointer-typed arg...
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This is the shared class of boolean and integer constants.
bool mayBeBeforePointer() const
Whether accesses before the base pointer are possible.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isNegative() const
Determine sign of this APInt.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
@ Ref
The access may reference the value stored in memory.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I) override
Represent the analysis usage information of a pass.
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
uint64_t getValue() const
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
LLVM_NODISCARD detail::scope_exit< typename std::decay< Callable >::type > make_scope_exit(Callable &&F)
Legacy analysis pass which computes a DominatorTree.
STATISTIC(NumFunctions, "Total number of functions")
This class implements an extremely fast bulk output stream that can only output to a stream.
SmallVector< AAQueryInfo::LocPair, 4 > AssumptionBasedResults
Location pairs for which an assumption based result is currently stored.
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns the call pointer argument that is considered the same by aliasing rules.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
const unsigned MaxNumPhiBBsValueReachabilityCheck
Cutoff after which to stop analysing a set of phi nodes potentially involved in a cycle.
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", "Basic Alias Analysis (stateless AA impl)", true, true) INITIALIZE_PASS_END(BasicAAWrapperPass
API to communicate dependencies between analyses during invalidation.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V has size Size.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
StructType * getStructTypeOrNull() const
LLVM_NODISCARD bool isModSet(const ModRefInfo MRI)
ConstantRange smul_fast(const ConstantRange &Other) const
Return range of possible values for a signed multiplication of this and Other.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
const ValueSet & getValuesForPhi(const PHINode *PN)
Get the underlying values of a phi.
LLVM_NODISCARD ModRefInfo setRef(const ModRefInfo MRI)
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
Chases pointers until we find a (constant global) or not.
LLVM_NODISCARD ModRefInfo clearMust(const ModRefInfo MRI)
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
std::pair< AACacheLoc, AACacheLoc > LocPair
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI)
virtual bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I)=0
Instruction * FindEarliestCapture(const Value *V, Function &F, bool ReturnCaptures, bool StoreCaptures, const DominatorTree &DT, const SmallPtrSetImpl< const Value * > &EphValues, unsigned MaxUsesToExplore=0)
bool invalidate(Function &Fn, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
LLVM_NODISCARD bool isNoModRef(const ModRefInfo MRI)
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
@ FMRB_OnlyAccessesArgumentPointees
The only memory references in this function (if it has any) are non-volatile loads and stores from ob...
Module * getParent()
Get the module that this global value is contained inside of...
A function analysis which provides an AssumptionCache.
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
Legacy wrapper pass to provide the BasicAAResult object.
int NumAssumptionUses
How many active NoAlias assumption uses there are.
A special type used by analysis passes to provide an address that identifies that particular analysis...
initializer< Ty > init(const Ty &Val)
The analysis pass which yields a PhiValues.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize)
To ensure a pointer offset fits in an integer of size IndexSize (in bits) when that size is smaller t...
ConstantRange smul_sat(const ConstantRange &Other) const
Perform a signed saturating multiplication of two constant ranges.
iterator find(const_arg_type_t< KeyT > Val)
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
void setOffset(int32_t NewOffset)
StandardInstrumentations SI(Debug, VerifyEach)
Type * getIndexedType() const
const Value * stripPointerCastsForAliasAnalysis() const
Strip off pointer casts, all-zero GEPs, single-argument phi nodes and invariant group info.
This class represents the LLVM 'select' instruction.
bool has(LibFunc F) const
Tests whether a library function is available.
@ FMRB_DoesNotAccessMemory
This function does not perform any non-local loads or stores to memory.
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Class for arbitrary precision integers.
void swap(bool DoSwap=true)
Helper for processing AliasResult for swapped memory location pairs.
Optional< bool > InBounds
An immutable pass that tracks lazily created AssumptionCache objects.
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Checks to see if the specified callsite can clobber the specified memory object.
Class to represent struct types.
A cache of @llvm.assume calls within a function.
Type * getType() const
All values are typed, get the type of this value.
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Get the location associated with a pointer argument of a callsite.
static const Function * getParent(const Value *V)
@ FMRB_OnlyReadsMemory
This function does not perform any non-local stores or volatile loads, but may read from any memory l...
@ NoAlias
The two locations do not alias at all.
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
@ Mod
The access may modify the value stored in memory.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
@ MustAlias
The two locations precisely alias each other.
TargetLibraryInfo & getTLI(const Function &F)
Various options to control the behavior of getObjectSize.
FunctionModRefBehavior
Summary of how a function affects memory in the program.
This is the AA result object for the basic, local, and stateless alias analysis.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
StringRef getName() const
Return a constant reference to the value's name.
bool isNotCapturedBeforeOrAt(const Value *Object, const Instruction *I) override
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
APInt zext(unsigned width) const
Zero extend to a new width.
unsigned getPointerAddressSpace() const
Method to return the address space of the pointer operand.
FunctionPass * createBasicAAWrapperPass()
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
static cl::opt< bool > EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, cl::init(true))
Enable analysis of recursive PHI nodes.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
void setPreservesAll()
Set by analyses that do not transform their input at all.
constexpr static LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
constexpr unsigned BitWidth
Provides information about what library functions are available for the current target.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo &TLI)
Returns true if this is a writeonly (i.e Mod only) parameter.
bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool OrLocal)
This class represents a range of values.
AAQueryInfo withEmptyCache()
Create a new AAQueryInfo based on this one, but with the cache cleared.
A wrapper class for inspecting calls to intrinsic functions.
APInt abs() const
Get the absolute value.
BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F)
A helper for the legacy pass manager to create a BasicAAResult object populated to the best of our ab...
Analysis pass which computes a DominatorTree.
AAResultsProxy getBestAAResults()
Get a proxy for the best AA result set to query at this time.
Pass interface - Implemented by all 'passes'.
Type * getSourceElementType() const
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
LLVM_NODISCARD bool empty() const
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&... Args)
const BasicBlock * getParent() const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Align max(MaybeAlign Lhs, Align Rhs)
LLVM_NODISCARD ModRefInfo setMust(const ModRefInfo MRI)
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
AnalysisUsage & addRequiredTransitive()
A SetVector that performs no allocations if smaller than a certain size.
A container for analyses that lazily runs them and caches their results.
AnalysisUsage & addUsedIfAvailable()
Add the specified Pass class to the set of analyses used by this pass.
FunctionPass class - This class is used to implement most global optimizations.
This class represents a function call, abstracting a target machine's calling convention.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
Value * getOperand(unsigned i) const
bool isEmptySet() const
Return true if this set contains no members.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM Value Representation.
Analysis pass providing the TargetLibraryInfo.
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Returns the behavior when calling the given call site.
@ ModRef
The access may reference and may modify the value stored in memory.
Representation for a specific memory location.
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory similar to malloc or...
static AliasResult MergeAliasResults(AliasResult A, AliasResult B)
unsigned Depth
Query depth used to distinguish recursive queries.
LLVM_NODISCARD ModRefInfo setMod(const ModRefInfo MRI)
Optional< std::vector< StOtherPiece > > Other
static bool notDifferentParent(const Value *O1, const Value *O2)
A Use represents the edge between a Value definition and its users.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static bool isBaseOfObject(const Value *V)
Return true if we know V to the base address of the corresponding memory object.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.