//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
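
// Added commentary (not part of the upstream file): expanding the macros
// above by hand shows what one tracking call produces; for example
//   STATS_DECLTRACK_ARG_ATTR(returned)
// becomes
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }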

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is set
/// to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
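
// Added illustration (not part of the upstream file): for %p of type
// { i32, i32 }* and Offset == 4, DL.getGEPIndicesForOffset yields the indices
// {0, 1} with no remaining byte offset, so the helper emits
//   %p.0.1 = getelementptr { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1
// and then bit- or pointer-casts %p.0.1 to the requested result type.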

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        DepClassTy::NONE);
  bool AnyDead = false;

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        bool UsedAssumedInformation = false;
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA, UsedAssumedInformation,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      if (!SimpleV.getValue())
        return false;
      Value *NewV = SimpleV.getValue();
      if (NewV != V) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
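
// Added illustration (not part of the upstream file): starting at
//   %sel = select i1 %c, i32* %a, i32* %b
// the traversal looks through the select. If the Attributor can simplify %c
// to a constant, only the corresponding operand is followed; otherwise both
// %a and %b are visited, each reported to VisitValueCB with the "stripped"
// flag set because %sel itself was looked through.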

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          true, 32, StripCB))
    return false;
  return true;
}

static const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ false,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccessPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

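// Added commentary (not part of the upstream file): used with, e.g., AANonNull
// at a returned position, the helper above queries AANonNull for every
// potentially returned value and intersects the states, so "nonnull" is only
// kept if it holds for all returned values.
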
namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos << " CB Arg state: "
                    << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
} // namespace

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int c, int *ptr) {
  //    if (a)
  //      if (c) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (c) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (OAS.getOffset() == OffsetAndSize::Unknown ||
        OAS.getSize() == OffsetAndSize::Unknown ||
        getOffset() == OffsetAndSize::Unknown ||
        getSize() == OffsetAndSize::Unknown)
      return true;

    // Check if one offset point is in the other interval [offset, offset+size).
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offset or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};
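
// Added worked example (not part of the upstream file): for this = {4, 4},
// i.e. bytes [4, 8), OAS = {0, 8} overlaps (0 + 8 > 4 and 0 < 4 + 4) while
// OAS = {0, 4} does not (0 + 4 > 4 fails); any Unknown offset or size is
// conservatively reported as overlapping.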

/// Implementation of the DenseMapInfo.
///
///{
AccessAsInstructionInfo::Access AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
AccessAsInstructionInfo::Access AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned AccessAsInstructionInfo::getHashValue(
    const AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool AccessAsInstructionInfo::isEqual(const AccessAsInstructionInfo::Access &LHS,
                                      const AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
DenseMapInfo<AAPointerInfo::Access>::Access
DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
DenseMapInfo<AAPointerInfo::Access>::Access
DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const AAPointerInfo::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const AAPointerInfo::Access &LHS, const AAPointerInfo::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      for (auto &Access : It.getSecond())
        if (!CB(Access, OAS == ItOAS))
          return false;
    }
    return true;
  }
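
  // Added commentary (not part of the upstream file): if I was recorded in
  // bin {8, 4}, the callback runs for every access in each bin that may
  // overlap bytes [8, 12), and the second callback argument tells the caller
  // whether the interfering access lives in exactly the same bin.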

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  };
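
  // Added example (not part of the upstream file): a store of an i64 at a
  // known offset lets handleAccess derive Size = 8 from DL.getTypeStoreSize,
  // while a scalable vector type keeps Size at OffsetAndSize::Unknown.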

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");

      OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        if (CE->isCompare())
          return true;
        if (!CE->isGEPWithNoNotionalOverIndexing()) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset +
                       DL.getIndexedOffsetInType(
                           CurPtr->getType()->getPointerElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, PtrOI, Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Check if the PHI is invariant (so far).
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand has already an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, PtrOI.Offset, Changed,
                            LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            PtrOI.Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond().size() << "\n";
        for (auto &Acc : It.getSecond()) {
          dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << " --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined())
            dbgs() << " - " << Acc.getWrittenValue() << "\n";
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
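
// Added example (not part of the upstream file): for memcpy(%dst, %src, 16)
// the updateImpl above records, at offset 0, a 16-byte write for call site
// argument 0 (%dst) and a 16-byte read for call site argument 1 (%src); a
// non-constant length is recorded with Unknown size instead.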

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
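
// Added illustration (not part of the upstream file): a function such as
//   define void @f() {
//     call void @g() nounwind
//     ret void
//   }
// is deduced nounwind because the only potentially throwing instruction in
// the opcode list above, the call, is itself known not to unwind.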

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  DenseMap<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of function with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of function with unique return");
  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}
1680 
1681 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1682  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1683 
1684  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1685  bool) -> bool {
1686  bool UsedAssumedInformation = false;
1687  Optional<Value *> SimpleRetVal =
1688  A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1689  if (!SimpleRetVal.hasValue())
1690  return true;
1691  if (!SimpleRetVal.getValue())
1692  return false;
1693  Value *RetVal = *SimpleRetVal;
1694  assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1695  "Assumed returned value should be valid in function scope!");
1696  if (ReturnedValues[RetVal].insert(&Ret))
1697  Changed = ChangeStatus::CHANGED;
1698  return true;
1699  };
1700 
1701  auto ReturnInstCB = [&](Instruction &I) {
1702  ReturnInst &Ret = cast<ReturnInst>(I);
1703  return genericValueTraversal<ReturnInst>(
1704  A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1705  &I);
1706  };
1707 
1708  // Discover returned values from all live returned instructions in the
1709  // associated function.
1710  bool UsedAssumedInformation = false;
1711  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1712  UsedAssumedInformation))
1713  return indicatePessimisticFixpoint();
1714  return Changed;
1715 }
1716 
1717 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1718  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1719  : AAReturnedValuesImpl(IRP, A) {}
1720 
1721  /// See AbstractAttribute::trackStatistics()
1722  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1723 };
1724 
1725 /// Returned values information for call sites.
1726 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1727  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1728  : AAReturnedValuesImpl(IRP, A) {}
1729 
1730  /// See AbstractAttribute::initialize(...).
1731  void initialize(Attributor &A) override {
1732  // TODO: Once we have call site specific value information we can provide
1733  // call site specific liveness information and then it makes
1734  // sense to specialize attributes for call sites instead of
1735  // redirecting requests to the callee.
1736  llvm_unreachable("Abstract attributes for returned values are not "
1737  "supported for call sites yet!");
1738  }
1739 
1740  /// See AbstractAttribute::updateImpl(...).
1741  ChangeStatus updateImpl(Attributor &A) override {
1742  return indicatePessimisticFixpoint();
1743  }
1744 
1745  /// See AbstractAttribute::trackStatistics()
1746  void trackStatistics() const override {}
1747 };
1748 
1749 /// ------------------------ NoSync Function Attribute -------------------------
1750 
1751 struct AANoSyncImpl : AANoSync {
1752  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1753 
1754  const std::string getAsStr() const override {
1755  return getAssumed() ? "nosync" : "may-sync";
1756  }
1757 
1758  /// See AbstractAttribute::updateImpl(...).
1759  ChangeStatus updateImpl(Attributor &A) override;
1760 
1761  /// Helper function used to determine whether an instruction is a
1762  /// non-relaxed atomic, i.e., an atomic instruction whose ordering is
1763  /// stronger than unordered or monotonic.
1764  static bool isNonRelaxedAtomic(Instruction *I);
1765 
1766  /// Helper function specific for intrinsics which are potentially volatile
1767  static bool isNoSyncIntrinsic(Instruction *I);
1768 };
1769 
1770 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1771  if (!I->isAtomic())
1772  return false;
1773 
1774  if (auto *FI = dyn_cast<FenceInst>(I))
1775  // All legal orderings for fence are stronger than monotonic.
1776  return FI->getSyncScopeID() != SyncScope::SingleThread;
1777  else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1778  // Unordered is not a legal ordering for cmpxchg.
1779  return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1780  AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1781  }
1782 
1783  AtomicOrdering Ordering;
1784  switch (I->getOpcode()) {
1785  case Instruction::AtomicRMW:
1786  Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1787  break;
1788  case Instruction::Store:
1789  Ordering = cast<StoreInst>(I)->getOrdering();
1790  break;
1791  case Instruction::Load:
1792  Ordering = cast<LoadInst>(I)->getOrdering();
1793  break;
1794  default:
1795  llvm_unreachable(
1796  "New atomic operations need to be known in the attributor.");
1797  }
1798 
1799  return (Ordering != AtomicOrdering::Unordered &&
1800  Ordering != AtomicOrdering::Monotonic);
1801 }
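// For illustration, how this predicate classifies a few hypothetical atomic
// instructions:
//
//   fence acquire                                     ; non-relaxed
//   store atomic i32 0, i32* %p seq_cst, align 4      ; non-relaxed
//   store atomic i32 0, i32* %p monotonic, align 4    ; relaxed
//   %v = load atomic i32, i32* %p unordered, align 4  ; relaxed
//
// Only the relaxed forms are compatible with `nosync`.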
1802 
1803 /// Return true if this intrinsic is nosync. This is only used for intrinsics
1804 /// which would be nosync except that they have a volatile flag. All other
1805 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1806 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1807  if (auto *MI = dyn_cast<MemIntrinsic>(I))
1808  return !MI->isVolatile();
1809  return false;
1810 }
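// For illustration, with hypothetical pointers %d and %s, only the
// non-volatile variant (last argument `false`) is treated as nosync:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 false) ; nosync
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 true)  ; may sync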
1811 
1812 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1813 
1814  auto CheckRWInstForNoSync = [&](Instruction &I) {
1815  /// We are looking for volatile instructions or Non-Relaxed atomics.
1816 
1817  if (const auto *CB = dyn_cast<CallBase>(&I)) {
1818  if (CB->hasFnAttr(Attribute::NoSync))
1819  return true;
1820 
1821  if (isNoSyncIntrinsic(&I))
1822  return true;
1823 
1824  const auto &NoSyncAA = A.getAAFor<AANoSync>(
1825  *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1826  return NoSyncAA.isAssumedNoSync();
1827  }
1828 
1829  if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1830  return true;
1831 
1832  return false;
1833  };
1834 
1835  auto CheckForNoSync = [&](Instruction &I) {
1836  // At this point we handled all read/write effects and they are all
1837  // nosync, so they can be skipped.
1838  if (I.mayReadOrWriteMemory())
1839  return true;
1840 
1841  // non-convergent and readnone imply nosync.
1842  return !cast<CallBase>(I).isConvergent();
1843  };
1844 
1845  bool UsedAssumedInformation = false;
1846  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1847  UsedAssumedInformation) ||
1848  !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1849  UsedAssumedInformation))
1850  return indicatePessimisticFixpoint();
1851 
1852  return ChangeStatus::UNCHANGED;
1853 }
1854 
1855 struct AANoSyncFunction final : public AANoSyncImpl {
1856  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1857  : AANoSyncImpl(IRP, A) {}
1858 
1859  /// See AbstractAttribute::trackStatistics()
1860  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1861 };
1862 
1863 /// NoSync attribute deduction for call sites.
1864 struct AANoSyncCallSite final : AANoSyncImpl {
1865  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1866  : AANoSyncImpl(IRP, A) {}
1867 
1868  /// See AbstractAttribute::initialize(...).
1869  void initialize(Attributor &A) override {
1870  AANoSyncImpl::initialize(A);
1871  Function *F = getAssociatedFunction();
1872  if (!F || F->isDeclaration())
1873  indicatePessimisticFixpoint();
1874  }
1875 
1876  /// See AbstractAttribute::updateImpl(...).
1877  ChangeStatus updateImpl(Attributor &A) override {
1878  // TODO: Once we have call site specific value information we can provide
1879  // call site specific liveness information and then it makes
1880  // sense to specialize attributes for call sites instead of
1881  // redirecting requests to the callee.
1882  Function *F = getAssociatedFunction();
1883  const IRPosition &FnPos = IRPosition::function(*F);
1884  auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1885  return clampStateAndIndicateChange(getState(), FnAA.getState());
1886  }
1887 
1888  /// See AbstractAttribute::trackStatistics()
1889  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1890 };
1891 
1892 /// ------------------------ No-Free Attributes ----------------------------
1893 
1894 struct AANoFreeImpl : public AANoFree {
1895  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1896 
1897  /// See AbstractAttribute::updateImpl(...).
1898  ChangeStatus updateImpl(Attributor &A) override {
1899  auto CheckForNoFree = [&](Instruction &I) {
1900  const auto &CB = cast<CallBase>(I);
1901  if (CB.hasFnAttr(Attribute::NoFree))
1902  return true;
1903 
1904  const auto &NoFreeAA = A.getAAFor<AANoFree>(
1905  *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1906  return NoFreeAA.isAssumedNoFree();
1907  };
1908 
1909  bool UsedAssumedInformation = false;
1910  if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1911  UsedAssumedInformation))
1912  return indicatePessimisticFixpoint();
1913  return ChangeStatus::UNCHANGED;
1914  }
1915 
1916  /// See AbstractAttribute::getAsStr().
1917  const std::string getAsStr() const override {
1918  return getAssumed() ? "nofree" : "may-free";
1919  }
1920 };
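// For illustration, a hypothetical function that this update can mark
// `nofree`: its only call-like instruction targets a callee that is already
// known to be `nofree`:
//
//   declare void @g(i8*) nofree nounwind
//
//   define void @f(i8* %p) {
//     call void @g(i8* %p)
//     ret void
//   }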
1921 
1922 struct AANoFreeFunction final : public AANoFreeImpl {
1923  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1924  : AANoFreeImpl(IRP, A) {}
1925 
1926  /// See AbstractAttribute::trackStatistics()
1927  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1928 };
1929 
1930 /// NoFree attribute deduction for call sites.
1931 struct AANoFreeCallSite final : AANoFreeImpl {
1932  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1933  : AANoFreeImpl(IRP, A) {}
1934 
1935  /// See AbstractAttribute::initialize(...).
1936  void initialize(Attributor &A) override {
1937  AANoFreeImpl::initialize(A);
1938  Function *F = getAssociatedFunction();
1939  if (!F || F->isDeclaration())
1940  indicatePessimisticFixpoint();
1941  }
1942 
1943  /// See AbstractAttribute::updateImpl(...).
1944  ChangeStatus updateImpl(Attributor &A) override {
1945  // TODO: Once we have call site specific value information we can provide
1946  // call site specific liveness information and then it makes
1947  // sense to specialize attributes for call sites instead of
1948  // redirecting requests to the callee.
1949  Function *F = getAssociatedFunction();
1950  const IRPosition &FnPos = IRPosition::function(*F);
1951  auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1952  return clampStateAndIndicateChange(getState(), FnAA.getState());
1953  }
1954 
1955  /// See AbstractAttribute::trackStatistics()
1956  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1957 };
1958 
1959 /// NoFree attribute for floating values.
1960 struct AANoFreeFloating : AANoFreeImpl {
1961  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1962  : AANoFreeImpl(IRP, A) {}
1963 
1964  /// See AbstractAttribute::trackStatistics()
1965  void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1966 
1967  /// See AbstractAttribute::updateImpl(...).
1968  ChangeStatus updateImpl(Attributor &A) override {
1969  const IRPosition &IRP = getIRPosition();
1970 
1971  const auto &NoFreeAA = A.getAAFor<AANoFree>(
1972  *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1973  if (NoFreeAA.isAssumedNoFree())
1974  return ChangeStatus::UNCHANGED;
1975 
1976  Value &AssociatedValue = getIRPosition().getAssociatedValue();
1977  auto Pred = [&](const Use &U, bool &Follow) -> bool {
1978  Instruction *UserI = cast<Instruction>(U.getUser());
1979  if (auto *CB = dyn_cast<CallBase>(UserI)) {
1980  if (CB->isBundleOperand(&U))
1981  return false;
1982  if (!CB->isArgOperand(&U))
1983  return true;
1984  unsigned ArgNo = CB->getArgOperandNo(&U);
1985 
1986  const auto &NoFreeArg = A.getAAFor<AANoFree>(
1987  *this, IRPosition::callsite_argument(*CB, ArgNo),
1988  DepClassTy::REQUIRED);
1989  return NoFreeArg.isAssumedNoFree();
1990  }
1991 
1992  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1993  isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1994  Follow = true;
1995  return true;
1996  }
1997  if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1998  isa<ReturnInst>(UserI))
1999  return true;
2000 
2001  // Unknown user.
2002  return false;
2003  };
2004  if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2005  return indicatePessimisticFixpoint();
2006 
2007  return ChangeStatus::UNCHANGED;
2008  }
2009 };
2010 
2011 /// NoFree attribute for a function argument.
2012 struct AANoFreeArgument final : AANoFreeFloating {
2013  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2014  : AANoFreeFloating(IRP, A) {}
2015 
2016  /// See AbstractAttribute::trackStatistics()
2017  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2018 };
2019 
2020 /// NoFree attribute for call site arguments.
2021 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2022  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2023  : AANoFreeFloating(IRP, A) {}
2024 
2025  /// See AbstractAttribute::updateImpl(...).
2026  ChangeStatus updateImpl(Attributor &A) override {
2027  // TODO: Once we have call site specific value information we can provide
2028  // call site specific liveness information and then it makes
2029  // sense to specialize attributes for call site arguments instead of
2030  // redirecting requests to the callee argument.
2031  Argument *Arg = getAssociatedArgument();
2032  if (!Arg)
2033  return indicatePessimisticFixpoint();
2034  const IRPosition &ArgPos = IRPosition::argument(*Arg);
2035  auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2036  return clampStateAndIndicateChange(getState(), ArgAA.getState());
2037  }
2038 
2039  /// See AbstractAttribute::trackStatistics()
2040  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2041 };
2042 
2043 /// NoFree attribute for function return value.
2044 struct AANoFreeReturned final : AANoFreeFloating {
2045  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2046  : AANoFreeFloating(IRP, A) {
2047  llvm_unreachable("NoFree is not applicable to function returns!");
2048  }
2049 
2050  /// See AbstractAttribute::initialize(...).
2051  void initialize(Attributor &A) override {
2052  llvm_unreachable("NoFree is not applicable to function returns!");
2053  }
2054 
2055  /// See AbstractAttribute::updateImpl(...).
2056  ChangeStatus updateImpl(Attributor &A) override {
2057  llvm_unreachable("NoFree is not applicable to function returns!");
2058  }
2059 
2060  /// See AbstractAttribute::trackStatistics()
2061  void trackStatistics() const override {}
2062 };
2063 
2064 /// NoFree attribute deduction for a call site return value.
2065 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2066  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2067  : AANoFreeFloating(IRP, A) {}
2068 
2069  ChangeStatus manifest(Attributor &A) override {
2070  return ChangeStatus::UNCHANGED;
2071  }
2072  /// See AbstractAttribute::trackStatistics()
2073  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2074 };
2075 
2076 /// ------------------------ NonNull Argument Attribute ------------------------
2077 static int64_t getKnownNonNullAndDerefBytesForUse(
2078  Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2079  const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2080  TrackUse = false;
2081 
2082  const Value *UseV = U->get();
2083  if (!UseV->getType()->isPointerTy())
2084  return 0;
2085 
2086  // We need to follow common pointer manipulation uses to the accesses they
2087  // feed into. For now we avoid looking through things we would rather not
2088  // consider, e.g., non-inbounds GEPs.
2089  if (isa<CastInst>(I)) {
2090  TrackUse = true;
2091  return 0;
2092  }
2093 
2094  if (isa<GetElementPtrInst>(I)) {
2095  TrackUse = true;
2096  return 0;
2097  }
2098 
2099  Type *PtrTy = UseV->getType();
2100  const Function *F = I->getFunction();
2101  bool NullPointerIsDefined =
2102  F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2103  const DataLayout &DL = A.getInfoCache().getDL();
2104  if (const auto *CB = dyn_cast<CallBase>(I)) {
2105  if (CB->isBundleOperand(U)) {
2106  if (RetainedKnowledge RK = getKnowledgeFromUse(
2107  U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2108  IsNonNull |=
2109  (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2110  return RK.ArgValue;
2111  }
2112  return 0;
2113  }
2114 
2115  if (CB->isCallee(U)) {
2116  IsNonNull |= !NullPointerIsDefined;
2117  return 0;
2118  }
2119 
2120  unsigned ArgNo = CB->getArgOperandNo(U);
2121  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2122  // As long as we only use known information there is no need to track
2123  // dependences here.
2124  auto &DerefAA =
2125  A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2126  IsNonNull |= DerefAA.isKnownNonNull();
2127  return DerefAA.getKnownDereferenceableBytes();
2128  }
2129 
2130  int64_t Offset;
2131  const Value *Base =
2132  getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2133  if (Base) {
2134  if (Base == &AssociatedValue &&
2135  getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2136  int64_t DerefBytes =
2137  (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2138 
2139  IsNonNull |= !NullPointerIsDefined;
2140  return std::max(int64_t(0), DerefBytes);
2141  }
2142  }
2143 
2144  /// Corner case when an offset is 0.
2145  Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL,
2146  /*AllowNonInbounds*/ true);
2147  if (Base) {
2148  if (Offset == 0 && Base == &AssociatedValue &&
2149  getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2150  int64_t DerefBytes =
2151  (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2152  IsNonNull |= !NullPointerIsDefined;
2153  return std::max(int64_t(0), DerefBytes);
2154  }
2155  }
2156 
2157  return 0;
2158 }
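// For illustration, given a hypothetical use
//
//   %v = load i32, i32* %p, align 4
//
// this helper reports 4 dereferenceable bytes for %p and, if null is not a
// defined address in the enclosing function, also sets IsNonNull.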
2159 
2160 struct AANonNullImpl : AANonNull {
2161  AANonNullImpl(const IRPosition &IRP, Attributor &A)
2162  : AANonNull(IRP, A),
2163  NullIsDefined(NullPointerIsDefined(
2164  getAnchorScope(),
2165  getAssociatedValue().getType()->getPointerAddressSpace())) {}
2166 
2167  /// See AbstractAttribute::initialize(...).
2168  void initialize(Attributor &A) override {
2169  Value &V = getAssociatedValue();
2170  if (!NullIsDefined &&
2171  hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2172  /* IgnoreSubsumingPositions */ false, &A)) {
2173  indicateOptimisticFixpoint();
2174  return;
2175  }
2176 
2177  if (isa<ConstantPointerNull>(V)) {
2178  indicatePessimisticFixpoint();
2179  return;
2180  }
2181 
2183 
2184  bool CanBeNull, CanBeFreed;
2185  if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2186  CanBeFreed)) {
2187  if (!CanBeNull) {
2188  indicateOptimisticFixpoint();
2189  return;
2190  }
2191  }
2192 
2193  if (isa<GlobalValue>(&getAssociatedValue())) {
2194  indicatePessimisticFixpoint();
2195  return;
2196  }
2197 
2198  if (Instruction *CtxI = getCtxI())
2199  followUsesInMBEC(*this, A, getState(), *CtxI);
2200  }
2201 
2202  /// See followUsesInMBEC
2203  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2204  AANonNull::StateType &State) {
2205  bool IsNonNull = false;
2206  bool TrackUse = false;
2207  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2208  IsNonNull, TrackUse);
2209  State.setKnown(IsNonNull);
2210  return TrackUse;
2211  }
2212 
2213  /// See AbstractAttribute::getAsStr().
2214  const std::string getAsStr() const override {
2215  return getAssumed() ? "nonnull" : "may-null";
2216  }
2217 
2218  /// Flag to determine if the underlying value can be null and still allow
2219  /// valid accesses.
2220  const bool NullIsDefined;
2221 };
2222 
2223 /// NonNull attribute for a floating value.
2224 struct AANonNullFloating : public AANonNullImpl {
2225  AANonNullFloating(const IRPosition &IRP, Attributor &A)
2226  : AANonNullImpl(IRP, A) {}
2227 
2228  /// See AbstractAttribute::updateImpl(...).
2229  ChangeStatus updateImpl(Attributor &A) override {
2230  const DataLayout &DL = A.getDataLayout();
2231 
2232  DominatorTree *DT = nullptr;
2233  AssumptionCache *AC = nullptr;
2234  InformationCache &InfoCache = A.getInfoCache();
2235  if (const Function *Fn = getAnchorScope()) {
2236  DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2237  AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2238  }
2239 
2240  auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2241  AANonNull::StateType &T, bool Stripped) -> bool {
2242  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2243  DepClassTy::REQUIRED);
2244  if (!Stripped && this == &AA) {
2245  if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2246  T.indicatePessimisticFixpoint();
2247  } else {
2248  // Use abstract attribute information.
2249  const AANonNull::StateType &NS = AA.getState();
2250  T ^= NS;
2251  }
2252  return T.isValidState();
2253  };
2254 
2255  StateType T;
2256  if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2257  VisitValueCB, getCtxI()))
2258  return indicatePessimisticFixpoint();
2259 
2260  return clampStateAndIndicateChange(getState(), T);
2261  }
2262 
2263  /// See AbstractAttribute::trackStatistics()
2264  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2265 };
2266 
2267 /// NonNull attribute for function return value.
2268 struct AANonNullReturned final
2269  : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2270  AANonNullReturned(const IRPosition &IRP, Attributor &A)
2271  : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2272 
2273  /// See AbstractAttribute::getAsStr().
2274  const std::string getAsStr() const override {
2275  return getAssumed() ? "nonnull" : "may-null";
2276  }
2277 
2278  /// See AbstractAttribute::trackStatistics()
2279  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2280 };
2281 
2282 /// NonNull attribute for function argument.
2283 struct AANonNullArgument final
2284  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2285  AANonNullArgument(const IRPosition &IRP, Attributor &A)
2286  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2287 
2288  /// See AbstractAttribute::trackStatistics()
2289  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2290 };
2291 
2292 struct AANonNullCallSiteArgument final : AANonNullFloating {
2293  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2294  : AANonNullFloating(IRP, A) {}
2295 
2296  /// See AbstractAttribute::trackStatistics()
2297  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2298 };
2299 
2300 /// NonNull attribute for a call site return position.
2301 struct AANonNullCallSiteReturned final
2302  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2303  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2304  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2305 
2306  /// See AbstractAttribute::trackStatistics()
2307  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2308 };
2309 
2310 /// ------------------------ No-Recurse Attributes ----------------------------
2311 
2312 struct AANoRecurseImpl : public AANoRecurse {
2313  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2314 
2315  /// See AbstractAttribute::getAsStr()
2316  const std::string getAsStr() const override {
2317  return getAssumed() ? "norecurse" : "may-recurse";
2318  }
2319 };
2320 
2321 struct AANoRecurseFunction final : AANoRecurseImpl {
2322  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2323  : AANoRecurseImpl(IRP, A) {}
2324 
2325  /// See AbstractAttribute::initialize(...).
2326  void initialize(Attributor &A) override {
2327  AANoRecurseImpl::initialize(A);
2328  if (const Function *F = getAnchorScope())
2329  if (A.getInfoCache().getSccSize(*F) != 1)
2330  indicatePessimisticFixpoint();
2331  }
2332 
2333  /// See AbstractAttribute::updateImpl(...).
2334  ChangeStatus updateImpl(Attributor &A) override {
2335 
2336  // If all live call sites are known to be no-recurse, we are as well.
2337  auto CallSitePred = [&](AbstractCallSite ACS) {
2338  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2339  *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2340  DepClassTy::NONE);
2341  return NoRecurseAA.isKnownNoRecurse();
2342  };
2343  bool AllCallSitesKnown;
2344  if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2345  // If we know all call sites and all are known no-recurse, we are done.
2346  // If all known call sites, which might not be all that exist, are known
2347  // to be no-recurse, we are not done but we can continue to assume
2348  // no-recurse. If one of the call sites we have not visited will become
2349  // live, another update is triggered.
2350  if (AllCallSitesKnown)
2351  indicateOptimisticFixpoint();
2352  return ChangeStatus::UNCHANGED;
2353  }
2354 
2355  // If the above check does not hold anymore we look at the calls.
2356  auto CheckForNoRecurse = [&](Instruction &I) {
2357  const auto &CB = cast<CallBase>(I);
2358  if (CB.hasFnAttr(Attribute::NoRecurse))
2359  return true;
2360 
2361  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2362  *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2363  if (!NoRecurseAA.isAssumedNoRecurse())
2364  return false;
2365 
2366  // Recursion to the same function
2367  if (CB.getCalledFunction() == getAnchorScope())
2368  return false;
2369 
2370  return true;
2371  };
2372 
2373  bool UsedAssumedInformation = false;
2374  if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2375  UsedAssumedInformation))
2376  return indicatePessimisticFixpoint();
2377  return ChangeStatus::UNCHANGED;
2378  }
2379 
2380  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2381 };
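// For illustration, `@caller` in the hypothetical IR below can be deduced
// `norecurse`: its only call-like instruction targets a `norecurse` callee
// that is not `@caller` itself:
//
//   define void @leaf() norecurse {
//     ret void
//   }
//
//   define void @caller() {
//     call void @leaf()
//     ret void
//   }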
2382 
2383 /// NoRecurse attribute deduction for call sites.
2384 struct AANoRecurseCallSite final : AANoRecurseImpl {
2385  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2386  : AANoRecurseImpl(IRP, A) {}
2387 
2388  /// See AbstractAttribute::initialize(...).
2389  void initialize(Attributor &A) override {
2390  AANoRecurseImpl::initialize(A);
2391  Function *F = getAssociatedFunction();
2392  if (!F || F->isDeclaration())
2393  indicatePessimisticFixpoint();
2394  }
2395 
2396  /// See AbstractAttribute::updateImpl(...).
2397  ChangeStatus updateImpl(Attributor &A) override {
2398  // TODO: Once we have call site specific value information we can provide
2399  // call site specific liveness information and then it makes
2400  // sense to specialize attributes for call sites instead of
2401  // redirecting requests to the callee.
2402  Function *F = getAssociatedFunction();
2403  const IRPosition &FnPos = IRPosition::function(*F);
2404  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2405  return clampStateAndIndicateChange(getState(), FnAA.getState());
2406  }
2407 
2408  /// See AbstractAttribute::trackStatistics()
2409  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2410 };
2411 
2412 /// -------------------- Undefined-Behavior Attributes ------------------------
2413 
2414 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2415  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2416  : AAUndefinedBehavior(IRP, A) {}
2417 
2418  /// See AbstractAttribute::updateImpl(...).
2420  ChangeStatus updateImpl(Attributor &A) override {
2421  const size_t UBPrevSize = KnownUBInsts.size();
2422  const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2423 
2424  auto InspectMemAccessInstForUB = [&](Instruction &I) {
2425  // The language reference states that volatile stores are not UB; skip them.
2426  if (I.isVolatile() && I.mayWriteToMemory())
2427  return true;
2428 
2429  // Skip instructions that are already saved.
2430  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2431  return true;
2432 
2433  // If we reach here, we know we have an instruction
2434  // that accesses memory through a pointer operand,
2435  // which getPointerOperand() returns to us.
2436  Value *PtrOp =
2437  const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2438  assert(PtrOp &&
2439  "Expected pointer operand of memory accessing instruction");
2440 
2441  // Either we stopped and the appropriate action was taken,
2442  // or we got back a simplified value to continue.
2443  Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2444  if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2445  return true;
2446  const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2447 
2448  // A memory access through a pointer is considered UB
2449  // only if the pointer has constant null value.
2450  // TODO: Expand it to not only check constant values.
2451  if (!isa<ConstantPointerNull>(PtrOpVal)) {
2452  AssumedNoUBInsts.insert(&I);
2453  return true;
2454  }
2455  const Type *PtrTy = PtrOpVal->getType();
2456 
2457  // Because we only consider instructions inside functions,
2458  // assume that a parent function exists.
2459  const Function *F = I.getFunction();
2460 
2461  // A memory access using constant null pointer is only considered UB
2462  // if null pointer is _not_ defined for the target platform.
2464  AssumedNoUBInsts.insert(&I);
2465  else
2466  KnownUBInsts.insert(&I);
2467  return true;
2468  };
2469 
2470  auto InspectBrInstForUB = [&](Instruction &I) {
2471  // A conditional branch instruction is considered UB if it has `undef`
2472  // condition.
2473 
2474  // Skip instructions that are already saved.
2475  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2476  return true;
2477 
2478  // We know we have a branch instruction.
2479  auto *BrInst = cast<BranchInst>(&I);
2480 
2481  // Unconditional branches are never considered UB.
2482  if (BrInst->isUnconditional())
2483  return true;
2484 
2485  // Either we stopped and the appropriate action was taken,
2486  // or we got back a simplified value to continue.
2487  Optional<Value *> SimplifiedCond =
2488  stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2489  if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2490  return true;
2491  AssumedNoUBInsts.insert(&I);
2492  return true;
2493  };
2494 
2495  auto InspectCallSiteForUB = [&](Instruction &I) {
2496  // Check whether a callsite always causes UB or not.
2497 
2498  // Skip instructions that are already saved.
2499  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2500  return true;
2501 
2502  // Check nonnull and noundef argument attribute violation for each
2503  // callsite.
2504  CallBase &CB = cast<CallBase>(I);
2505  const Function *Callee = CB.getCalledFunction();
2506  if (!Callee)
2507  return true;
2508  for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2509  // If the current argument is known to be simplified to a null pointer and
2510  // the corresponding argument position is known to have the nonnull
2511  // attribute, the argument is poison. Furthermore, if the argument is
2512  // poison and the position is known to have the noundef attribute, this
2513  // callsite is considered UB.
2514  if (idx >= Callee->arg_size())
2515  break;
2516  Value *ArgVal = CB.getArgOperand(idx);
2517  if (!ArgVal)
2518  continue;
2519  // Here, we handle three cases.
2520  // (1) Not having a value means it is dead. (We can replace the value
2521  // with undef.)
2522  // (2) Simplified to undef. The argument violates the noundef attribute.
2523  // (3) Simplified to a null pointer where it is known to be nonnull.
2524  // The argument is a poison value and violates the noundef attribute.
2525  IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2526  auto &NoUndefAA =
2527  A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2528  if (!NoUndefAA.isKnownNoUndef())
2529  continue;
2530  bool UsedAssumedInformation = false;
2531  Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2532  IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2533  if (UsedAssumedInformation)
2534  continue;
2535  if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2536  return true;
2537  if (!SimplifiedVal.hasValue() ||
2538  isa<UndefValue>(*SimplifiedVal.getValue())) {
2539  KnownUBInsts.insert(&I);
2540  continue;
2541  }
2542  if (!ArgVal->getType()->isPointerTy() ||
2543  !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2544  continue;
2545  auto &NonNullAA =
2546  A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2547  if (NonNullAA.isKnownNonNull())
2548  KnownUBInsts.insert(&I);
2549  }
2550  return true;
2551  };
2552 
2553  auto InspectReturnInstForUB =
2554  [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2555  // Check if a return instruction always causes UB or not.
2556  // Note: It is guaranteed that the returned position of the anchor
2557  // scope has noundef attribute when this is called.
2558  // We also ensure the return position is not "assumed dead"
2559  // because the returned value was then potentially simplified to
2560  // `undef` in AAReturnedValues without removing the `noundef`
2561  // attribute yet.
2562 
2563  // When the returned position has the noundef attribute, UB occurs in
2564  // the following cases.
2565  // (1) Returned value is known to be undef.
2566  // (2) The value is known to be a null pointer and the returned
2567  // position has nonnull attribute (because the returned value is
2568  // poison).
2569  bool FoundUB = false;
2570  if (isa<UndefValue>(V)) {
2571  FoundUB = true;
2572  } else {
2573  if (isa<ConstantPointerNull>(V)) {
2574  auto &NonNullAA = A.getAAFor<AANonNull>(
2575  *this, IRPosition::returned(*getAnchorScope()),
2576  DepClassTy::NONE);
2577  if (NonNullAA.isKnownNonNull())
2578  FoundUB = true;
2579  }
2580  }
2581 
2582  if (FoundUB)
2583  for (ReturnInst *RI : RetInsts)
2584  KnownUBInsts.insert(RI);
2585  return true;
2586  };
2587 
2588  bool UsedAssumedInformation = false;
2589  A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2590  {Instruction::Load, Instruction::Store,
2591  Instruction::AtomicCmpXchg,
2592  Instruction::AtomicRMW},
2593  UsedAssumedInformation,
2594  /* CheckBBLivenessOnly */ true);
2595  A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2596  UsedAssumedInformation,
2597  /* CheckBBLivenessOnly */ true);
2598  A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2599  UsedAssumedInformation);
2600 
2601  // If the returned position of the anchor scope has the noundef attribute,
2602  // check all returned instructions.
2603  if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2604  const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2605  if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2606  auto &RetPosNoUndefAA =
2607  A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2608  if (RetPosNoUndefAA.isKnownNoUndef())
2609  A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2610  *this);
2611  }
2612  }
2613 
2614  if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2615  UBPrevSize != KnownUBInsts.size())
2616  return ChangeStatus::CHANGED;
2617  return ChangeStatus::UNCHANGED;
2618  }
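// For illustration, two hypothetical instructions this update classifies as
// known UB (assuming null is not a defined address in this address space);
// manifest later replaces them with `unreachable`:
//
//   store i32 0, i32* null            ; memory access through a null pointer
//   br i1 undef, label %a, label %b   ; conditional branch on undef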
2619 
2620  bool isKnownToCauseUB(Instruction *I) const override {
2621  return KnownUBInsts.count(I);
2622  }
2623 
2624  bool isAssumedToCauseUB(Instruction *I) const override {
2625  // In simple words, if an instruction is not in the set assumed to _not_
2626  // cause UB, then it is assumed to cause UB (that includes those
2627  // in the KnownUBInsts set). The rest is boilerplate
2628  // to ensure that it is one of the instructions we test
2629  // for UB.
2630 
2631  switch (I->getOpcode()) {
2632  case Instruction::Load:
2633  case Instruction::Store:
2634  case Instruction::AtomicCmpXchg:
2635  case Instruction::AtomicRMW:
2636  return !AssumedNoUBInsts.count(I);
2637  case Instruction::Br: {
2638  auto BrInst = cast<BranchInst>(I);
2639  if (BrInst->isUnconditional())
2640  return false;
2641  return !AssumedNoUBInsts.count(I);
2642  } break;
2643  default:
2644  return false;
2645  }
2646  return false;
2647  }
2648 
2649  ChangeStatus manifest(Attributor &A) override {
2650  if (KnownUBInsts.empty())
2651  return ChangeStatus::UNCHANGED;
2652  for (Instruction *I : KnownUBInsts)
2653  A.changeToUnreachableAfterManifest(I);
2654  return ChangeStatus::CHANGED;
2655  }
2656 
2657  /// See AbstractAttribute::getAsStr()
2658  const std::string getAsStr() const override {
2659  return getAssumed() ? "undefined-behavior" : "no-ub";
2660  }
2661 
2662  /// Note: The correctness of this analysis depends on the fact that the
2663  /// following 2 sets will stop changing after some point.
2664  /// "Change" here means that their size changes.
2665  /// The size of each set is monotonically increasing
2666  /// (we only add items to them) and it is upper bounded by the number of
2667  /// instructions in the processed function (we can never save more
2668  /// elements in either set than this number). Hence, at some point,
2669  /// they will stop increasing.
2670  /// Consequently, at some point, both sets will have stopped
2671  /// changing, effectively making the analysis reach a fixpoint.
2672 
2673  /// Note: These 2 sets are disjoint and an instruction can be considered
2674  /// one of 3 things:
2675  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2676  /// the KnownUBInsts set.
2677  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2678  /// has a reason to assume it).
2679  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2680  /// could not find a reason to assume or prove that it can cause UB,
2681  /// hence it assumes it doesn't. We have a set for these instructions
2682  /// so that we don't reprocess them in every update.
2683  /// Note however that instructions in this set may cause UB.
2684 
2685 protected:
2686  /// A set of all live instructions _known_ to cause UB.
2687  SmallPtrSet<Instruction *, 8> KnownUBInsts;
2688 
2689 private:
2690  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2691  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2692 
2693  // Should be called during updates in which we're processing an instruction
2694  // \p I that depends on a value \p V; one of the following has to happen:
2695  // - If the value is assumed, then stop.
2696  // - If the value is known but undef, then consider it UB.
2697  // - Otherwise, do specific processing with the simplified value.
2698  // We return None in the first 2 cases to signify that an appropriate
2699  // action was taken and the caller should stop.
2700  // Otherwise, we return the simplified value that the caller should
2701  // use for specific processing.
2702  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2703  Instruction *I) {
2704  bool UsedAssumedInformation = false;
2705  Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2706  IRPosition::value(*V), *this, UsedAssumedInformation);
2707  if (!UsedAssumedInformation) {
2708  // Don't depend on assumed values.
2709  if (!SimplifiedV.hasValue()) {
2710  // If it is known (which we tested above) but it doesn't have a value,
2711  // then we can assume `undef` and hence the instruction is UB.
2712  KnownUBInsts.insert(I);
2713  return llvm::None;
2714  }
2715  if (!SimplifiedV.getValue())
2716  return nullptr;
2717  V = *SimplifiedV;
2718  }
2719  if (isa<UndefValue>(V)) {
2720  KnownUBInsts.insert(I);
2721  return llvm::None;
2722  }
2723  return V;
2724  }
2725 };
2726 
2727 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2728  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2729  : AAUndefinedBehaviorImpl(IRP, A) {}
2730 
2731  /// See AbstractAttribute::trackStatistics()
2732  void trackStatistics() const override {
2733  STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2734  "Number of instructions known to have UB");
2735  BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2736  KnownUBInsts.size();
2737  }
2738 };
2739 
2740 /// ------------------------ Will-Return Attributes ----------------------------
2741 
2742 // Helper function that checks whether a function contains any cycle that is
2743 // not known to be bounded. Loops with a known maximum trip count are
2744 // considered bounded; any other cycle is not.
2745 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2746  ScalarEvolution *SE =
2747  A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2748  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2749  // If either SCEV or LoopInfo is not available for the function then we
2750  // assume any cycle to be an unbounded cycle.
2751  // We use scc_iterator, which uses Tarjan's algorithm to find all the
2752  // maximal SCCs. To detect if there's a cycle, we only need the maximal ones.
2753  if (!SE || !LI) {
2754  for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2755  if (SCCI.hasCycle())
2756  return true;
2757  return false;
2758  }
2759 
2760  // If there's irreducible control, the function may contain non-loop cycles.
2761  if (mayContainIrreducibleControl(F, LI))
2762  return true;
2763 
2764  // Any loop without a max trip count is considered an unbounded cycle.
2765  for (auto *L : LI->getLoopsInPreorder()) {
2766  if (!SE->getSmallConstantMaxTripCount(L))
2767  return true;
2768  }
2769  return false;
2770 }
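// For illustration, a loop with a constant bound such as
//
//   for (int I = 0; I < 128; ++I) { ... }
//
// has a known maximum trip count and is considered bounded, whereas a
// `while (true)` loop over non-constant state has no such bound and makes
// this function return true.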
2771 
2772 struct AAWillReturnImpl : public AAWillReturn {
2773  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2774  : AAWillReturn(IRP, A) {}
2775 
2776  /// See AbstractAttribute::initialize(...).
2777  void initialize(Attributor &A) override {
2778  AAWillReturn::initialize(A);
2779 
2780  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2781  indicateOptimisticFixpoint();
2782  return;
2783  }
2784  }
2785 
2786  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2787  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2788  // Check for `mustprogress` in the scope and the associated function which
2789  // might be different if this is a call site.
2790  if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2791  (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2792  return false;
2793 
2794  const auto &MemAA =
2795  A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2796  if (!MemAA.isAssumedReadOnly())
2797  return false;
2798  if (KnownOnly && !MemAA.isKnownReadOnly())
2799  return false;
2800  if (!MemAA.isKnownReadOnly())
2801  A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2802 
2803  return true;
2804  }
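// For illustration, a hypothetical function carrying both attributes:
//
//   define i32 @f(i32 %x) mustprogress readonly { ... }
//
// `mustprogress` rules out looping forever without observable progress, and
// `readonly` rules out every other way of making progress (stores,
// synchronization), so the function must eventually return.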
2805 
2806  /// See AbstractAttribute::updateImpl(...).
2807  ChangeStatus updateImpl(Attributor &A) override {
2808  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2809  return ChangeStatus::UNCHANGED;
2810 
2811  auto CheckForWillReturn = [&](Instruction &I) {
2812  IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2813  const auto &WillReturnAA =
2814  A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2815  if (WillReturnAA.isKnownWillReturn())
2816  return true;
2817  if (!WillReturnAA.isAssumedWillReturn())
2818  return false;
2819  const auto &NoRecurseAA =
2820  A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2821  return NoRecurseAA.isAssumedNoRecurse();
2822  };
2823 
2824  bool UsedAssumedInformation = false;
2825  if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2826  UsedAssumedInformation))
2827  return indicatePessimisticFixpoint();
2828 
2829  return ChangeStatus::UNCHANGED;
2830  }
2831 
2832  /// See AbstractAttribute::getAsStr()
2833  const std::string getAsStr() const override {
2834  return getAssumed() ? "willreturn" : "may-noreturn";
2835  }
2836 };
2837 
2838 struct AAWillReturnFunction final : AAWillReturnImpl {
2839  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2840  : AAWillReturnImpl(IRP, A) {}
2841 
2842  /// See AbstractAttribute::initialize(...).
2843  void initialize(Attributor &A) override {
2844  AAWillReturnImpl::initialize(A);
2845 
2846  Function *F = getAnchorScope();
2847  if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2848  indicatePessimisticFixpoint();
2849  }
2850 
2851  /// See AbstractAttribute::trackStatistics()
2852  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2853 };
2854 
2855 /// WillReturn attribute deduction for call sites.
2856 struct AAWillReturnCallSite final : AAWillReturnImpl {
2857  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2858  : AAWillReturnImpl(IRP, A) {}
2859 
2860  /// See AbstractAttribute::initialize(...).
2861  void initialize(Attributor &A) override {
2862  AAWillReturnImpl::initialize(A);
2863  Function *F = getAssociatedFunction();
2864  if (!F || !A.isFunctionIPOAmendable(*F))
2865  indicatePessimisticFixpoint();
2866  }
2867 
2868  /// See AbstractAttribute::updateImpl(...).
2869  ChangeStatus updateImpl(Attributor &A) override {
2870  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2871  return ChangeStatus::UNCHANGED;
2872 
2873  // TODO: Once we have call site specific value information we can provide
2874  // call site specific liveness information and then it makes
2875  // sense to specialize attributes for call sites instead of
2876  // redirecting requests to the callee.
2877  Function *F = getAssociatedFunction();
2878  const IRPosition &FnPos = IRPosition::function(*F);
2879  auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2880  return clampStateAndIndicateChange(getState(), FnAA.getState());
2881  }
2882 
2883  /// See AbstractAttribute::trackStatistics()
2884  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2885 };
2886 
2887 /// -------------------AAReachability Attribute--------------------------
2888 
2889 struct AAReachabilityImpl : AAReachability {
2890  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2891  : AAReachability(IRP, A) {}
2892 
2893  const std::string getAsStr() const override {
2894  // TODO: Return the number of reachable queries.
2895  return "reachable";
2896  }
2897 
2898  /// See AbstractAttribute::updateImpl(...).
2899  ChangeStatus updateImpl(Attributor &A) override {
2900  return ChangeStatus::UNCHANGED;
2901  }
2902 };
2903 
2904 struct AAReachabilityFunction final : public AAReachabilityImpl {
2905  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2906  : AAReachabilityImpl(IRP, A) {}
2907 
2908  /// See AbstractAttribute::trackStatistics()
2909  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2910 };
2911 
2912 /// ------------------------ NoAlias Argument Attribute ------------------------
2913 
2914 struct AANoAliasImpl : AANoAlias {
2915  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2916  assert(getAssociatedType()->isPointerTy() &&
2917  "Noalias is a pointer attribute");
2918  }
2919 
2920  const std::string getAsStr() const override {
2921  return getAssumed() ? "noalias" : "may-alias";
2922  }
2923 };
2924 
2925 /// NoAlias attribute for a floating value.
2926 struct AANoAliasFloating final : AANoAliasImpl {
2927  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2928  : AANoAliasImpl(IRP, A) {}
2929 
2930  /// See AbstractAttribute::initialize(...).
2931  void initialize(Attributor &A) override {
2932  AANoAliasImpl::initialize(A);
2933  Value *Val = &getAssociatedValue();
2934  do {
2935  CastInst *CI = dyn_cast<CastInst>(Val);
2936  if (!CI)
2937  break;
2938  Value *Base = CI->getOperand(0);
2939  if (!Base->hasOneUse())
2940  break;
2941  Val = Base;
2942  } while (true);
2943 
2944  if (!Val->getType()->isPointerTy()) {
2945  indicatePessimisticFixpoint();
2946  return;
2947  }
2948 
2949  if (isa<AllocaInst>(Val))
2950  indicateOptimisticFixpoint();
2951  else if (isa<ConstantPointerNull>(Val) &&
2952  !NullPointerIsDefined(getAnchorScope(),
2953  Val->getType()->getPointerAddressSpace()))
2954  indicateOptimisticFixpoint();
2955  else if (Val != &getAssociatedValue()) {
2956  const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2957  *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2958  if (ValNoAliasAA.isKnownNoAlias())
2959  indicateOptimisticFixpoint();
2960  }
2961  }
2962 
2963  /// See AbstractAttribute::updateImpl(...).
2964  ChangeStatus updateImpl(Attributor &A) override {
2965  // TODO: Implement this.
2966  return indicatePessimisticFixpoint();
2967  }
2968 
2969  /// See AbstractAttribute::trackStatistics()
2970  void trackStatistics() const override {
2971  STATS_DECLTRACK_FLOATING_ATTR(noalias)
2972  }
2973 };
2974 
2975 /// NoAlias attribute for an argument.
2976 struct AANoAliasArgument final
2977  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2978  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2979  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2980 
2981  /// See AbstractAttribute::initialize(...).
2982  void initialize(Attributor &A) override {
2983  Base::initialize(A);
2984  // See callsite argument attribute and callee argument attribute.
2985  if (hasAttr({Attribute::ByVal}))
2986  indicateOptimisticFixpoint();
2987  }
2988 
2989  /// See AbstractAttribute::updateImpl(...).
2990  ChangeStatus updateImpl(Attributor &A) override {
2991  // We have to make sure no-alias on the argument does not break
2992  // synchronization when this is a callback argument, see also [1] below.
2993  // If synchronization cannot be affected, we delegate to the base updateImpl
2994  // function, otherwise we give up for now.
2995 
2996  // If the function is no-sync, no-alias cannot break synchronization.
2997  const auto &NoSyncAA =
2998  A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2999  DepClassTy::OPTIONAL);
3000  if (NoSyncAA.isAssumedNoSync())
3001  return Base::updateImpl(A);
3002 
3003  // If the argument is read-only, no-alias cannot break synchronization.
3004  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3005  *this, getIRPosition(), DepClassTy::OPTIONAL);
3006  if (MemBehaviorAA.isAssumedReadOnly())
3007  return Base::updateImpl(A);
3008 
3009  // If the argument is never passed through callbacks, no-alias cannot break
3010  // synchronization.
3011  bool AllCallSitesKnown;
3012  if (A.checkForAllCallSites(
3013  [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3014  true, AllCallSitesKnown))
3015  return Base::updateImpl(A);
3016 
3017  // TODO: add no-alias but make sure it doesn't break synchronization by
3018  // introducing fake uses. See:
3019  // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3020  // International Workshop on OpenMP 2018,
3021  // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3022 
3023  return indicatePessimisticFixpoint();
3024  }
3025 
3026  /// See AbstractAttribute::trackStatistics()
3027  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3028 };
3029 
3030 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3031  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3032  : AANoAliasImpl(IRP, A) {}
3033 
3034  /// See AbstractAttribute::initialize(...).
3035  void initialize(Attributor &A) override {
3036  // See callsite argument attribute and callee argument attribute.
3037  const auto &CB = cast<CallBase>(getAnchorValue());
3038  if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3039  indicateOptimisticFixpoint();
3040  Value &Val = getAssociatedValue();
3041  if (isa<ConstantPointerNull>(Val) &&
3042  !NullPointerIsDefined(getAnchorScope(),
3043  Val.getType()->getPointerAddressSpace()))
3044  indicateOptimisticFixpoint();
3045  }
3046 
3047  /// Determine if the underlying value may alias with the call site argument
3048  /// \p OtherArgNo of \p ICS (= the underlying call site).
3049  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3050  const AAMemoryBehavior &MemBehaviorAA,
3051  const CallBase &CB, unsigned OtherArgNo) {
3052  // We do not need to worry about aliasing with the underlying IRP.
3053  if (this->getCalleeArgNo() == (int)OtherArgNo)
3054  return false;
3055 
3056  // If it is not a pointer or pointer vector we do not alias.
3057  const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3058  if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3059  return false;
3060 
3061  auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3062  *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3063 
3064  // If the argument is readnone, there is no read-write aliasing.
3065  if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3066  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3067  return false;
3068  }
3069 
3070  // If the argument is readonly and the underlying value is readonly, there
3071  // is no read-write aliasing.
3072  bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3073  if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3074  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3075  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3076  return false;
3077  }
3078 
3079  // We have to utilize actual alias analysis queries so we need the object.
3080  if (!AAR)
3081  AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3082 
3083  // Try to rule it out at the call site.
3084  bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3085  LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3086  "callsite arguments: "
3087  << getAssociatedValue() << " " << *ArgOp << " => "
3088  << (IsAliasing ? "" : "no-") << "alias \n");
3089 
3090  return IsAliasing;
3091  }
3092 
3093  bool
3094  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3095  const AAMemoryBehavior &MemBehaviorAA,
3096  const AANoAlias &NoAliasAA) {
3097  // We can deduce "noalias" if the following conditions hold.
3098  // (i) Associated value is assumed to be noalias in the definition.
3099  // (ii) Associated value is assumed to be no-capture in all the uses
3100  // possibly executed before this callsite.
3101  // (iii) There is no other pointer argument which could alias with the
3102  // value.
3103 
3104  bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3105  if (!AssociatedValueIsNoAliasAtDef) {
3106  LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3107  << " is not no-alias at the definition\n");
3108  return false;
3109  }
3110 
3111  A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3112 
3113  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3114  const Function *ScopeFn = VIRP.getAnchorScope();
3115  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3116  // Check whether the value is captured in the scope using AANoCapture.
3117  // Look at CFG and check only uses possibly executed before this
3118  // callsite.
3119  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3120  Instruction *UserI = cast<Instruction>(U.getUser());
3121 
3122  // If UserI is the current instruction and there is a single potential use
3123  // of the value in UserI, we allow the use.
3124  // TODO: We should inspect the operands and allow those that cannot alias
3125  // with the value.
3126  if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3127  return true;
3128 
3129  if (ScopeFn) {
3130  const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3131  *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3132 
3133  if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3134  return true;
3135 
3136  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3137  if (CB->isArgOperand(&U)) {
3138 
3139  unsigned ArgNo = CB->getArgOperandNo(&U);
3140 
3141  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3142  *this, IRPosition::callsite_argument(*CB, ArgNo),
3143  DepClassTy::OPTIONAL);
3144 
3145  if (NoCaptureAA.isAssumedNoCapture())
3146  return true;
3147  }
3148  }
3149  }
3150 
3151  // For cases which can potentially have more users
3152  if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3153  isa<SelectInst>(U)) {
3154  Follow = true;
3155  return true;
3156  }
3157 
3158  LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3159  return false;
3160  };
3161 
3162  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3163  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3164  LLVM_DEBUG(
3165  dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3166  << " cannot be noalias as it is potentially captured\n");
3167  return false;
3168  }
3169  }
3170  A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3171 
3172  // Check there is no other pointer argument which could alias with the
3173  // value passed at this call site.
3174  // TODO: AbstractCallSite
3175  const auto &CB = cast<CallBase>(getAnchorValue());
3176  for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3177  if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3178  return false;
3179 
3180  return true;
3181  }
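
  // For illustration (hypothetical C code, names invented): in
  //   static void fill(int *P);
  //   void caller() {
  //     int *P = (int *)malloc(4); // (i) malloc's return is noalias.
  //     fill(P);                   // (ii) P is not captured before the call.
  //   }                            // (iii) No other pointer argument of the
  //                                //       call could alias P.
  // all three conditions hold, so the call site argument can be "noalias".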
3182 
3183  /// See AbstractAttribute::updateImpl(...).
3184  ChangeStatus updateImpl(Attributor &A) override {
3185  // If the argument is readnone we are done as there are no accesses via the
3186  // argument.
3187  auto &MemBehaviorAA =
3188  A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3189  if (MemBehaviorAA.isAssumedReadNone()) {
3190  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3191  return ChangeStatus::UNCHANGED;
3192  }
3193 
3194  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3195  const auto &NoAliasAA =
3196  A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3197 
3198  AAResults *AAR = nullptr;
3199  if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3200  NoAliasAA)) {
3201  LLVM_DEBUG(
3202  dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3203  return ChangeStatus::UNCHANGED;
3204  }
3205 
3206  return indicatePessimisticFixpoint();
3207  }
3208 
3209  /// See AbstractAttribute::trackStatistics()
3210  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3211 };
3212 
3213 /// NoAlias attribute for function return value.
3214 struct AANoAliasReturned final : AANoAliasImpl {
3215  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3216  : AANoAliasImpl(IRP, A) {}
3217 
3218  /// See AbstractAttribute::initialize(...).
3219  void initialize(Attributor &A) override {
3220  AANoAliasImpl::initialize(A);
3221  Function *F = getAssociatedFunction();
3222  if (!F || F->isDeclaration())
3223  indicatePessimisticFixpoint();
3224  }
3225 
3226  /// See AbstractAttribute::updateImpl(...).
3227  virtual ChangeStatus updateImpl(Attributor &A) override {
3228 
3229  auto CheckReturnValue = [&](Value &RV) -> bool {
3230  if (Constant *C = dyn_cast<Constant>(&RV))
3231  if (C->isNullValue() || isa<UndefValue>(C))
3232  return true;
3233 
3234  /// For now, we can only deduce noalias if we have call sites.
3235  /// FIXME: add more support.
3236  if (!isa<CallBase>(&RV))
3237  return false;
3238 
3239  const IRPosition &RVPos = IRPosition::value(RV);
3240  const auto &NoAliasAA =
3241  A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3242  if (!NoAliasAA.isAssumedNoAlias())
3243  return false;
3244 
3245  const auto &NoCaptureAA =
3246  A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3247  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3248  };
3249 
3250  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3251  return indicatePessimisticFixpoint();
3252 
3253  return ChangeStatus::UNCHANGED;
3254  }
3255 
3256  /// See AbstractAttribute::trackStatistics()
3257  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3258 };
3259 
3260 /// NoAlias attribute deduction for a call site return value.
3261 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3262  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3263  : AANoAliasImpl(IRP, A) {}
3264 
3265  /// See AbstractAttribute::initialize(...).
3266  void initialize(Attributor &A) override {
3267  AANoAliasImpl::initialize(A);
3268  Function *F = getAssociatedFunction();
3269  if (!F || F->isDeclaration())
3270  indicatePessimisticFixpoint();
3271  }
3272 
3273  /// See AbstractAttribute::updateImpl(...).
3274  ChangeStatus updateImpl(Attributor &A) override {
3275  // TODO: Once we have call site specific value information we can provide
3276  // call site specific liveness information and then it makes
3277  // sense to specialize attributes for call site arguments instead of
3278  // redirecting requests to the callee argument.
3279  Function *F = getAssociatedFunction();
3280  const IRPosition &FnPos = IRPosition::returned(*F);
3281  auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3282  return clampStateAndIndicateChange(getState(), FnAA.getState());
3283  }
3284 
3285  /// See AbstractAttribute::trackStatistics()
3286  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3287 };
3288 
3289 /// -------------------AAIsDead Function Attribute-----------------------
3290 
3291 struct AAIsDeadValueImpl : public AAIsDead {
3292  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3293 
3294  /// See AAIsDead::isAssumedDead().
3295  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3296 
3297  /// See AAIsDead::isKnownDead().
3298  bool isKnownDead() const override { return isKnown(IS_DEAD); }
3299 
3300  /// See AAIsDead::isAssumedDead(BasicBlock *).
3301  bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3302 
3303  /// See AAIsDead::isKnownDead(BasicBlock *).
3304  bool isKnownDead(const BasicBlock *BB) const override { return false; }
3305 
3306  /// See AAIsDead::isAssumedDead(Instruction *I).
3307  bool isAssumedDead(const Instruction *I) const override {
3308  return I == getCtxI() && isAssumedDead();
3309  }
3310 
3311  /// See AAIsDead::isKnownDead(Instruction *I).
3312  bool isKnownDead(const Instruction *I) const override {
3313  return isAssumedDead(I) && isKnownDead();
3314  }
3315 
3316  /// See AbstractAttribute::getAsStr().
3317  const std::string getAsStr() const override {
3318  return isAssumedDead() ? "assumed-dead" : "assumed-live";
3319  }
3320 
3321  /// Check if all uses are assumed dead.
3322  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3323  // Callers might not check the type; a void value has no uses.
3324  if (V.getType()->isVoidTy())
3325  return true;
3326 
3327  // If we replace a value with a constant there are no uses left afterwards.
3328  if (!isa<Constant>(V)) {
3329  bool UsedAssumedInformation = false;
3330  Optional<Constant *> C =
3331  A.getAssumedConstant(V, *this, UsedAssumedInformation);
3332  if (!C.hasValue() || *C)
3333  return true;
3334  }
3335 
3336  auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3337  // Explicitly set the dependence class to required because we want a long
3338  // chain of N dependent instructions to be considered live as soon as one is
3339  // without going through N update cycles. This is not required for
3340  // correctness.
3341  return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3342  DepClassTy::REQUIRED);
3343  }
3344 
3345  /// Determine if \p I is assumed to be side-effect free.
3346  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3347  if (!I || wouldInstructionBeTriviallyDead(I))
3348  return true;
3349 
3350  auto *CB = dyn_cast<CallBase>(I);
3351  if (!CB || isa<IntrinsicInst>(CB))
3352  return false;
3353 
3354  const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3355  const auto &NoUnwindAA =
3356  A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3357  if (!NoUnwindAA.isAssumedNoUnwind())
3358  return false;
3359  if (!NoUnwindAA.isKnownNoUnwind())
3360  A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3361 
3362  const auto &MemBehaviorAA =
3363  A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3364  if (MemBehaviorAA.isAssumedReadOnly()) {
3365  if (!MemBehaviorAA.isKnownReadOnly())
3366  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3367  return true;
3368  }
3369  return false;
3370  }
3371 };
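
// For intuition (hypothetical example): isAssumedSideEffectFree() accepts a
// call if the callee is assumed nounwind and readonly, e.g.,
//   int sum(const int *A, int N);                // deduced readonly+nounwind
//   void user(const int *A, int N) { sum(A, N); } // result unused
// The call to sum() neither writes memory nor unwinds, so once all of its
// uses (here: none) are assumed dead, the call itself can be assumed dead.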
3372 
3373 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3374  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3375  : AAIsDeadValueImpl(IRP, A) {}
3376 
3377  /// See AbstractAttribute::initialize(...).
3378  void initialize(Attributor &A) override {
3379  if (isa<UndefValue>(getAssociatedValue())) {
3380  indicatePessimisticFixpoint();
3381  return;
3382  }
3383 
3384  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3385  if (!isAssumedSideEffectFree(A, I)) {
3386  if (!isa_and_nonnull<StoreInst>(I))
3387  indicatePessimisticFixpoint();
3388  else
3389  removeAssumedBits(HAS_NO_EFFECT);
3390  }
3391  }
3392 
3393  bool isDeadStore(Attributor &A, StoreInst &SI) {
3394  // The LangRef now states that a volatile store is not UB/dead, so skip them.
3395  if (SI.isVolatile())
3396  return false;
3397 
3398  bool UsedAssumedInformation = false;
3399  SmallSetVector<Value *, 4> PotentialCopies;
3400  if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3401  UsedAssumedInformation))
3402  return false;
3403  return llvm::all_of(PotentialCopies, [&](Value *V) {
3404  return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3405  UsedAssumedInformation);
3406  });
3407  }
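
  // For intuition (hypothetical example): in
  //   void f() { int X; X = 42; }
  // the stored value has no potential copies at all (nothing ever loads X),
  // so the all_of check above is trivially satisfied and the non-volatile
  // store qualifies as a dead store.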
3408 
3409  /// See AbstractAttribute::updateImpl(...).
3410  ChangeStatus updateImpl(Attributor &A) override {
3411  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3412  if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3413  if (!isDeadStore(A, *SI))
3414  return indicatePessimisticFixpoint();
3415  } else {
3416  if (!isAssumedSideEffectFree(A, I))
3417  return indicatePessimisticFixpoint();
3418  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3419  return indicatePessimisticFixpoint();
3420  }
3421  return ChangeStatus::UNCHANGED;
3422  }
3423 
3424  /// See AbstractAttribute::manifest(...).
3425  ChangeStatus manifest(Attributor &A) override {
3426  Value &V = getAssociatedValue();
3427  if (auto *I = dyn_cast<Instruction>(&V)) {
3428  // If we get here we basically know the users are all dead. We check
3429  // isAssumedSideEffectFree again because it might not hold; in that case
3430  // only the users are dead but the instruction (= the call) is still
3431  // needed.
3432  if (isa<StoreInst>(I) ||
3433  (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3434  A.deleteAfterManifest(*I);
3435  return ChangeStatus::CHANGED;
3436  }
3437  }
3438  if (V.use_empty())
3439  return ChangeStatus::UNCHANGED;
3440 
3441  bool UsedAssumedInformation = false;
3442  Optional<Constant *> C =
3443  A.getAssumedConstant(V, *this, UsedAssumedInformation);
3444  if (C.hasValue() && C.getValue())
3445  return ChangeStatus::UNCHANGED;
3446 
3447  // Replace the value with undef as it is dead but keep droppable uses around
3448  // as they provide information we don't want to give up on just yet.
3449  UndefValue &UV = *UndefValue::get(V.getType());
3450  bool AnyChange =
3451  A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
3452  return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3453  }
3454 
3455  /// See AbstractAttribute::trackStatistics()
3456  void trackStatistics() const override {
3457  STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3458  }
3459 };
3460 
3461 struct AAIsDeadArgument : public AAIsDeadFloating {
3462  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3463  : AAIsDeadFloating(IRP, A) {}
3464 
3465  /// See AbstractAttribute::initialize(...).
3466  void initialize(Attributor &A) override {
3467  if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3468  indicatePessimisticFixpoint();
3469  }
3470 
3471  /// See AbstractAttribute::manifest(...).
3472  ChangeStatus manifest(Attributor &A) override {
3473  ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3474  Argument &Arg = *getAssociatedArgument();
3475  if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3476  if (A.registerFunctionSignatureRewrite(
3477  Arg, /* ReplacementTypes */ {},
3478  Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3479  Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3480  Arg.dropDroppableUses();
3481  return ChangeStatus::CHANGED;
3482  }
3483  return Changed;
3484  }
3485 
3486  /// See AbstractAttribute::trackStatistics()
3487  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3488 };
3489 
3490 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3491  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3492  : AAIsDeadValueImpl(IRP, A) {}
3493 
3494  /// See AbstractAttribute::initialize(...).
3495  void initialize(Attributor &A) override {
3496  if (isa<UndefValue>(getAssociatedValue()))
3497  indicatePessimisticFixpoint();
3498  }
3499 
3500  /// See AbstractAttribute::updateImpl(...).
3501  ChangeStatus updateImpl(Attributor &A) override {
3502  // TODO: Once we have call site specific value information we can provide
3503  // call site specific liveness information and then it makes
3504  // sense to specialize attributes for call site arguments instead of
3505  // redirecting requests to the callee argument.
3506  Argument *Arg = getAssociatedArgument();
3507  if (!Arg)
3508  return indicatePessimisticFixpoint();
3509  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3510  auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3511  return clampStateAndIndicateChange(getState(), ArgAA.getState());
3512  }
3513 
3514  /// See AbstractAttribute::manifest(...).
3515  ChangeStatus manifest(Attributor &A) override {
3516  CallBase &CB = cast<CallBase>(getAnchorValue());
3517  Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3518  assert(!isa<UndefValue>(U.get()) &&
3519  "Expected undef values to be filtered out!");
3520  UndefValue &UV = *UndefValue::get(U->getType());
3521  if (A.changeUseAfterManifest(U, UV))
3522  return ChangeStatus::CHANGED;
3523  return ChangeStatus::UNCHANGED;
3524  }
3525 
3526  /// See AbstractAttribute::trackStatistics()
3527  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3528 };
3529 
3530 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3531  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3532  : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3533 
3534  /// See AAIsDead::isAssumedDead().
3535  bool isAssumedDead() const override {
3536  return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3537  }
3538 
3539  /// See AbstractAttribute::initialize(...).
3540  void initialize(Attributor &A) override {
3541  if (isa<UndefValue>(getAssociatedValue())) {
3542  indicatePessimisticFixpoint();
3543  return;
3544  }
3545 
3546  // We track this separately as a secondary state.
3547  IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3548  }
3549 
3550  /// See AbstractAttribute::updateImpl(...).
3551  ChangeStatus updateImpl(Attributor &A) override {
3552  ChangeStatus Changed = ChangeStatus::UNCHANGED;
3553  if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3554  IsAssumedSideEffectFree = false;
3555  Changed = ChangeStatus::CHANGED;
3556  }
3557  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3558  return indicatePessimisticFixpoint();
3559  return Changed;
3560  }
3561 
3562  /// See AbstractAttribute::trackStatistics()
3563  void trackStatistics() const override {
3564  if (IsAssumedSideEffectFree)
3565  STATS_DECLTRACK_CSRET_ATTR(IsDead)
3566  else
3567  STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3568  }
3569 
3570  /// See AbstractAttribute::getAsStr().
3571  const std::string getAsStr() const override {
3572  return isAssumedDead()
3573  ? "assumed-dead"
3574  : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3575  }
3576 
3577 private:
3578  bool IsAssumedSideEffectFree;
3579 };
3580 
3581 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3582  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3583  : AAIsDeadValueImpl(IRP, A) {}
3584 
3585  /// See AbstractAttribute::updateImpl(...).
3586  ChangeStatus updateImpl(Attributor &A) override {
3587 
3588  bool UsedAssumedInformation = false;
3589  A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3590  {Instruction::Ret}, UsedAssumedInformation);
3591 
3592  auto PredForCallSite = [&](AbstractCallSite ACS) {
3593  if (ACS.isCallbackCall() || !ACS.getInstruction())
3594  return false;
3595  return areAllUsesAssumedDead(A, *ACS.getInstruction());
3596  };
3597 
3598  bool AllCallSitesKnown;
3599  if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3600  AllCallSitesKnown))
3601  return indicatePessimisticFixpoint();
3602 
3603  return ChangeStatus::UNCHANGED;
3604  }
3605 
3606  /// See AbstractAttribute::manifest(...).
3607  ChangeStatus manifest(Attributor &A) override {
3608  // TODO: Rewrite the signature to return void?
3609  bool AnyChange = false;
3610  UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3611  auto RetInstPred = [&](Instruction &I) {
3612  ReturnInst &RI = cast<ReturnInst>(I);
3613  if (!isa<UndefValue>(RI.getReturnValue()))
3614  AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3615  return true;
3616  };
3617  bool UsedAssumedInformation = false;
3618  A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3619  UsedAssumedInformation);
3620  return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3621  }
3622 
3623  /// See AbstractAttribute::trackStatistics()
3624  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3625 };
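
// For intuition (hypothetical example, N being some internal counter): given
//   static int bump() { return ++N; }
// where every known call site ignores the returned value, AAIsDeadReturned
// rewrites the operand of each `ret` to undef in manifest(); the call sites
// themselves survive because incrementing N is a visible side effect.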
3626 
3627 struct AAIsDeadFunction : public AAIsDead {
3628  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3629 
3630  /// See AbstractAttribute::initialize(...).
3631  void initialize(Attributor &A) override {
3632  const Function *F = getAnchorScope();
3633  if (F && !F->isDeclaration()) {
3634  // We only want to compute liveness once. If the function is not part of
3635  // the SCC, skip it.
3636  if (A.isRunOn(*const_cast<Function *>(F))) {
3637  ToBeExploredFrom.insert(&F->getEntryBlock().front());
3638  assumeLive(A, F->getEntryBlock());
3639  } else {
3640  indicatePessimisticFixpoint();
3641  }
3642  }
3643  }
3644 
3645  /// See AbstractAttribute::getAsStr().
3646  const std::string getAsStr() const override {
3647  return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3648  std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3649  std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3650  std::to_string(KnownDeadEnds.size()) + "]";
3651  }
3652 
3653  /// See AbstractAttribute::manifest(...).
3654  ChangeStatus manifest(Attributor &A) override {
3655  assert(getState().isValidState() &&
3656  "Attempted to manifest an invalid state!");
3657 
3658  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3659  Function &F = *getAnchorScope();
3660 
3661  if (AssumedLiveBlocks.empty()) {
3662  A.deleteAfterManifest(F);
3663  return ChangeStatus::CHANGED;
3664  }
3665 
3666  // Flag to determine if we can change an invoke to a call assuming the
3667  // callee is nounwind. This is not possible if the personality of the
3668  // function allows catching asynchronous exceptions.
3669  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3670 
3671  KnownDeadEnds.set_union(ToBeExploredFrom);
3672  for (const Instruction *DeadEndI : KnownDeadEnds) {
3673  auto *CB = dyn_cast<CallBase>(DeadEndI);
3674  if (!CB)
3675  continue;
3676  const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3677  *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3678  bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3679  if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3680  continue;
3681 
3682  if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3683  A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3684  else
3685  A.changeToUnreachableAfterManifest(
3686  const_cast<Instruction *>(DeadEndI->getNextNode()));
3687  HasChanged = ChangeStatus::CHANGED;
3688  }
3689 
3690  STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3691  for (BasicBlock &BB : F)
3692  if (!AssumedLiveBlocks.count(&BB)) {
3693  A.deleteAfterManifest(BB);
3694  ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3695  }
3696 
3697  return HasChanged;
3698  }
3699 
3700  /// See AbstractAttribute::updateImpl(...).
3701  ChangeStatus updateImpl(Attributor &A) override;
3702 
3703  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3704  return !AssumedLiveEdges.count(std::make_pair(From, To));
3705  }
3706 
3707  /// See AbstractAttribute::trackStatistics()
3708  void trackStatistics() const override {}
3709 
3710  /// Returns true if the function is assumed dead.
3711  bool isAssumedDead() const override { return false; }
3712 
3713  /// See AAIsDead::isKnownDead().
3714  bool isKnownDead() const override { return false; }
3715 
3716  /// See AAIsDead::isAssumedDead(BasicBlock *).
3717  bool isAssumedDead(const BasicBlock *BB) const override {
3718  assert(BB->getParent() == getAnchorScope() &&
3719  "BB must be in the same anchor scope function.");
3720 
3721  if (!getAssumed())
3722  return false;
3723  return !AssumedLiveBlocks.count(BB);
3724  }
3725 
3726  /// See AAIsDead::isKnownDead(BasicBlock *).
3727  bool isKnownDead(const BasicBlock *BB) const override {
3728  return getKnown() && isAssumedDead(BB);
3729  }
3730 
3731  /// See AAIsDead::isAssumedDead(Instruction *I).
3732  bool isAssumedDead(const Instruction *I) const override {
3733  assert(I->getParent()->getParent() == getAnchorScope() &&
3734  "Instruction must be in the same anchor scope function.");
3735 
3736  if (!getAssumed())
3737  return false;
3738 
3739  // If it is not in AssumedLiveBlocks then it is for sure dead. Otherwise, it
3740  // can still be dead if it comes after a noreturn call in a live block.
3741  if (!AssumedLiveBlocks.count(I->getParent()))
3742  return true;
3743 
3744  // If it is not after a liveness barrier it is live.
3745  const Instruction *PrevI = I->getPrevNode();
3746  while (PrevI) {
3747  if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3748  return true;
3749  PrevI = PrevI->getPrevNode();
3750  }
3751  return false;
3752  }
3753 
3754  /// See AAIsDead::isKnownDead(Instruction *I).
3755  bool isKnownDead(const Instruction *I) const override {
3756  return getKnown() && isAssumedDead(I);
3757  }
3758 
3759  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3760  /// that internal functions called from \p BB should now be looked at.
3761  bool assumeLive(Attributor &A, const BasicBlock &BB) {
3762  if (!AssumedLiveBlocks.insert(&BB).second)
3763  return false;
3764 
3765  // We assume that all of BB is (probably) live now and if there are calls to
3766  // internal functions we will assume that those are now live as well. This
3767  // is a performance optimization for blocks with calls to a lot of internal
3768  // functions. It can however cause dead functions to be treated as live.
3769  for (const Instruction &I : BB)
3770  if (const auto *CB = dyn_cast<CallBase>(&I))
3771  if (const Function *F = CB->getCalledFunction())
3772  if (F->hasLocalLinkage())
3773  A.markLiveInternalFunction(*F);
3774  return true;
3775  }
3776 
3777  /// Collection of instructions that need to be explored again, e.g., we
3778  /// did assume they do not transfer control to (one of their) successors.
3779  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3780 
3781  /// Collection of instructions that are known to not transfer control.
3782  SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3783 
3784  /// Collection of all assumed live edges
3785  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3786 
3787  /// Collection of all assumed live BasicBlocks.
3788  DenseSet<const BasicBlock *> AssumedLiveBlocks;
3789 };
3790 
3791 static bool
3792 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3793  AbstractAttribute &AA,
3794  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3795  const IRPosition &IPos = IRPosition::callsite_function(CB);
3796 
3797  const auto &NoReturnAA =
3798  A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3799  if (NoReturnAA.isAssumedNoReturn())
3800  return !NoReturnAA.isKnownNoReturn();
3801  if (CB.isTerminator())
3802  AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3803  else
3804  AliveSuccessors.push_back(CB.getNextNode());
3805  return false;
3806 }
3807 
3808 static bool
3809 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3810  AbstractAttribute &AA,
3811  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3812  bool UsedAssumedInformation =
3813  identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3814 
3815  // First, determine if we can change an invoke to a call assuming the
3816  // callee is nounwind. This is not possible if the personality of the
3817  // function allows catching asynchronous exceptions.
3818  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3819  AliveSuccessors.push_back(&II.getUnwindDest()->front());
3820  } else {
3821  const IRPosition &IPos = IRPosition::callsite_function(II);
3822  const auto &AANoUnw =
3823  A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3824  if (AANoUnw.isAssumedNoUnwind()) {
3825  UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3826  } else {
3827  AliveSuccessors.push_back(&II.getUnwindDest()->front());
3828  }
3829  }
3830  return UsedAssumedInformation;
3831 }
3832 
3833 static bool
3834 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3835  AbstractAttribute &AA,
3836  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3837  bool UsedAssumedInformation = false;
3838  if (BI.getNumSuccessors() == 1) {
3839  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3840  } else {
3841  Optional<Constant *> C =
3842  A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3843  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3844  // No value yet, assume both edges are dead.
3845  } else if (isa_and_nonnull<ConstantInt>(*C)) {
3846  const BasicBlock *SuccBB =
3847  BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3848  AliveSuccessors.push_back(&SuccBB->front());
3849  } else {
3850  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3851  AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3852  UsedAssumedInformation = false;
3853  }
3854  }
3855  return UsedAssumedInformation;
3856 }
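
// Worked instance of the constant-condition case above (hypothetical IR):
// for `br i1 %c, label %T, label %F`, once %c is assumed to be `i1 true`
// (getZExtValue() == 1), only successor 1 - 1 == 0, i.e. %T, is added as
// alive; while %c is still unknown, no edge is added and the branch remains
// an exploration point.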
3857 
3858 static bool
3859 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3860  AbstractAttribute &AA,
3861  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3862  bool UsedAssumedInformation = false;
3863  Optional<Constant *> C =
3864  A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3865  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3866  // No value yet, assume all edges are dead.
3867  } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3868  for (auto &CaseIt : SI.cases()) {
3869  if (CaseIt.getCaseValue() == C.getValue()) {
3870  AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3871  return UsedAssumedInformation;
3872  }
3873  }
3874  AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3875  return UsedAssumedInformation;
3876  } else {
3877  for (const BasicBlock *SuccBB : successors(SI.getParent()))
3878  AliveSuccessors.push_back(&SuccBB->front());
3879  }
3880  return UsedAssumedInformation;
3881 }
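
// Worked instance (hypothetical IR): for `switch i32 %x` with a case
// `i32 3, label %BB3`, once %x is assumed to be the constant 3 only %BB3 is
// alive; if the assumed constant matches no case, only the default
// destination is alive; a non-constant %x keeps every successor alive.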
3882 
3883 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3884  ChangeStatus Change = ChangeStatus::UNCHANGED;
3885 
3886  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3887  << getAnchorScope()->size() << "] BBs and "
3888  << ToBeExploredFrom.size() << " exploration points and "
3889  << KnownDeadEnds.size() << " known dead ends\n");
3890 
3891  // Copy and clear the list of instructions we need to explore from. It is
3892  // refilled with instructions the next update has to look at.
3893  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3894  ToBeExploredFrom.end());
3895  decltype(ToBeExploredFrom) NewToBeExploredFrom;
3896 
3897  SmallVector<const Instruction *, 8> AliveSuccessors;
3898  while (!Worklist.empty()) {
3899  const Instruction *I = Worklist.pop_back_val();
3900  LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3901 
3902  // Fast forward for uninteresting instructions. We could look for UB here
3903  // though.
3904  while (!I->isTerminator() && !isa<CallBase>(I))
3905  I = I->getNextNode();
3906 
3907  AliveSuccessors.clear();
3908 
3909  bool UsedAssumedInformation = false;
3910  switch (I->getOpcode()) {
3911  // TODO: look for (assumed) UB to backwards propagate "deadness".
3912  default:
3913  assert(I->isTerminator() &&
3914  "Expected non-terminators to be handled already!");
3915  for (const BasicBlock *SuccBB : successors(I->getParent()))
3916  AliveSuccessors.push_back(&SuccBB->front());
3917  break;
3918  case Instruction::Call:
3919  UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3920  *this, AliveSuccessors);
3921  break;
3922  case Instruction::Invoke:
3923  UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3924  *this, AliveSuccessors);
3925  break;
3926  case Instruction::Br:
3927  UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3928  *this, AliveSuccessors);
3929  break;
3930  case Instruction::Switch:
3931  UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3932  *this, AliveSuccessors);
3933  break;
3934  }
3935 
3936  if (UsedAssumedInformation) {
3937  NewToBeExploredFrom.insert(I);
3938  } else if (AliveSuccessors.empty() ||
3939  (I->isTerminator() &&
3940  AliveSuccessors.size() < I->getNumSuccessors())) {
3941  if (KnownDeadEnds.insert(I))
3942  Change = ChangeStatus::CHANGED;
3943  }
3944 
3945  LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3946  << AliveSuccessors.size() << " UsedAssumedInformation: "
3947  << UsedAssumedInformation << "\n");
3948 
3949  for (const Instruction *AliveSuccessor : AliveSuccessors) {
3950  if (!I->isTerminator()) {
3951  assert(AliveSuccessors.size() == 1 &&
3952  "Non-terminator expected to have a single successor!");
3953  Worklist.push_back(AliveSuccessor);
3954  } else {
3955  // Record the assumed live edge.
3956  auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3957  if (AssumedLiveEdges.insert(Edge).second)
3958  Change = ChangeStatus::CHANGED;
3959  if (assumeLive(A, *AliveSuccessor->getParent()))
3960  Worklist.push_back(AliveSuccessor);
3961  }
3962  }
3963  }
3964 
3965  // Check if the content of ToBeExploredFrom changed, ignore the order.
3966  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3967  llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3968  return !ToBeExploredFrom.count(I);
3969  })) {
3970  Change = ChangeStatus::CHANGED;
3971  ToBeExploredFrom = std::move(NewToBeExploredFrom);
3972  }
3973 
3974  // If we know everything is live there is no need to query for liveness.
3975  // Instead, indicating a pessimistic fixpoint will cause the state to be
3976  // "invalid" and all queries to be answered conservatively without lookups.
3977  // To be in this state we have to (1) have finished the exploration, (2) not
3978  // have ruled any unreachable code dead, and (3) not have discovered any
3979  // non-trivial dead end.
3980  if (ToBeExploredFrom.empty() &&
3981  getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3982  llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3983  return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3984  }))
3985  return indicatePessimisticFixpoint();
3986  return Change;
3987 }
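
// A hedged end-to-end example (hypothetical IR): in a block containing
//   call void @abort()   ; assumed noreturn
//   ret void
// the call becomes a known dead end, the trailing `ret` is never reached by
// the exploration above, and manifest() later replaces the instruction
// after the call with `unreachable`.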
3988 
3989 /// Liveness information for a call site.
3990 struct AAIsDeadCallSite final : AAIsDeadFunction {
3991  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3992  : AAIsDeadFunction(IRP, A) {}
3993 
3994  /// See AbstractAttribute::initialize(...).
3995  void initialize(Attributor &A) override {
3996  // TODO: Once we have call site specific value information we can provide
3997  // call site specific liveness information and then it makes
3998  // sense to specialize attributes for call sites instead of
3999  // redirecting requests to the callee.
4000  llvm_unreachable("Abstract attributes for liveness are not "
4001  "supported for call sites yet!");
4002  }
4003 
4004  /// See AbstractAttribute::updateImpl(...).
4005  ChangeStatus updateImpl(Attributor &A) override {
4006  return indicatePessimisticFixpoint();
4007  }
4008 
4009  /// See AbstractAttribute::trackStatistics()
4010  void trackStatistics() const override {}
4011 };
4012 
4013 /// -------------------- Dereferenceable Argument Attribute --------------------
4014 
4015 struct AADereferenceableImpl : AADereferenceable {
4016  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4017  : AADereferenceable(IRP, A) {}
4018  using StateType = DerefState;
4019 
4020  /// See AbstractAttribute::initialize(...).
4021  void initialize(Attributor &A) override {
4022  SmallVector<Attribute, 4> Attrs;
4023  getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4024  Attrs, /* IgnoreSubsumingPositions */ false, &A);
4025  for (const Attribute &Attr : Attrs)
4026  takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4027 
4028  const IRPosition &IRP = this->getIRPosition();
4029  NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4030 
4031  bool CanBeNull, CanBeFreed;
4032  takeKnownDerefBytesMaximum(
4033  IRP.getAssociatedValue().getPointerDereferenceableBytes(
4034  A.getDataLayout(), CanBeNull, CanBeFreed));
4035 
4036  bool IsFnInterface = IRP.isFnInterfaceKind();
4037  Function *FnScope = IRP.getAnchorScope();
4038  if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4039  indicatePessimisticFixpoint();
4040  return;
4041  }
4042 
4043  if (Instruction *CtxI = getCtxI())
4044  followUsesInMBEC(*this, A, getState(), *CtxI);
4045  }
4046 
4047  /// See AbstractAttribute::getState()
4048  /// {
4049  StateType &getState() override { return *this; }
4050  const StateType &getState() const override { return *this; }
4051  /// }
4052 
4053  /// Helper function for collecting accessed bytes in must-be-executed-context
4054  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4055  DerefState &State) {
4056  const Value *UseV = U->get();
4057  if (!UseV->getType()->isPointerTy())
4058  return;
4059 
4060  Type *PtrTy = UseV->getType();
4061  const DataLayout &DL = A.getDataLayout();
4062  int64_t Offset;
4063  if (const Value *Base = getBasePointerOfAccessPointerOperand(
4064  I, Offset, DL, /*AllowNonInbounds*/ true)) {
4065  if (Base == &getAssociatedValue() &&
4066  getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4067  uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4068  State.addAccessedBytes(Offset, Size);
4069  }
4070  }
4071  }
4072 
4073  /// See followUsesInMBEC
4074  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4075  AADereferenceable::StateType &State) {
4076  bool IsNonNull = false;
4077  bool TrackUse = false;
4078  int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4079  A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4080  LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4081  << " for instruction " << *I << "\n");
4082 
4083  addAccessedBytesForUse(A, U, I, State);
4084  State.takeKnownDerefBytesMaximum(DerefBytes);
4085  return TrackUse;
4086  }
4087 
4088  /// See AbstractAttribute::manifest(...).
4089  ChangeStatus manifest(Attributor &A) override {
4090  ChangeStatus Change = AADereferenceable::manifest(A);
4091  if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4092  removeAttrs({Attribute::DereferenceableOrNull});
4093  return ChangeStatus::CHANGED;
4094  }
4095  return Change;
4096  }
4097 
4098  void getDeducedAttributes(LLVMContext &Ctx,
4099  SmallVectorImpl<Attribute> &Attrs) const override {
4100  // TODO: Add *_globally support
4101  if (isAssumedNonNull())
4102  Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4103  Ctx, getAssumedDereferenceableBytes()));
4104  else
4105  Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4106  Ctx, getAssumedDereferenceableBytes()));
4107  }
4108 
4109  /// See AbstractAttribute::getAsStr().
4110  const std::string getAsStr() const override {
4111  if (!getAssumedDereferenceableBytes())
4112  return "unknown-dereferenceable";
4113  return std::string("dereferenceable") +
4114  (isAssumedNonNull() ? "" : "_or_null") +
4115  (isAssumedGlobal() ? "_globally" : "") + "<" +
4116  std::to_string(getKnownDereferenceableBytes()) + "-" +
4117  std::to_string(getAssumedDereferenceableBytes()) + ">";
4118  }
4119 };
4120 
4121 /// Dereferenceable attribute for a floating value.
4122 struct AADereferenceableFloating : AADereferenceableImpl {
4123  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4124  : AADereferenceableImpl(IRP, A) {}
4125 
4126  /// See AbstractAttribute::updateImpl(...).
4127  ChangeStatus updateImpl(Attributor &A) override {
4128  const DataLayout &DL = A.getDataLayout();
4129 
4130  auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4131  bool Stripped) -> bool {
4132  unsigned IdxWidth =
4133  DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4134  APInt Offset(IdxWidth, 0);
4135  const Value *Base =
4136  stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4137 
4138  const auto &AA = A.getAAFor<AADereferenceable>(
4139  *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4140  int64_t DerefBytes = 0;
4141  if (!Stripped && this == &AA) {
4142  // Use IR information if we did not strip anything.
4143  // TODO: track globally.
4144  bool CanBeNull, CanBeFreed;
4145  DerefBytes =
4146  Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4147  T.GlobalState.indicatePessimisticFixpoint();
4148  } else {
4149  const DerefState &DS = AA.getState();
4150  DerefBytes = DS.DerefBytesState.getAssumed();
4151  T.GlobalState &= DS.GlobalState;
4152  }
4153 
4154  // For now we do not try to "increase" dereferenceability due to negative
4155  // indices as we first have to come up with code to deal with loops and
4156  // for overflows of the dereferenceable bytes.
4157  int64_t OffsetSExt = Offset.getSExtValue();
4158  if (OffsetSExt < 0)
4159  OffsetSExt = 0;
4160 
4161  T.takeAssumedDerefBytesMinimum(
4162  std::max(int64_t(0), DerefBytes - OffsetSExt));
4163 
4164  if (this == &AA) {
4165  if (!Stripped) {
4166  // If nothing was stripped IR information is all we got.
4167  T.takeKnownDerefBytesMaximum(
4168  std::max(int64_t(0), DerefBytes - OffsetSExt));
4169  T.indicatePessimisticFixpoint();
4170  } else if (OffsetSExt > 0) {
4171  // If something was stripped but there is circular reasoning we look
4172  // at the offset. If it is positive we basically decrease the
4173  // dereferenceable bytes in a circular loop now, which would slowly
4174  // drive them down to the known value anyway, so we accelerate that
4175  // convergence here.
4176  T.indicatePessimisticFixpoint();
4177  }
4178  }
4179 
4180  return T.isValidState();
4181  };
4182 
4183  DerefState T;
4184  if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4185  VisitValueCB, getCtxI()))
4186  return indicatePessimisticFixpoint();
4187 
4188  return clampStateAndIndicateChange(getState(), T);
4189  }
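
  // Worked instance of the offset handling above (hypothetical values): if
  // the traversal reaches a base known dereferenceable(16) with accumulated
  // offset 4, the value is assumed dereferenceable for
  // max(0, 16 - 4) == 12 bytes; negative accumulated offsets are clamped to
  // 0 instead of being used to "grow" the range.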
4190 
4191  /// See AbstractAttribute::trackStatistics()
4192  void trackStatistics() const override {
4193  STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4194  }
4195 };
4196 
4197 /// Dereferenceable attribute for a return value.
4198 struct AADereferenceableReturned final
4199  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4200  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4201  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4202  IRP, A) {}
4203 
4204  /// See AbstractAttribute::trackStatistics()
4205  void trackStatistics() const override {
4206  STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4207  }
4208 };
4209 
4210 /// Dereferenceable attribute for an argument
4211 struct AADereferenceableArgument final
4212  : AAArgumentFromCallSiteArguments<AADereferenceable,
4213  AADereferenceableImpl> {
4214  using Base =
4215  AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4216  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4217  : Base(IRP, A) {}
4218 
4219  /// See AbstractAttribute::trackStatistics()
4220  void trackStatistics() const override {
4221  STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4222  }
4223 };
4224 
4225 /// Dereferenceable attribute for a call site argument.
4226 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4227  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4228  : AADereferenceableFloating(IRP, A) {}
4229 
4230  /// See AbstractAttribute::trackStatistics()
4231  void trackStatistics() const override {
4232  STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4233  }
4234 };
4235 
4236 /// Dereferenceable attribute deduction for a call site return value.
4237 struct AADereferenceableCallSiteReturned final
4238  : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4239  using Base =
4240  AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4241  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4242  : Base(IRP, A) {}
4243 
4244  /// See AbstractAttribute::trackStatistics()
4245  void trackStatistics() const override {
4246  STATS_DECLTRACK_CS_ATTR(dereferenceable);
4247  }
4248 };
4249 
4250 // ------------------------ Align Argument Attribute ------------------------
4251 
4252 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4253  Value &AssociatedValue, const Use *U,
4254  const Instruction *I, bool &TrackUse) {
4255  // We need to follow common pointer manipulation uses to the accesses they
4256  // feed into.
4257  if (isa<CastInst>(I)) {
4258  // Follow all but ptr2int casts.
4259  TrackUse = !isa<PtrToIntInst>(I);
4260  return 0;
4261  }
4262  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4263  if (GEP->hasAllConstantIndices())
4264  TrackUse = true;
4265  return 0;
4266  }
4267 
4268  MaybeAlign MA;
4269  if (const auto *CB = dyn_cast<CallBase>(I)) {
4270  if (CB->isBundleOperand(U) || CB->isCallee(U))
4271  return 0;
4272 
4273  unsigned ArgNo = CB->getArgOperandNo(U);
4274  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4275  // As long as we only use known information there is no need to track
4276  // dependences here.
4277  auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4278  MA = MaybeAlign(AlignAA.getKnownAlign());
4279  }
4280 
4281  const DataLayout &DL = A.getDataLayout();
4282  const Value *UseV = U->get();
4283  if (auto *SI = dyn_cast<StoreInst>(I)) {
4284  if (SI->getPointerOperand() == UseV)
4285  MA = SI->getAlign();
4286  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4287  if (LI->getPointerOperand() == UseV)
4288  MA = LI->getAlign();
4289  }
4290 
4291  if (!MA || *MA <= QueryingAA.getKnownAlign())
4292  return 0;
4293 
4294  unsigned Alignment = MA->value();
4295  int64_t Offset;
4296 
4297  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4298  if (Base == &AssociatedValue) {
4299  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4300  // So we can say that the maximum power of two which is a divisor of
4301  // gcd(Offset, Alignment) is an alignment.
4302 
4303  uint32_t gcd =
4304  greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4305  Alignment = llvm::PowerOf2Floor(gcd);
4306  }
4307  }
4308 
4309  return Alignment;
4310 }
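
// Worked instance of the gcd reasoning above (hypothetical values): for an
// access through `Base + 4` that is known 16-byte aligned,
// greatestCommonDivisor(4, 16) == 4 and PowerOf2Floor(4) == 4, so Base
// itself can be assumed at least 4-byte aligned.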
4311 
4312 struct AAAlignImpl : AAAlign {
4313  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4314 
4315  /// See AbstractAttribute::initialize(...).
4316  void initialize(Attributor &A) override {
4317  SmallVector<Attribute, 4> Attrs;
4318  getAttrs({Attribute::Alignment}, Attrs);
4319  for (const Attribute &Attr : Attrs)
4320  takeKnownMaximum(Attr.getValueAsInt());
4321 
4322  Value &V = getAssociatedValue();
4323  // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4324  // use of the function pointer. This was caused by D73131. We want to
4325  // avoid this for function pointers especially because we iterate
4326  // their uses and int2ptr is not handled. It is not a correctness
4327  // problem though!
4328  if (!V.getType()->getPointerElementType()->isFunctionTy())
4329  takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4330 
4331  if (getIRPosition().isFnInterfaceKind() &&
4332  (!getAnchorScope() ||
4333  !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4334  indicatePessimisticFixpoint();
4335  return;
4336  }
4337 
4338  if (Instruction *CtxI = getCtxI())
4339  followUsesInMBEC(*this, A, getState(), *CtxI);
4340  }
4341 
4342  /// See AbstractAttribute::manifest(...).
4343  ChangeStatus manifest(Attributor &A) override {
4344  ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4345 
4346  // Check for users that allow alignment annotations.
4347  Value &AssociatedValue = getAssociatedValue();
4348  for (const Use &U : AssociatedValue.uses()) {
4349  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4350  if (SI->getPointerOperand() == &AssociatedValue)
4351  if (SI->getAlignment() < getAssumedAlign()) {
4352  STATS_DECLTRACK(AAAlign, Store,
4353  "Number of times alignment added to a store");
4354  SI->setAlignment(Align(getAssumedAlign()));
4355  LoadStoreChanged = ChangeStatus::CHANGED;
4356  }
4357  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4358  if (LI->getPointerOperand() == &AssociatedValue)
4359  if (LI->getAlignment() < getAssumedAlign()) {
4360  LI->setAlignment(Align(getAssumedAlign()));
4361  STATS_DECLTRACK(AAAlign, Load,
4362  "Number of times alignment added to a load");
4363  LoadStoreChanged = ChangeStatus::CHANGED;
4364  }
4365  }
4366  }
4367 
4368  ChangeStatus Changed = AAAlign::manifest(A);
4369 
4370  Align InheritAlign =
4371  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4372  if (InheritAlign >= getAssumedAlign())
4373  return LoadStoreChanged;
4374  return Changed | LoadStoreChanged;
4375  }
4376 
4377  // TODO: Provide a helper to determine the implied ABI alignment and check in
4378  // the existing manifest method and a new one for AAAlignImpl that value
4379  // to avoid making the alignment explicit if it did not improve.
4380 
4381  /// See AbstractAttribute::getDeducedAttributes
4382  virtual void
4383  getDeducedAttributes(LLVMContext &Ctx,
4384  SmallVectorImpl<Attribute> &Attrs) const override {
4385  if (getAssumedAlign() > 1)
4386  Attrs.emplace_back(
4387  Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4388  }
4389 
4390  /// See followUsesInMBEC
4391  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4392  AAAlign::StateType &State) {
4393  bool TrackUse = false;
4394 
4395  unsigned int KnownAlign =
4396  getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4397  State.takeKnownMaximum(KnownAlign);
4398 
4399  return TrackUse;
4400  }
4401 
4402  /// See AbstractAttribute::getAsStr().
4403  const std::string getAsStr() const override {
4404  return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4405  "-" + std::to_string(getAssumedAlign()) + ">")
4406  : "unknown-align";
4407  }
4408 };
4409 
4410 /// Align attribute for a floating value.
4411 struct AAAlignFloating : AAAlignImpl {
4412  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4413 
4414  /// See AbstractAttribute::updateImpl(...).
4415  ChangeStatus updateImpl(Attributor &A) override {
4416  const DataLayout &DL = A.getDataLayout();
4417 
4418  auto VisitValueCB = [&](Value &V, const Instruction *,
4419  AAAlign::StateType &T, bool Stripped) -> bool {
4420  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4421  DepClassTy::REQUIRED);
4422  if (!Stripped && this == &AA) {
4423  int64_t Offset;
4424  unsigned Alignment = 1;
4425  if (const Value *Base =
4426  GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4427  Align PA = Base->getPointerAlignment(DL);
4428  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4429  // So we can say that the maximum power of two which is a divisor of
4430  // gcd(Offset, Alignment) is an alignment.
4431 
4432  uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4433  uint32_t(PA.value()));
4434  Alignment = llvm::PowerOf2Floor(gcd);
4435  } else {
4436  Alignment = V.getPointerAlignment(DL).value();
4437  }
4438  // Use only IR information if we did not strip anything.
4439  T.takeKnownMaximum(Alignment);
4440  T.indicatePessimisticFixpoint();
4441  } else {
4442  // Use abstract attribute information.
4443  const AAAlign::StateType &DS = AA.getState();
4444  T ^= DS;
4445  }
4446  return T.isValidState();
4447  };
4448 
4449  StateType T;
4450  if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4451  VisitValueCB, getCtxI()))
4452  return indicatePessimisticFixpoint();
4453 
4454  // TODO: If we know we visited all incoming values, thus none are assumed
4455  // dead, we can take the known information from the state T.
4456  return clampStateAndIndicateChange(getState(), T);
4457  }
4458 
4459  /// See AbstractAttribute::trackStatistics()
4460  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4461 };
4462 
4463 /// Align attribute for function return value.
4464 struct AAAlignReturned final
4465  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4466  using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4467  AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4468 
4469  /// See AbstractAttribute::initialize(...).
4470  void initialize(Attributor &A) override {
4471  Base::initialize(A);
4472  Function *F = getAssociatedFunction();
4473  if (!F || F->isDeclaration())
4474  indicatePessimisticFixpoint();
4475  }
4476 
4477  /// See AbstractAttribute::trackStatistics()
4478  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4479 };
4480 
4481 /// Align attribute for function argument.
4482 struct AAAlignArgument final
4483  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4484  using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4485  AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4486 
4487  /// See AbstractAttribute::manifest(...).
4488  ChangeStatus manifest(Attributor &A) override {
4489  // If the associated argument is involved in a must-tail call we give up
4490  // because we would need to keep the argument alignments of caller and
4491  // callee in-sync. Just does not seem worth the trouble right now.
4492  if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4493  return ChangeStatus::UNCHANGED;
4494  return Base::manifest(A);
4495  }
4496 
4497  /// See AbstractAttribute::trackStatistics()
4498  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4499 };
4500 
4501 struct AAAlignCallSiteArgument final : AAAlignFloating {
4502  AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4503  : AAAlignFloating(IRP, A) {}
4504 
4505  /// See AbstractAttribute::manifest(...).
4506  ChangeStatus manifest(Attributor &A) override {
4507  // If the associated argument is involved in a must-tail call we give up
4508  // because we would need to keep the argument alignments of caller and
4509  // callee in-sync. Just does not seem worth the trouble right now.
4510  if (Argument *Arg = getAssociatedArgument())
4511  if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4512  return ChangeStatus::UNCHANGED;
4513  ChangeStatus Changed = AAAlignImpl::manifest(A);
4514  Align InheritAlign =
4515  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4516  if (InheritAlign >= getAssumedAlign())
4517  Changed = ChangeStatus::UNCHANGED;
4518  return Changed;
4519  }
4520 
4521  /// See AbstractAttribute::updateImpl(Attributor &A).
4522  ChangeStatus updateImpl(Attributor &A) override {
4523  ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4524  if (Argument *Arg = getAssociatedArgument()) {
4525  // We only take known information from the argument
4526  // so we do not need to track a dependence.
4527  const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4528  *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4529  takeKnownMaximum(ArgAlignAA.getKnownAlign());
4530  }
4531  return Changed;
4532  }
4533 
4534  /// See AbstractAttribute::trackStatistics()
4535  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4536 };
4537 
4538 /// Align attribute deduction for a call site return value.
4539 struct AAAlignCallSiteReturned final
4540  : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4541  using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4542  AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4543  : Base(IRP, A) {}
4544 
4545  /// See AbstractAttribute::initialize(...).
4546  void initialize(Attributor &A) override {
4547  Base::initialize(A);
4548  Function *F = getAssociatedFunction();
4549  if (!F || F->isDeclaration())
4550  indicatePessimisticFixpoint();
4551  }
4552 
4553  /// See AbstractAttribute::trackStatistics()
4554  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4555 };
4556 
4557 /// ------------------ Function No-Return Attribute ----------------------------
4558 struct AANoReturnImpl : public AANoReturn {
4559  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4560 
4561  /// See AbstractAttribute::initialize(...).
4562  void initialize(Attributor &A) override {
4563  AANoReturn::initialize(A);
4564  Function *F = getAssociatedFunction();
4565  if (!F || F->isDeclaration())
4566  indicatePessimisticFixpoint();
4567  }
4568 
4569  /// See AbstractAttribute::getAsStr().
4570  const std::string getAsStr() const override {
4571  return getAssumed() ? "noreturn" : "may-return";
4572  }
4573 
4574  /// See AbstractAttribute::updateImpl(Attributor &A).
4575  virtual ChangeStatus updateImpl(Attributor &A) override {
4576  auto CheckForNoReturn = [](Instruction &) { return false; };
4577  bool UsedAssumedInformation = false;
4578  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4579  {(unsigned)Instruction::Ret},
4580  UsedAssumedInformation))
4581  return indicatePessimisticFixpoint();
4582  return ChangeStatus::UNCHANGED;
4583  }
4584 };
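
// For intuition (hypothetical example): in
//   static void die(const char *Msg) { puts(Msg); exit(1); }
// every `ret` is preceded by the call to exit(1); once that call is assumed
// noreturn, the `ret` is assumed dead, checkForAllInstructions over
// {Instruction::Ret} succeeds with the always-false predicate, and the
// function keeps its assumed "noreturn" state.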
4585 
4586 struct AANoReturnFunction final : AANoReturnImpl {
4587  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4588  : AANoReturnImpl(IRP, A) {}
4589 
4590  /// See AbstractAttribute::trackStatistics()
4591  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4592 };
4593 
4594 /// NoReturn attribute deduction for a call site.
4595 struct AANoReturnCallSite final : AANoReturnImpl {
4596  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4597  : AANoReturnImpl(IRP, A) {}
4598 
4599  /// See AbstractAttribute::initialize(...).
4600  void initialize(Attributor &A) override {
4601  AANoReturnImpl::initialize(A);
4602  if (Function *F = getAssociatedFunction()) {
4603  const IRPosition &FnPos = IRPosition::function(*F);
4604  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4605  if (!FnAA.isAssumedNoReturn())
4606  indicatePessimisticFixpoint();
4607  }
4608  }
4609 
4610  /// See AbstractAttribute::updateImpl(...).
4611  ChangeStatus updateImpl(Attributor &A) override {
4612  // TODO: Once we have call site specific value information we can provide
4613  // call site specific liveness information and then it makes
4614  // sense to specialize attributes for call site arguments instead of
4615  // redirecting requests to the callee argument.
4616  Function *F = getAssociatedFunction();
4617  const IRPosition &FnPos = IRPosition::function(*F);
4618  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4619  return clampStateAndIndicateChange(getState(), FnAA.getState());
4620  }
4621 
4622  /// See AbstractAttribute::trackStatistics()
4623  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4624 };
4625 
4626 /// ----------------------- Variable Capturing ---------------------------------
4627 
4628 /// A class to hold the state for no-capture attributes.
4629 struct AANoCaptureImpl : public AANoCapture {
4630  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4631 
4632  /// See AbstractAttribute::initialize(...).
4633  void initialize(Attributor &A) override {
4634  if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4635  indicateOptimisticFixpoint();
4636  return;
4637  }
4638  Function *AnchorScope = getAnchorScope();
4639  if (isFnInterfaceKind() &&
4640  (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4641  indicatePessimisticFixpoint();
4642  return;
4643  }
4644 
4645  // You cannot "capture" null in the default address space.
4646  if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4647  getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4648  indicateOptimisticFixpoint();
4649  return;
4650  }
4651 
4652  const Function *F =
4653  isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4654 
4655  // Check what state the associated function can actually capture.
4656  if (F)
4657  determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4658  else
4659  indicatePessimisticFixpoint();
4660  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isArgumentPosition()) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }
4679 
4680  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4681  /// depending on the ability of the function associated with \p IRP to capture
4682  /// state in memory and through "returning/throwing", respectively.
4683  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4684  const Function &F,
4685  BitIntegerState &State) {
4686  // TODO: Once we have memory behavior attributes we should use them here.
4687 
4688  // If we know we cannot communicate or write to memory, we do not care about
4689  // ptr2int anymore.
4690  if (F.onlyReadsMemory() && F.doesNotThrow() &&
4691  F.getReturnType()->isVoidTy()) {
4692  State.addKnownBits(NO_CAPTURE);
4693  return;
4694  }
4695 
4696  // A function cannot capture state in memory if it only reads memory, it can
4697  // however return/throw state and the state might be influenced by the
4698  // pointer value, e.g., loading from a returned pointer might reveal a bit.
4699  if (F.onlyReadsMemory())
4700  State.addKnownBits(NOT_CAPTURED_IN_MEM);
4701 
4702  // A function cannot communicate state back if it does not through
4703  // exceptions and doesn not return values.
4704  if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4705  State.addKnownBits(NOT_CAPTURED_IN_RET);
4706 
4707  // Check existing "returned" attributes.
4708  int ArgNo = IRP.getCalleeArgNo();
4709  if (F.doesNotThrow() && ArgNo >= 0) {
4710  for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4711  if (F.hasParamAttribute(u, Attribute::Returned)) {
4712  if (u == unsigned(ArgNo))
4713  State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4714  else if (F.onlyReadsMemory())
4715  State.addKnownBits(NO_CAPTURE);
4716  else
4717  State.addKnownBits(NOT_CAPTURED_IN_RET);
4718  break;
4719  }
4720  }
4721  }
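
  // Hedged example of the reasoning above (illustrative IR, not from a test
  // case): for
  //
  //   define void @f(i8* %p) readonly nounwind {
  //     %v = load i8, i8* %p
  //     ret void
  //   }
  //
  // %p can neither be captured in memory (readonly) nor escape through a
  // return or exception (void return type, nounwind), so NO_CAPTURE is known
  // up front.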

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
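
// Note on the strings above (summary comment added for clarity): they follow
// the usual Attributor lattice convention; "known" bits are proven facts that
// only ever grow, while "assumed" bits are optimistic and may still be
// retracted before the fixpoint is reached.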

/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use may capture the value in memory, the NOT_CAPTURED_IN_MEM bit is
  /// removed from \p State and the search is stopped. If a use leads to a
  /// return instruction, only the NOT_CAPTURED_IN_RET bit is removed. If a
  /// use leads to a ptr2int which may capture the value, the uses of the
  /// integer are followed as well. If a use is found that is currently
  /// assumed "no-capture-maybe-returned", the user is added to the
  /// \p PotentialCopies set. All values in \p PotentialCopies are later
  /// tracked as well. For every explored use we decrement
  /// \p RemainingUsesToExplore. Once it reaches 0, the search is stopped and
  /// all capture bits are conservatively removed.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallSetVector<Value *, 4> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V may be captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }
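
  // Sketch of the control flow here (a summary based on the CaptureTracker
  // interface, not a verbatim trace): PointerMayBeCaptured(V, this) walks the
  // uses of \p V, querying shouldExplore(U) to skip dead or droppable uses
  // and captured(U) for every remaining use; once captured(U) returns true
  // the walk is aborted.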

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // For stores we check if we can follow the value through memory or not.
    if (auto *SI = dyn_cast<StoreInst>(UInst)) {
      if (SI->isVolatile())
        return isCapturedIn(/* Memory */ true, /* Integer */ false,
                            /* Return */ false);
      bool UsedAssumedInformation = false;
      if (!AA::getPotentialCopiesOfStoredValue(
              A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
        return isCapturedIn(/* Memory */ true, /* Integer */ false,
                            /* Return */ false);
      // Not captured directly, potential copies will be checked.
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst)) {
      if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
        return isCapturedIn(/* Memory */ false, /* Integer */ false,
                            /* Return */ true);
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA =
        A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      addPotentialCopy(*CB);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason why no-capture can be assumed, so we
    // conservatively treat the use as capturing.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }
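
  // Hedged IR illustration of the call-site case above (example written for
  // this comment): with
  //
  //   define i8* @id(i8* returned %p) { ret i8* %p }
  //
  // a use `%c = call i8* @id(i8* %v)` is assumed "no-capture-maybe-returned"
  // for %v, so %c is registered as a potential copy and tracked afterwards
  // instead of giving up on %v immediately.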

  /// Register \p CB as potential copy of the value we are checking.
  void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness and ignore droppable users.
    bool UsedAssumedInformation = false;
    return !U->getUser()->isDroppable() &&
           !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
                            UsedAssumedInformation);
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  AANoCapture::StateType &State;

  /// Set of potential copies of the tracked value.
  SmallSetVector<Value *, 4> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};

ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
                                  : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);

  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  const auto &FnMemAA =
      A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
  if (FnMemAA.isAssumedReadOnly()) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    if (FnMemAA.isKnownReadOnly())
      addKnownBits(NOT_CAPTURED_IN_MEM);
    else
      A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
  }

  // Make sure all returned values are different from the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through returns
  //       directly somehow.
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };
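
  // Hedged example of what CheckReturnedArgs rejects (illustrative IR): for
  //
  //   define i8* @f(i8* %p) { ret i8* %p }
  //
  // the returned-values set of @f contains the argument being inspected
  // itself, so escaping through the return cannot be ruled out and the
  // NOT_CAPTURED_IN_RET bit must not be added.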

  const auto &NoUnwindAA =
      A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly update the assumed state.
  SmallSetVector<Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore =
      getDefaultMaxUsesToExploreForCaptureTracking();
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.insert(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
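
// Note on the worklist loop above (summary comment, no new behavior):
// PotentialCopies is seeded with the value itself; every call to
// valueMayBeCaptured() may append further copies (e.g., values reachable
// through stores or "maybe returned" call sites), and the loop bails out
// early once NO_CAPTURE_MAYBE_RETURNED is no longer assumed.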

/// NoCapture attribute for function arguments.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};

/// NoCapture attribute for call site arguments.
struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
  AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (Argument *Arg = getAssociatedArgument())
      if (Arg->hasByValAttr())
        indicateOptimisticFixpoint();
    AANoCaptureImpl::initialize(A);
  }
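
  // Hedged note on the byval shortcut above: a `byval` argument is a
  // caller-side copy, so the callee only ever sees its own private stack
  // slot. For example (illustrative IR):
  //
  //   call void @f(%struct.S* byval(%struct.S) %s)
  //
  // can never capture the caller's %s through that operand, hence the
  // optimistic fixpoint.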

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
};

/// NoCapture attribute for floating values.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};

/// NoCapture attribute for function return value.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoCapture attribute deduction for a call site return value.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    // Check what state the associated function can actually capture.
    determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};
} // namespace

/// ------------------ Value Simplify Attribute ----------------------------

bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add a typecast support.
  SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
      SimplifiedAssociatedValue, Other, Ty);
  if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
    return false;

  LLVM_DEBUG({
    if (SimplifiedAssociatedValue.hasValue())
      dbgs() << "[ValueSimplify] is assumed to be "
             << **SimplifiedAssociatedValue << "\n";
    else
      dbgs() << "[ValueSimplify] is assumed to be <none>\n";
  });
  return true;
}
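
// Hedged summary of the lattice used above: llvm::None means "no information
// yet" and unions with any value V to V; two conflicting values union to
// nullptr, the "cannot simplify" state, which makes unionAssumed() report
// failure to the caller.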

namespace {
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
      : AAValueSimplify(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
    if (A.hasSimplificationCallback(getIRPosition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    LLVM_DEBUG({
      errs() << "SAV: " << SimplifiedAssociatedValue << " ";
      if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
        errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
    });
    return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
                          : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    return SimplifiedAssociatedValue;
  }

  /// Return a value we can use as replacement for the associated one, or
  /// nullptr if we don't have one that makes sense.
  Value *getReplacementValue(Attributor &A) const {
    Value *NewV = SimplifiedAssociatedValue.hasValue()
                      ? SimplifiedAssociatedValue.getValue()
                      : UndefValue::get(getAssociatedType());
    if (!NewV)
      return nullptr;
    NewV = AA::getWithType(*NewV, *getAssociatedType());
    if (!NewV || NewV == &getAssociatedValue())
      return nullptr;
    const Instruction *CtxI = getCtxI();
    if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
      return nullptr;
    if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
      return nullptr;
    return NewV;
  }
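
  // Note on the undef case above (summary comment): a
  // SimplifiedAssociatedValue that is still llvm::None means "any value works
  // here", so undef of the associated type is offered; the candidate is then
  // still rejected if it is not valid at the use position or in the anchor
  // scope.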

  /// Helper function for querying AAValueSimplify and updating the candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue
  bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                      const IRPosition &IRP, bool Simplify = true) {
    bool UsedAssumedInformation = false;
    Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
    if (Simplify)
      QueryingValueSimplified =
          A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
    return unionAssumed(QueryingValueSimplified);
  }

  /// Return true if a simplification candidate was found.
  template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    // This will also pass the call base context.
    const auto &AA =
        A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);

    Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);

    if (!COpt.hasValue()) {
      SimplifiedAssociatedValue = llvm::None;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    if (auto *C = COpt.getValue()) {
      SimplifiedAssociatedValue = C;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }

  bool askSimplifiedValueForOtherAAs(Attributor &A) {
    if (askSimplifiedValueFor<AAValueConstantRange>(A))
      return true;
    if (askSimplifiedValueFor<AAPotentialValues>(A))
      return true;
    return false;
  }
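
  // Hedged illustration of the fallback above: if AAValueConstantRange can
  // prove an integer value always evaluates to the single constant 7, that
  // ConstantInt becomes the simplified value; if the range is still open,
  // llvm::None is kept and an optional dependence is recorded so the query
  // is retried in a later iteration.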

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (getAssociatedValue().user_empty())
      return Changed;

    if (auto *NewV = getReplacementValue(A)) {
      LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
                        << *NewV << " :: " << *this << "\n");
      if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
        Changed = ChangeStatus::CHANGED;
    }

    return Changed | AAValueSimplify::manifest(A);
  }
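
  // Hedged IR illustration of manifest() (example written for this comment):
  // if %x is simplified to the constant 42, all its uses are rewritten, e.g.
  //
  //   %y = add i32 %x, 1   -->   %y = add i32 42, 1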

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    SimplifiedAssociatedValue = &getAssociatedValue();
    return AAValueSimplify::indicatePessimisticFixpoint();
  }

  static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
                         LoadInst &L, function_ref<bool(Value &)> Union) {
    auto UnionWrapper = [&](Value &V, Value &Obj) {
      if (isa<AllocaInst>(Obj))
        return Union(V);
      if (!AA::isDynamicallyUnique(A, AA, V))
        return false;
      if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
        return false;
      return Union(V);
    };

    Value &Ptr = *L.getPointerOperand();
    SmallVector<Value *, 8> Objects;
    if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
      return false;

    for (Value *Obj : Objects) {
      LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
      if (isa<UndefValue>(Obj))