//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.:
// void trackStatistics() const override {
//   STATS_DECLTRACK_ARG_ATTR(returned)
// }
// If there is a single "increment" side, one can use the macro STATS_DECLTRACK
// with a custom message. If there are multiple increment sides, STATS_DECL and
// STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
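
// For example, `STATS_DECLTRACK_ARG_ATTR(returned)` expands (through
// STATS_DECLTRACK, STATS_DECL, and STATS_TRACK) to roughly:
//   { STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned); }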

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

// PIPE_OPERATOR(AAIsDead), PIPE_OPERATOR(AANoUnwind), ... : one invocation
// per abstract attribute subclass (full list elided here).

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}
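
// For instance, for `store i32 0, i32* %p` this returns %p; for a volatile
// load it returns nullptr unless \p AllowVolatile is true; and for a call
// instruction it always returns nullptr.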

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
/// already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
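
// For example, with PtrElemTy = {i32, [4 x i32]} and Offset = 8, the indices
// computed by getGEPIndicesForOffset are [0, 1, 1] and the call emits
//   %p.0.1.1 = getelementptr {i32, [4 x i32]}, {i32, [4 x i32]}* %p,
//              i32 0, i32 1, i32 1
// with no byte-wise rest; an offset not representable through the type would
// instead leave a remainder handled by an i8-typed GEP.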

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
/// If \p VS does not contain the Interprocedural bit, only values valid in the
/// scope of \p CtxI will be visited and simplification into other scopes is
/// prevented.
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool &UsedAssumedInformation,
    bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr,
    AA::ValueScope VS = AA::Interprocedural) {

  struct LivenessInfo {
    const AAIsDead *LivenessAA = nullptr;
    bool AnyDead = false;
  };
  SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
  auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
    LivenessInfo &LI = LivenessAAs[&F];
    if (!LI.LivenessAA)
      LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
                                            DepClassTy::NONE);
    return LI;
  };

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues) {
      LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
                        << Iteration << "!\n");
      return false;
    }

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
          LI.AnyDead = true;
          UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (auto *Arg = dyn_cast<Argument>(V)) {
      if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
        SmallVector<Item> CallSiteValues;
        bool UsedAssumedInformation = false;
        if (A.checkForAllCallSites(
                [&](AbstractCallSite ACS) {
                  // Callbacks might not have a corresponding call site operand,
                  // stick with the argument in that case.
                  Value *CSOp = ACS.getCallArgOperand(*Arg);
                  if (!CSOp)
                    return false;
                  CallSiteValues.push_back({CSOp, ACS.getInstruction()});
                  return true;
                },
                *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
          Worklist.append(CallSiteValues);
          continue;
        }
      }
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      Value *NewV = SimpleV.getValue();
      if (NewV && NewV != V) {
        if ((VS & AA::Interprocedural) || !CtxI ||
            AA::isValidInScope(*NewV, CtxI->getFunction())) {
          Worklist.push_back({NewV, CtxI});
          continue;
        }
      }
    }

    if (auto *LI = dyn_cast<LoadInst>(V)) {
      bool UsedAssumedInformation = false;
      // If we ask for the potentially loaded values from the initial pointer we
      // will simply end up here again. The load is as far as we can make it.
      if (LI->getPointerOperand() != InitialV) {
        SmallSetVector<Value *, 4> PotentialCopies;
        SmallSetVector<Instruction *, 4> PotentialValueOrigins;
        if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
                                           PotentialValueOrigins, QueryingAA,
                                           UsedAssumedInformation,
                                           /* OnlyExact */ true)) {
          // Values have to be dynamically unique or we lose the fact that a
          // single llvm::Value might represent two runtime values (e.g., stack
          // locations in different recursive calls).
          bool DynamicallyUnique =
              llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
                return AA::isDynamicallyUnique(A, QueryingAA, *PC);
              });
          if (DynamicallyUnique &&
              ((VS & AA::Interprocedural) || !CtxI ||
               llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
                 return AA::isValidInScope(*PC, CtxI->getFunction());
               }))) {
            for (auto *PotentialCopy : PotentialCopies)
              Worklist.push_back({PotentialCopy, CtxI});
            continue;
          }
        }
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
      LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
                        << *V << "!\n");
      return false;
    }
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  for (auto &It : LivenessAAs)
    if (It.second.AnyDead)
      A.recordDependence(*It.second.LivenessAA, QueryingAA,
                         DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
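
// Example: starting from `%sel = select i1 %c, i32* %a, i32* %b`, the
// traversal visits %a and %b (or only one of them if %c is assumed constant)
// and invokes VisitValueCB on each leaf with the Stripped flag set.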

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     AA::ValueScope VS) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          UsedAssumedInformation, true, 32, StripCB, VS))
    return false;
  return true;
}

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}
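
// For instance, for `%g = getelementptr inbounds i32, i32* %base, i64 3` this
// returns %base with BytesOffset = 12; if an index is only known to lie in a
// range, the minimum of that range is used.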

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
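
// For instance, when clamping `nonnull` for the returned position of a
// function with `ret i8* %a` and `ret i8* %b`, the states for %a and %b are
// joined and the result is only assumed `nonnull` if both are.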

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument"
                    << "Position:" << Pos << "CB Arg state:" << CBArgumentState
                    << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a)
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects but need to destroy them still.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, int> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }
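
  // For instance, two `store i32 0, i32* %p` instructions land in the same
  // {Offset = 0, Size = 4} bin; re-adding an access for an instruction that
  // is already present combines the records (via operator&=) instead of
  // duplicating them.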

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : *It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }
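
  // Note: if \p I was never added to any bin, the size stays -1 and the query
  // trivially succeeds since there is no recorded access that could interfere.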

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }
  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function_scope(getIRPosition()),
        DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // right now. However, if the function is (assumed) nosync or the thread
    // executing all instructions is the main thread only we can ignore
    // threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load, for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);

    const bool FindInterferingWrites = I.mayReadFromMemory();
    const bool FindInterferingReads = I.mayWriteToMemory();
    const bool UseDominanceReasoning = FindInterferingWrites;
    const bool CanUseCFGReasoning = CanIgnoreThreading(I);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      };
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
    // to determine if we should look at reachability from the callee. For
    // certain pointers we know the lifetime and we do not have to step into the
    // callee to determine reachability as the pointer would be dead in the
    // callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the alloca containing function is not recursive the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          QueryingAA, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if ((!FindInterferingWrites || !Acc.isWrite()) &&
          (!FindInterferingReads || !Acc.isRead()))
        return true;

      // For now we only filter accesses based on CFG reasoning which does not
      // work yet if we have threading effects, or the access is complicated.
      if (CanUseCFGReasoning) {
        if ((!Acc.isWrite() ||
             !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
                                         IsLiveInCalleeCB)) &&
            (!Acc.isRead() ||
             !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
                                         IsLiveInCalleeCB)))
          return true;
        if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &I))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingAccesses.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(I, AccessCB))
      return false;

    // If we cannot use CFG reasoning we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingAccesses)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all accesses we cannot skip and return if that
    // succeeded for all or not.
    unsigned NumInterferingAccesses = InterferingAccesses.size();
    for (auto &It : InterferingAccesses) {
      if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }
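
  // For instance, if two exact stores to the same location both dominate a
  // load and run in the same thread, the earlier store is shadowed by the
  // later one and CanSkipAccess above allows us to drop it from the results.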

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses *Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : *It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
                                AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Size == OffsetAndSize::Unknown && Offset != OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
                                          GEP->getSourceElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand has already an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(
                        A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           /* IgnoreDroppableUses */ true, EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond()->size() << "\n";
        for (auto &Acc : *It.getSecond()) {
          dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << "     -->                         "
                   << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined()) {
            if (Acc.getWrittenValue())
              dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
            else
              dbgs() << "       - c: <unknown>\n";
          }
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (=source) arguments as we know how they are
    // accessed.
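    // For instance, for `memcpy(%dst, %src, 42)` the call is recorded as a
    // write of size 42 at offset 0 for %dst (argument 0) and as a read of the
    // same size and offset for %src (argument 1).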
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
1636 
1637 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1638  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1639  : AAPointerInfoFloating(IRP, A) {}
1640 
1641  /// See AbstractAttribute::trackStatistics()
1642  void trackStatistics() const override {
1643  AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1644  }
1645 };
1646 } // namespace
1647 
1648 /// -----------------------NoUnwind Function Attribute--------------------------
1649 
1650 namespace {
1651 struct AANoUnwindImpl : AANoUnwind {
1652  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1653 
1654  const std::string getAsStr() const override {
1655  return getAssumed() ? "nounwind" : "may-unwind";
1656  }
1657 
1658  /// See AbstractAttribute::updateImpl(...).
1659  ChangeStatus updateImpl(Attributor &A) override {
1660  auto Opcodes = {
1661  (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
1662  (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
1663  (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1664 
1665  auto CheckForNoUnwind = [&](Instruction &I) {
1666  if (!I.mayThrow())
1667  return true;
1668 
1669  if (const auto *CB = dyn_cast<CallBase>(&I)) {
1670  const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1671  *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1672  return NoUnwindAA.isAssumedNoUnwind();
1673  }
1674  return false;
1675  };
1676 
1677  bool UsedAssumedInformation = false;
1678  if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1679  UsedAssumedInformation))
1680  return indicatePessimisticFixpoint();
1681 
1682  return ChangeStatus::UNCHANGED;
1683  }
1684 };
1685 
1686 struct AANoUnwindFunction final : public AANoUnwindImpl {
1687  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1688  : AANoUnwindImpl(IRP, A) {}
1689 
1690  /// See AbstractAttribute::trackStatistics()
1691  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1692 };
1693 
1694 /// NoUnwind attribute deduction for a call site.
1695 struct AANoUnwindCallSite final : AANoUnwindImpl {
1696  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1697  : AANoUnwindImpl(IRP, A) {}
1698 
1699  /// See AbstractAttribute::initialize(...).
1700  void initialize(Attributor &A) override {
1701  AANoUnwindImpl::initialize(A);
1702  Function *F = getAssociatedFunction();
1703  if (!F || F->isDeclaration())
1704  indicatePessimisticFixpoint();
1705  }
1706 
1707  /// See AbstractAttribute::updateImpl(...).
1708  ChangeStatus updateImpl(Attributor &A) override {
1709  // TODO: Once we have call site specific value information we can provide
1710  // call site specific liveness information and then it makes
1711  // sense to specialize attributes for call site arguments instead of
1712  // redirecting requests to the callee argument.
1713  Function *F = getAssociatedFunction();
1714  const IRPosition &FnPos = IRPosition::function(*F);
1715  auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1716  return clampStateAndIndicateChange(getState(), FnAA.getState());
1717  }
1718 
1719  /// See AbstractAttribute::trackStatistics()
1720  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1721 };
1722 } // namespace
1723 
1724 /// --------------------- Function Return Values -------------------------------
1725 
1726 namespace {
1727 /// "Attribute" that collects all potential returned values and the return
1728 /// instructions that they arise from.
1729 ///
1730 /// If there is a unique returned value R, the manifest method will:
1731 /// - mark R with the "returned" attribute, if R is an argument.
1732 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1733 
1734  /// Mapping of values potentially returned by the associated function to the
1735  /// return instructions that might return them.
1736  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1737 
1738  /// State flags
1739  ///
1740  ///{
1741  bool IsFixed = false;
1742  bool IsValidState = true;
1743  ///}
1744 
1745 public:
1746  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1747  : AAReturnedValues(IRP, A) {}
1748 
1749  /// See AbstractAttribute::initialize(...).
1750  void initialize(Attributor &A) override {
1751  // Reset the state.
1752  IsFixed = false;
1753  IsValidState = true;
1754  ReturnedValues.clear();
1755 
1756  Function *F = getAssociatedFunction();
1757  if (!F || F->isDeclaration()) {
1758  indicatePessimisticFixpoint();
1759  return;
1760  }
1761  assert(!F->getReturnType()->isVoidTy() &&
1762  "Did not expect a void return type!");
1763 
1764  // The map from instruction opcodes to those instructions in the function.
1765  auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1766 
1767  // Look through all arguments, if one is marked as returned we are done.
1768  for (Argument &Arg : F->args()) {
1769  if (Arg.hasReturnedAttr()) {
1770  auto &ReturnInstSet = ReturnedValues[&Arg];
1771  if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1772  for (Instruction *RI : *Insts)
1773  ReturnInstSet.insert(cast<ReturnInst>(RI));
1774 
1775  indicateOptimisticFixpoint();
1776  return;
1777  }
1778  }
1779 
1780  if (!A.isFunctionIPOAmendable(*F))
1781  indicatePessimisticFixpoint();
1782  }
1783 
1784  /// See AbstractAttribute::manifest(...).
1785  ChangeStatus manifest(Attributor &A) override;
1786 
1787  /// See AbstractAttribute::getState(...).
1788  AbstractState &getState() override { return *this; }
1789 
1790  /// See AbstractAttribute::getState(...).
1791  const AbstractState &getState() const override { return *this; }
1792 
1793  /// See AbstractAttribute::updateImpl(Attributor &A).
1794  ChangeStatus updateImpl(Attributor &A) override;
1795 
1796  llvm::iterator_range<iterator> returned_values() override {
1797  return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1798  }
1799 
1800  llvm::iterator_range<const_iterator> returned_values() const override {
1801  return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1802  }
1803 
1804  /// Return the number of potential return values, -1 if unknown.
1805  size_t getNumReturnValues() const override {
1806  return isValidState() ? ReturnedValues.size() : -1;
1807  }
1808 
1809  /// Return an assumed unique return value if a single candidate is found. If
1810  /// there cannot be one, return a nullptr. If it is not clear yet, return the
1811  /// Optional::NoneType.
1812  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1813 
1814  /// See AbstractState::checkForAllReturnedValues(...).
1815  bool checkForAllReturnedValuesAndReturnInsts(
1816  function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1817  const override;
1818 
1819  /// Pretty print the attribute similar to the IR representation.
1820  const std::string getAsStr() const override;
1821 
1822  /// See AbstractState::isAtFixpoint().
1823  bool isAtFixpoint() const override { return IsFixed; }
1824 
1825  /// See AbstractState::isValidState().
1826  bool isValidState() const override { return IsValidState; }
1827 
1828  /// See AbstractState::indicateOptimisticFixpoint(...).
1829  ChangeStatus indicateOptimisticFixpoint() override {
1830  IsFixed = true;
1831  return ChangeStatus::UNCHANGED;
1832  }
1833 
1834  ChangeStatus indicatePessimisticFixpoint() override {
1835  IsFixed = true;
1836  IsValidState = false;
1837  return ChangeStatus::CHANGED;
1838  }
1839 };
1840 
1841 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1842  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1843 
1844  // Bookkeeping.
1845  assert(isValidState());
1846  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1847  "Number of functions with known return values");
1848 
1849  // Check if we have an assumed unique return value that we could manifest.
1850  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1851 
1852  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1853  return Changed;
1854 
1855  // Bookkeeping.
1856  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1857  "Number of functions with a unique return value");
1858  // If the assumed unique return value is an argument, annotate it.
1859  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1860  if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1861  getAssociatedFunction()->getReturnType())) {
1862  getIRPosition() = IRPosition::argument(*UniqueRVArg);
1863  Changed = IRAttribute::manifest(A);
1864  }
1865  }
1866  return Changed;
1867 }
1868 
1869 const std::string AAReturnedValuesImpl::getAsStr() const {
1870  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1871  (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1872 }
1873 
1874 Optional<Value *>
1875 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1876  // If checkForAllReturnedValues provides a unique value, ignoring potential
1877  // undef values that can also be present, it is assumed to be the actual
1878  // return value and forwarded to the caller of this method. If there are
1879  // multiple, a nullptr is returned indicating there cannot be a unique
1880  // returned value.
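 // For example, if all returns yield either %a or undef, %a is assumed to
 // be the unique return value; if both %a and %b can be returned, nullptr
 // results (no unique value). (%a and %b are illustrative names.)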
1881  Optional<Value *> UniqueRV;
1882  Type *Ty = getAssociatedFunction()->getReturnType();
1883 
1884  auto Pred = [&](Value &RV) -> bool {
1885  UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1886  return UniqueRV != Optional<Value *>(nullptr);
1887  };
1888 
1889  if (!A.checkForAllReturnedValues(Pred, *this))
1890  UniqueRV = nullptr;
1891 
1892  return UniqueRV;
1893 }
1894 
1895 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1896  function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1897  const {
1898  if (!isValidState())
1899  return false;
1900 
1901  // Check all returned values but ignore call sites as long as we have not
1902  // encountered an overdefined one during an update.
1903  for (auto &It : ReturnedValues) {
1904  Value *RV = It.first;
1905  if (!Pred(*RV, It.second))
1906  return false;
1907  }
1908 
1909  return true;
1910 }
1911 
1912 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1913  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1914 
1915  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1916  bool) -> bool {
1917  assert(AA::isValidInScope(V, Ret.getFunction()) &&
1918  "Assumed returned value should be valid in function scope!");
1919  if (ReturnedValues[&V].insert(&Ret))
1920  Changed = ChangeStatus::CHANGED;
1921  return true;
1922  };
1923 
1924  bool UsedAssumedInformation = false;
1925  auto ReturnInstCB = [&](Instruction &I) {
1926  ReturnInst &Ret = cast<ReturnInst>(I);
1927  return genericValueTraversal<ReturnInst>(
1928  A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1929  &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1930  /* MaxValues */ 16,
1931  /* StripCB */ nullptr, AA::Intraprocedural);
1932  };
1933 
1934  // Discover returned values from all live returned instructions in the
1935  // associated function.
1936  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1937  UsedAssumedInformation))
1938  return indicatePessimisticFixpoint();
1939  return Changed;
1940 }
1941 
1942 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1943  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1944  : AAReturnedValuesImpl(IRP, A) {}
1945 
1946  /// See AbstractAttribute::trackStatistics()
1947  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1948 };
1949 
1950 /// Returned values information for a call site.
1951 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1952  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1953  : AAReturnedValuesImpl(IRP, A) {}
1954 
1955  /// See AbstractAttribute::initialize(...).
1956  void initialize(Attributor &A) override {
1957  // TODO: Once we have call site specific value information we can provide
1958  // call site specific liveness information and then it makes
1959  // sense to specialize attributes for call sites instead of
1960  // redirecting requests to the callee.
1961  llvm_unreachable("Abstract attributes for returned values are not "
1962  "supported for call sites yet!");
1963  }
1964 
1965  /// See AbstractAttribute::updateImpl(...).
1966  ChangeStatus updateImpl(Attributor &A) override {
1967  return indicatePessimisticFixpoint();
1968  }
1969 
1970  /// See AbstractAttribute::trackStatistics()
1971  void trackStatistics() const override {}
1972 };
1973 } // namespace
1974 
1975 /// ------------------------ NoSync Function Attribute -------------------------
1976 
1977 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1978  if (!I->isAtomic())
1979  return false;
1980 
1981  if (auto *FI = dyn_cast<FenceInst>(I))
1982  // All legal orderings for fence are stronger than monotonic.
1983  return FI->getSyncScopeID() != SyncScope::SingleThread;
1984  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1985  // Unordered is not a legal ordering for cmpxchg.
1986  return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1987  AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1988  }
1989 
1990  AtomicOrdering Ordering;
1991  switch (I->getOpcode()) {
1992  case Instruction::AtomicRMW:
1993  Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1994  break;
1995  case Instruction::Store:
1996  Ordering = cast<StoreInst>(I)->getOrdering();
1997  break;
1998  case Instruction::Load:
1999  Ordering = cast<LoadInst>(I)->getOrdering();
2000  break;
2001  default:
2002  llvm_unreachable(
2003  "New atomic operations need to be known in the attributor.");
2004  }
2005 
2006  return (Ordering != AtomicOrdering::Unordered &&
2007  Ordering != AtomicOrdering::Monotonic);
2008 }
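// For example (illustrative IR), a relaxed atomic load such as
//   %v = load atomic i32, i32* %p monotonic, align 4
// is not classified as non-relaxed here, whereas the same load with
// acquire ordering is.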
2009 
2010 /// Return true if this intrinsic is nosync. This is only used for intrinsics
2011 /// which would be nosync except that they have a volatile flag. All other
2012 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2013 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2014  if (auto *MI = dyn_cast<MemIntrinsic>(I))
2015  return !MI->isVolatile();
2016  return false;
2017 }
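// For example (illustrative IR),
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 true)
// has the volatile flag set and is therefore not nosync, while the same
// call with the flag set to false is.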
2018 
2019 namespace {
2020 struct AANoSyncImpl : AANoSync {
2021  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2022 
2023  const std::string getAsStr() const override {
2024  return getAssumed() ? "nosync" : "may-sync";
2025  }
2026 
2027  /// See AbstractAttribute::updateImpl(...).
2028  ChangeStatus updateImpl(Attributor &A) override;
2029 };
2030 
2031 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2032 
2033  auto CheckRWInstForNoSync = [&](Instruction &I) {
2034  return AA::isNoSyncInst(A, I, *this);
2035  };
2036 
2037  auto CheckForNoSync = [&](Instruction &I) {
2038  // At this point we handled all read/write effects and they are all
2039  // nosync, so they can be skipped.
2040  if (I.mayReadOrWriteMemory())
2041  return true;
2042 
2043  // non-convergent and readnone imply nosync.
2044  return !cast<CallBase>(I).isConvergent();
2045  };
2046 
2047  bool UsedAssumedInformation = false;
2048  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2049  UsedAssumedInformation) ||
2050  !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2051  UsedAssumedInformation))
2052  return indicatePessimisticFixpoint();
2053 
2054  return ChangeStatus::UNCHANGED;
2055 }
2056 
2057 struct AANoSyncFunction final : public AANoSyncImpl {
2058  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2059  : AANoSyncImpl(IRP, A) {}
2060 
2061  /// See AbstractAttribute::trackStatistics()
2062  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2063 };
2064 
2065 /// NoSync attribute deduction for a call site.
2066 struct AANoSyncCallSite final : AANoSyncImpl {
2067  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2068  : AANoSyncImpl(IRP, A) {}
2069 
2070  /// See AbstractAttribute::initialize(...).
2071  void initialize(Attributor &A) override {
2072  AANoSyncImpl::initialize(A);
2073  Function *F = getAssociatedFunction();
2074  if (!F || F->isDeclaration())
2075  indicatePessimisticFixpoint();
2076  }
2077 
2078  /// See AbstractAttribute::updateImpl(...).
2079  ChangeStatus updateImpl(Attributor &A) override {
2080  // TODO: Once we have call site specific value information we can provide
2081  // call site specific liveness information and then it makes
2082  // sense to specialize attributes for call site arguments instead of
2083  // redirecting requests to the callee argument.
2084  Function *F = getAssociatedFunction();
2085  const IRPosition &FnPos = IRPosition::function(*F);
2086  auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2087  return clampStateAndIndicateChange(getState(), FnAA.getState());
2088  }
2089 
2090  /// See AbstractAttribute::trackStatistics()
2091  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2092 };
2093 } // namespace
2094 
2095 /// ------------------------ No-Free Attributes ----------------------------
2096 
2097 namespace {
2098 struct AANoFreeImpl : public AANoFree {
2099  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2100 
2101  /// See AbstractAttribute::updateImpl(...).
2102  ChangeStatus updateImpl(Attributor &A) override {
2103  auto CheckForNoFree = [&](Instruction &I) {
2104  const auto &CB = cast<CallBase>(I);
2105  if (CB.hasFnAttr(Attribute::NoFree))
2106  return true;
2107 
2108  const auto &NoFreeAA = A.getAAFor<AANoFree>(
2109  *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2110  return NoFreeAA.isAssumedNoFree();
2111  };
2112 
2113  bool UsedAssumedInformation = false;
2114  if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2115  UsedAssumedInformation))
2116  return indicatePessimisticFixpoint();
2117  return ChangeStatus::UNCHANGED;
2118  }
2119 
2120  /// See AbstractAttribute::getAsStr().
2121  const std::string getAsStr() const override {
2122  return getAssumed() ? "nofree" : "may-free";
2123  }
2124 };
2125 
2126 struct AANoFreeFunction final : public AANoFreeImpl {
2127  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2128  : AANoFreeImpl(IRP, A) {}
2129 
2130  /// See AbstractAttribute::trackStatistics()
2131  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2132 };
2133 
2134 /// NoFree attribute deduction for a call site.
2135 struct AANoFreeCallSite final : AANoFreeImpl {
2136  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2137  : AANoFreeImpl(IRP, A) {}
2138 
2139  /// See AbstractAttribute::initialize(...).
2140  void initialize(Attributor &A) override {
2141  AANoFreeImpl::initialize(A);
2142  Function *F = getAssociatedFunction();
2143  if (!F || F->isDeclaration())
2144  indicatePessimisticFixpoint();
2145  }
2146 
2147  /// See AbstractAttribute::updateImpl(...).
2148  ChangeStatus updateImpl(Attributor &A) override {
2149  // TODO: Once we have call site specific value information we can provide
2150  // call site specific liveness information and then it makes
2151  // sense to specialize attributes for call site arguments instead of
2152  // redirecting requests to the callee argument.
2153  Function *F = getAssociatedFunction();
2154  const IRPosition &FnPos = IRPosition::function(*F);
2155  auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2156  return clampStateAndIndicateChange(getState(), FnAA.getState());
2157  }
2158 
2159  /// See AbstractAttribute::trackStatistics()
2160  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2161 };
2162 
2163 /// NoFree attribute for floating values.
2164 struct AANoFreeFloating : AANoFreeImpl {
2165  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2166  : AANoFreeImpl(IRP, A) {}
2167 
2168  /// See AbstractAttribute::trackStatistics()
2169  void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2170 
2171  /// See AbstractAttribute::updateImpl(...).
2172  ChangeStatus updateImpl(Attributor &A) override {
2173  const IRPosition &IRP = getIRPosition();
2174 
2175  const auto &NoFreeAA = A.getAAFor<AANoFree>(
2176  *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2177  if (NoFreeAA.isAssumedNoFree())
2178  return ChangeStatus::UNCHANGED;
2179 
2180  Value &AssociatedValue = getIRPosition().getAssociatedValue();
2181  auto Pred = [&](const Use &U, bool &Follow) -> bool {
2182  Instruction *UserI = cast<Instruction>(U.getUser());
2183  if (auto *CB = dyn_cast<CallBase>(UserI)) {
2184  if (CB->isBundleOperand(&U))
2185  return false;
2186  if (!CB->isArgOperand(&U))
2187  return true;
2188  unsigned ArgNo = CB->getArgOperandNo(&U);
2189 
2190  const auto &NoFreeArg = A.getAAFor<AANoFree>(
2191  *this, IRPosition::callsite_argument(*CB, ArgNo),
2192  DepClassTy::REQUIRED);
2193  return NoFreeArg.isAssumedNoFree();
2194  }
2195 
2196  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2197  isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2198  Follow = true;
2199  return true;
2200  }
2201  if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2202  isa<ReturnInst>(UserI))
2203  return true;
2204 
2205  // Unknown user.
2206  return false;
2207  };
2208  if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2209  return indicatePessimisticFixpoint();
2210 
2211  return ChangeStatus::UNCHANGED;
2212  }
2213 };
2214 
2215 /// NoFree attribute for a function argument.
2216 struct AANoFreeArgument final : AANoFreeFloating {
2217  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2218  : AANoFreeFloating(IRP, A) {}
2219 
2220  /// See AbstractAttribute::trackStatistics()
2221  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2222 };
2223 
2224 /// NoFree attribute for call site arguments.
2225 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2226  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2227  : AANoFreeFloating(IRP, A) {}
2228 
2229  /// See AbstractAttribute::updateImpl(...).
2230  ChangeStatus updateImpl(Attributor &A) override {
2231  // TODO: Once we have call site specific value information we can provide
2232  // call site specific liveness information and then it makes
2233  // sense to specialize attributes for call site arguments instead of
2234  // redirecting requests to the callee argument.
2235  Argument *Arg = getAssociatedArgument();
2236  if (!Arg)
2237  return indicatePessimisticFixpoint();
2238  const IRPosition &ArgPos = IRPosition::argument(*Arg);
2239  auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2240  return clampStateAndIndicateChange(getState(), ArgAA.getState());
2241  }
2242 
2243  /// See AbstractAttribute::trackStatistics()
2244  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2245 };
2246 
2247 /// NoFree attribute for function return value.
2248 struct AANoFreeReturned final : AANoFreeFloating {
2249  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2250  : AANoFreeFloating(IRP, A) {
2251  llvm_unreachable("NoFree is not applicable to function returns!");
2252  }
2253 
2254  /// See AbstractAttribute::initialize(...).
2255  void initialize(Attributor &A) override {
2256  llvm_unreachable("NoFree is not applicable to function returns!");
2257  }
2258 
2259  /// See AbstractAttribute::updateImpl(...).
2260  ChangeStatus updateImpl(Attributor &A) override {
2261  llvm_unreachable("NoFree is not applicable to function returns!");
2262  }
2263 
2264  /// See AbstractAttribute::trackStatistics()
2265  void trackStatistics() const override {}
2266 };
2267 
2268 /// NoFree attribute deduction for a call site return value.
2269 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2270  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2271  : AANoFreeFloating(IRP, A) {}
2272 
2273  ChangeStatus manifest(Attributor &A) override {
2274  return ChangeStatus::UNCHANGED;
2275  }
2276  /// See AbstractAttribute::trackStatistics()
2277  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2278 };
2279 } // namespace
2280 
2281 /// ------------------------ NonNull Argument Attribute ------------------------
2282 namespace {
2283 static int64_t getKnownNonNullAndDerefBytesForUse(
2284  Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2285  const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2286  TrackUse = false;
2287 
2288  const Value *UseV = U->get();
2289  if (!UseV->getType()->isPointerTy())
2290  return 0;
2291 
2292  // We need to follow common pointer manipulation uses to the accesses they
2293  // feed into. We try to be smart and avoid looking through things we do not
2294  // want to handle for now, e.g., non-inbounds GEPs.
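 // For example (illustrative IR), in
 //   %c = bitcast i32* %p to i8*
 //   %g = getelementptr inbounds i8, i8* %c, i64 4
 //   store i8 0, i8* %g
 // the use of %p is followed through the cast and the GEP to the store.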
2295  if (isa<CastInst>(I)) {
2296  TrackUse = true;
2297  return 0;
2298  }
2299 
2300  if (isa<GetElementPtrInst>(I)) {
2301  TrackUse = true;
2302  return 0;
2303  }
2304 
2305  Type *PtrTy = UseV->getType();
2306  const Function *F = I->getFunction();
2307  bool NullPointerIsDefined =
2308  F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2309  const DataLayout &DL = A.getInfoCache().getDL();
2310  if (const auto *CB = dyn_cast<CallBase>(I)) {
2311  if (CB->isBundleOperand(U)) {
2312  if (RetainedKnowledge RK = getKnowledgeFromUse(
2313  U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2314  IsNonNull |=
2315  (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2316  return RK.ArgValue;
2317  }
2318  return 0;
2319  }
2320 
2321  if (CB->isCallee(U)) {
2322  IsNonNull |= !NullPointerIsDefined;
2323  return 0;
2324  }
2325 
2326  unsigned ArgNo = CB->getArgOperandNo(U);
2327  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2328  // As long as we only use known information there is no need to track
2329  // dependences here.
2330  auto &DerefAA =
2331  A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2332  IsNonNull |= DerefAA.isKnownNonNull();
2333  return DerefAA.getKnownDereferenceableBytes();
2334  }
2335 
2336  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2337  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2338  return 0;
2339 
2340  int64_t Offset;
2341  const Value *Base =
2342  getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2343  if (Base && Base == &AssociatedValue) {
2344  int64_t DerefBytes = Loc->Size.getValue() + Offset;
2345  IsNonNull |= !NullPointerIsDefined;
2346  return std::max(int64_t(0), DerefBytes);
2347  }
2348 
2349  /// Corner case when an offset is 0.
2350  Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2351  /*AllowNonInbounds*/ true);
2352  if (Base && Base == &AssociatedValue && Offset == 0) {
2353  int64_t DerefBytes = Loc->Size.getValue();
2354  IsNonNull |= !NullPointerIsDefined;
2355  return std::max(int64_t(0), DerefBytes);
2356  }
2357 
2358  return 0;
2359 }
2360 
2361 struct AANonNullImpl : AANonNull {
2362  AANonNullImpl(const IRPosition &IRP, Attributor &A)
2363  : AANonNull(IRP, A),
2364  NullIsDefined(NullPointerIsDefined(
2365  getAnchorScope(),
2366  getAssociatedValue().getType()->getPointerAddressSpace())) {}
2367 
2368  /// See AbstractAttribute::initialize(...).
2369  void initialize(Attributor &A) override {
2370  Value &V = getAssociatedValue();
2371  if (!NullIsDefined &&
2372  hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2373  /* IgnoreSubsumingPositions */ false, &A)) {
2374  indicateOptimisticFixpoint();
2375  return;
2376  }
2377 
2378  if (isa<ConstantPointerNull>(V)) {
2379  indicatePessimisticFixpoint();
2380  return;
2381  }
2382 
2383  AANonNull::initialize(A);
2384 
2385  bool CanBeNull, CanBeFreed;
2386  if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2387  CanBeFreed)) {
2388  if (!CanBeNull) {
2389  indicateOptimisticFixpoint();
2390  return;
2391  }
2392  }
2393 
2394  if (isa<GlobalValue>(&getAssociatedValue())) {
2395  indicatePessimisticFixpoint();
2396  return;
2397  }
2398 
2399  if (Instruction *CtxI = getCtxI())
2400  followUsesInMBEC(*this, A, getState(), *CtxI);
2401  }
2402 
2403  /// See followUsesInMBEC
2404  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2405  AANonNull::StateType &State) {
2406  bool IsNonNull = false;
2407  bool TrackUse = false;
2408  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2409  IsNonNull, TrackUse);
2410  State.setKnown(IsNonNull);
2411  return TrackUse;
2412  }
2413 
2414  /// See AbstractAttribute::getAsStr().
2415  const std::string getAsStr() const override {
2416  return getAssumed() ? "nonnull" : "may-null";
2417  }
2418 
2419  /// Flag to determine if the underlying value can be null and still allow
2420  /// valid accesses.
2421  const bool NullIsDefined;
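 // Null is defined, e.g., in functions carrying the null_pointer_is_valid
 // attribute or in non-zero address spaces where null can be a valid
 // address.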
2422 };
2423 
2424 /// NonNull attribute for a floating value.
2425 struct AANonNullFloating : public AANonNullImpl {
2426  AANonNullFloating(const IRPosition &IRP, Attributor &A)
2427  : AANonNullImpl(IRP, A) {}
2428 
2429  /// See AbstractAttribute::updateImpl(...).
2430  ChangeStatus updateImpl(Attributor &A) override {
2431  const DataLayout &DL = A.getDataLayout();
2432 
2433  DominatorTree *DT = nullptr;
2434  AssumptionCache *AC = nullptr;
2435  InformationCache &InfoCache = A.getInfoCache();
2436  if (const Function *Fn = getAnchorScope()) {
2437  DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2438  AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2439  }
2440 
2441  auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2442  AANonNull::StateType &T, bool Stripped) -> bool {
2443  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2444  DepClassTy::REQUIRED);
2445  if (!Stripped && this == &AA) {
2446  if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2447  T.indicatePessimisticFixpoint();
2448  } else {
2449  // Use abstract attribute information.
2450  const AANonNull::StateType &NS = AA.getState();
2451  T ^= NS;
2452  }
2453  return T.isValidState();
2454  };
2455 
2456  StateType T;
2457  bool UsedAssumedInformation = false;
2458  if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2459  VisitValueCB, getCtxI(),
2460  UsedAssumedInformation))
2461  return indicatePessimisticFixpoint();
2462 
2463  return clampStateAndIndicateChange(getState(), T);
2464  }
2465 
2466  /// See AbstractAttribute::trackStatistics()
2467  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2468 };
2469 
2470 /// NonNull attribute for function return value.
2471 struct AANonNullReturned final
2472  : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2473  AANonNullReturned(const IRPosition &IRP, Attributor &A)
2474  : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2475 
2476  /// See AbstractAttribute::getAsStr().
2477  const std::string getAsStr() const override {
2478  return getAssumed() ? "nonnull" : "may-null";
2479  }
2480 
2481  /// See AbstractAttribute::trackStatistics()
2482  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2483 };
2484 
2485 /// NonNull attribute for function argument.
2486 struct AANonNullArgument final
2487  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2488  AANonNullArgument(const IRPosition &IRP, Attributor &A)
2489  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2490 
2491  /// See AbstractAttribute::trackStatistics()
2492  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2493 };
2494 
2495 struct AANonNullCallSiteArgument final : AANonNullFloating {
2496  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2497  : AANonNullFloating(IRP, A) {}
2498 
2499  /// See AbstractAttribute::trackStatistics()
2500  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2501 };
2502 
2503 /// NonNull attribute for a call site return position.
2504 struct AANonNullCallSiteReturned final
2505  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2506  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2507  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2508 
2509  /// See AbstractAttribute::trackStatistics()
2510  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2511 };
2512 } // namespace
2513 
2514 /// ------------------------ No-Recurse Attributes ----------------------------
2515 
2516 namespace {
2517 struct AANoRecurseImpl : public AANoRecurse {
2518  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2519 
2520  /// See AbstractAttribute::getAsStr()
2521  const std::string getAsStr() const override {
2522  return getAssumed() ? "norecurse" : "may-recurse";
2523  }
2524 };
2525 
2526 struct AANoRecurseFunction final : AANoRecurseImpl {
2527  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2528  : AANoRecurseImpl(IRP, A) {}
2529 
2530  /// See AbstractAttribute::updateImpl(...).
2531  ChangeStatus updateImpl(Attributor &A) override {
2532 
2533  // If all live call sites are known to be no-recurse, we are as well.
2534  auto CallSitePred = [&](AbstractCallSite ACS) {
2535  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2536  *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2537  DepClassTy::NONE);
2538  return NoRecurseAA.isKnownNoRecurse();
2539  };
2540  bool UsedAssumedInformation = false;
2541  if (A.checkForAllCallSites(CallSitePred, *this, true,
2542  UsedAssumedInformation)) {
2543  // If we know all call sites and all are known no-recurse, we are done.
2544  // If all known call sites, which might not be all that exist, are known
2545  // to be no-recurse, we are not done but we can continue to assume
2546  // no-recurse. If one of the call sites we have not visited will become
2547  // live, another update is triggered.
2548  if (!UsedAssumedInformation)
2549  indicateOptimisticFixpoint();
2550  return ChangeStatus::UNCHANGED;
2551  }
2552 
2553  const AAFunctionReachability &EdgeReachability =
2554  A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2555  DepClassTy::REQUIRED);
2556  if (EdgeReachability.canReach(A, *getAnchorScope()))
2557  return indicatePessimisticFixpoint();
2558  return ChangeStatus::UNCHANGED;
2559  }
2560 
2561  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2562 };
2563 
2564 /// NoRecurse attribute deduction for a call site.
2565 struct AANoRecurseCallSite final : AANoRecurseImpl {
2566  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2567  : AANoRecurseImpl(IRP, A) {}
2568 
2569  /// See AbstractAttribute::initialize(...).
2570  void initialize(Attributor &A) override {
2571  AANoRecurseImpl::initialize(A);
2572  Function *F = getAssociatedFunction();
2573  if (!F || F->isDeclaration())
2574  indicatePessimisticFixpoint();
2575  }
2576 
2577  /// See AbstractAttribute::updateImpl(...).
2578  ChangeStatus updateImpl(Attributor &A) override {
2579  // TODO: Once we have call site specific value information we can provide
2580  // call site specific liveness information and then it makes
2581  // sense to specialize attributes for call site arguments instead of
2582  // redirecting requests to the callee argument.
2583  Function *F = getAssociatedFunction();
2584  const IRPosition &FnPos = IRPosition::function(*F);
2585  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2586  return clampStateAndIndicateChange(getState(), FnAA.getState());
2587  }
2588 
2589  /// See AbstractAttribute::trackStatistics()
2590  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2591 };
2592 } // namespace
2593 
2594 /// -------------------- Undefined-Behavior Attributes ------------------------
2595 
2596 namespace {
2597 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2598  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2599  : AAUndefinedBehavior(IRP, A) {}
2600 
2601  /// See AbstractAttribute::updateImpl(...). This inspects memory accesses
2602  /// through a pointer, conditional branches, call sites, and returns.
2603  ChangeStatus updateImpl(Attributor &A) override {
2604  const size_t UBPrevSize = KnownUBInsts.size();
2605  const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2606 
2607  auto InspectMemAccessInstForUB = [&](Instruction &I) {
2608  // The language reference now states that volatile stores are not UB; skip them.
2609  if (I.isVolatile() && I.mayWriteToMemory())
2610  return true;
2611 
2612  // Skip instructions that are already saved.
2613  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2614  return true;
2615 
2616  // If we reach here, we know we have an instruction
2617  // that accesses memory through a pointer operand,
2618  // which getPointerOperand() should return for us.
2619  Value *PtrOp =
2620  const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2621  assert(PtrOp &&
2622  "Expected pointer operand of memory accessing instruction");
2623 
2624  // Either we stopped and the appropriate action was taken,
2625  // or we got back a simplified value to continue.
2626  Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2627  if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2628  return true;
2629  const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2630 
2631  // A memory access through a pointer is considered UB
2632  // only if the pointer has constant null value.
2633  // TODO: Expand it to not only check constant values.
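 // For example (illustrative IR), "store i32 0, i32* null" is flagged as
 // known UB below when null is not a defined address in the function.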
2634  if (!isa<ConstantPointerNull>(PtrOpVal)) {
2635  AssumedNoUBInsts.insert(&I);
2636  return true;
2637  }
2638  const Type *PtrTy = PtrOpVal->getType();
2639 
2640  // Because we only consider instructions inside functions,
2641  // assume that a parent function exists.
2642  const Function *F = I.getFunction();
2643 
2644  // A memory access using constant null pointer is only considered UB
2645  // if null pointer is _not_ defined for the target platform.
2646  if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2647  AssumedNoUBInsts.insert(&I);
2648  else
2649  KnownUBInsts.insert(&I);
2650  return true;
2651  };
2652 
2653  auto InspectBrInstForUB = [&](Instruction &I) {
2654  // A conditional branch instruction is considered UB if it has `undef`
2655  // condition.
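 // For example (illustrative IR), "br i1 undef, label %t, label %f" is
 // considered UB, while "br label %next" never is.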
2656 
2657  // Skip instructions that are already saved.
2658  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2659  return true;
2660 
2661  // We know we have a branch instruction.
2662  auto *BrInst = cast<BranchInst>(&I);
2663 
2664  // Unconditional branches are never considered UB.
2665  if (BrInst->isUnconditional())
2666  return true;
2667 
2668  // Either we stopped and the appropriate action was taken,
2669  // or we got back a simplified value to continue.
2670  Optional<Value *> SimplifiedCond =
2671  stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2672  if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2673  return true;
2674  AssumedNoUBInsts.insert(&I);
2675  return true;
2676  };
2677 
2678  auto InspectCallSiteForUB = [&](Instruction &I) {
2679  // Check whether a call site always causes UB or not.
2680 
2681  // Skip instructions that are already saved.
2682  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2683  return true;
2684 
2685  // Check nonnull and noundef argument attribute violation for each
2686  // callsite.
2687  CallBase &CB = cast<CallBase>(I);
2688  const Function *Callee = CB.getCalledFunction();
2689  if (!Callee)
2690  return true;
2691  for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2692  // If the current argument is known to be simplified to a null pointer
2693  // and the corresponding argument position is known to have the nonnull
2694  // attribute, the argument is poison. Furthermore, if the argument is
2695  // poison and the position is known to have the noundef attribute, this
2696  // call site is considered UB.
2697  if (idx >= Callee->arg_size())
2698  break;
2699  Value *ArgVal = CB.getArgOperand(idx);
2700  if (!ArgVal)
2701  continue;
2702  // Here, we handle three cases.
2703  // (1) Not having a value means it is dead. (we can replace the value
2704  // with undef)
2705  // (2) Simplified to undef. The argument violates the noundef attribute.
2706  // (3) Simplified to null pointer where known to be nonnull.
2707  // The argument is a poison value and violates the noundef attribute.
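 // For example (illustrative IR), given "declare void @f(i8* nonnull
 // noundef)", the call "call void @f(i8* null)" passes a poison value
 // where noundef is required and is flagged as known UB.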
2708  IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2709  auto &NoUndefAA =
2710  A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2711  if (!NoUndefAA.isKnownNoUndef())
2712  continue;
2713  bool UsedAssumedInformation = false;
2714  Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2715  IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2716  if (UsedAssumedInformation)
2717  continue;
2718  if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2719  return true;
2720  if (!SimplifiedVal.hasValue() ||
2721  isa<UndefValue>(*SimplifiedVal.getValue())) {
2722  KnownUBInsts.insert(&I);
2723  continue;
2724  }
2725  if (!ArgVal->getType()->isPointerTy() ||
2726  !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2727  continue;
2728  auto &NonNullAA =
2729  A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2730  if (NonNullAA.isKnownNonNull())
2731  KnownUBInsts.insert(&I);
2732  }
2733  return true;
2734  };
2735 
2736  auto InspectReturnInstForUB = [&](Instruction &I) {
2737  auto &RI = cast<ReturnInst>(I);
2738  // Either we stopped and the appropriate action was taken,
2739  // or we got back a simplified return value to continue.
2740  Optional<Value *> SimplifiedRetValue =
2741  stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2742  if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
2743  return true;
2744 
2745  // Check if a return instruction always causes UB or not.
2746  // Note: It is guaranteed that the returned position of the anchor
2747  // scope has noundef attribute when this is called.
2748  // We also ensure the return position is not "assumed dead"
2749  // because the returned value was then potentially simplified to
2750  // `undef` in AAReturnedValues without removing the `noundef`
2751  // attribute yet.
2752 
2753  // When the returned position has the noundef attribute, UB occurs in the
2754  // following cases.
2755  // (1) Returned value is known to be undef.
2756  // (2) The value is known to be a null pointer and the returned
2757  // position has nonnull attribute (because the returned value is
2758  // poison).
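 // For example (illustrative IR), "ret i8* null" in a function whose
 // return position is "nonnull noundef" returns poison where noundef is
 // required and is therefore known UB.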
2759  if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2760  auto &NonNullAA = A.getAAFor<AANonNull>(
2761  *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2762  if (NonNullAA.isKnownNonNull())
2763  KnownUBInsts.insert(&I);
2764  }
2765 
2766  return true;
2767  };
2768 
2769  bool UsedAssumedInformation = false;
2770  A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2771  {Instruction::Load, Instruction::Store,
2772  Instruction::AtomicCmpXchg,
2773  Instruction::AtomicRMW},
2774  UsedAssumedInformation,
2775  /* CheckBBLivenessOnly */ true);
2776  A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2777  UsedAssumedInformation,
2778  /* CheckBBLivenessOnly */ true);
2779  A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2780  UsedAssumedInformation);
2781 
2782  // If the returned position of the anchor scope has the noundef attribute,
2783  // check all return instructions.
2784  if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2785  const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2786  if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2787  auto &RetPosNoUndefAA =
2788  A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2789  if (RetPosNoUndefAA.isKnownNoUndef())
2790  A.checkForAllInstructions(InspectReturnInstForUB, *this,
2791  {Instruction::Ret}, UsedAssumedInformation,
2792  /* CheckBBLivenessOnly */ true);
2793  }
2794  }
2795 
2796  if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2797  UBPrevSize != KnownUBInsts.size())
2798  return ChangeStatus::CHANGED;
2799  return ChangeStatus::UNCHANGED;
2800  }
2801 
2802  bool isKnownToCauseUB(Instruction *I) const override {
2803  return KnownUBInsts.count(I);
2804  }
2805 
2806  bool isAssumedToCauseUB(Instruction *I) const override {
2807  // In simple words, if an instruction is not in the set of instructions
2808  // assumed to _not_ cause UB, then it is assumed to cause UB (that
2809  // includes those in the KnownUBInsts set). The rest of the boilerplate
2810  // is to ensure that it is one of the instructions we test
2811  // for UB.
2812 
2813  switch (I->getOpcode()) {
2814  case Instruction::Load:
2815  case Instruction::Store:
2816  case Instruction::AtomicCmpXchg:
2817  case Instruction::AtomicRMW:
2818  return !AssumedNoUBInsts.count(I);
2819  case Instruction::Br: {
2820  auto *BrInst = cast<BranchInst>(I);
2821  if (BrInst->isUnconditional())
2822  return false;
2823  return !AssumedNoUBInsts.count(I);
2824  } break;
2825  default:
2826  return false;
2827  }
2828  return false;
2829  }
2830 
2831  ChangeStatus manifest(Attributor &A) override {
2832  if (KnownUBInsts.empty())
2833  return ChangeStatus::UNCHANGED;
2834  for (Instruction *I : KnownUBInsts)
2835  A.changeToUnreachableAfterManifest(I);
2836  return ChangeStatus::CHANGED;
2837  }
2838 
2839  /// See AbstractAttribute::getAsStr()
2840  const std::string getAsStr() const override {
2841  return getAssumed() ? "undefined-behavior" : "no-ub";
2842  }
2843 
2844  /// Note: The correctness of this analysis depends on the fact that the
2845  /// following 2 sets will stop changing after some point.
2846  /// "Change" here means that their size changes.
2847  /// The size of each set is monotonically increasing
2848  /// (we only add items to them) and it is upper bounded by the number of
2849  /// instructions in the processed function (we can never save more
2850  /// elements in either set than this number). Hence, at some point,
2851  /// they will stop increasing.
2852  /// Consequently, at some point, both sets will have stopped
2853  /// changing, effectively making the analysis reach a fixpoint.
2854 
2855  /// Note: These 2 sets are disjoint and an instruction can be considered
2856  /// one of 3 things:
2857  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2858  /// the KnownUBInsts set.
2859  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2860  /// has a reason to assume it).
2861  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2862  /// could not find a reason to assume or prove that it can cause UB,
2863  /// hence it assumes it doesn't. We have a set for these instructions
2864  /// so that we don't reprocess them in every update.
2865  /// Note however that instructions in this set may cause UB.
2866 
2867 protected:
2868  /// A set of all live instructions _known_ to cause UB.
2869  SmallPtrSet<Instruction *, 8> KnownUBInsts;
2870 
2871 private:
2872  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2873  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2874 
2875  // Should be called during updates in which, while processing an instruction
2876  // \p I that depends on a value \p V, one of the following has to happen:
2877  // - If the value is assumed, then stop.
2878  // - If the value is known but undef, then consider it UB.
2879  // - Otherwise, do specific processing with the simplified value.
2880  // We return None in the first 2 cases to signify that an appropriate
2881  // action was taken and the caller should stop.
2882  // Otherwise, we return the simplified value that the caller should
2883  // use for specific processing.
2884  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2885  Instruction *I) {
2886  bool UsedAssumedInformation = false;
2887  Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2888  IRPosition::value(*V), *this, UsedAssumedInformation);
2889  if (!UsedAssumedInformation) {
2890  // Don't depend on assumed values.
2891  if (!SimplifiedV.hasValue()) {
2892  // If it is known (which we tested above) but it doesn't have a value,
2893  // then we can assume `undef` and hence the instruction is UB.
2894  KnownUBInsts.insert(I);
2895  return llvm::None;
2896  }
2897  if (!SimplifiedV.getValue())
2898  return nullptr;
2899  V = *SimplifiedV;
2900  }
2901  if (isa<UndefValue>(V)) {
2902  KnownUBInsts.insert(I);
2903  return llvm::None;
2904  }
2905  return V;
2906  }
2907 };
2908 
2909 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2910  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2911  : AAUndefinedBehaviorImpl(IRP, A) {}
2912 
2913  /// See AbstractAttribute::trackStatistics()
2914  void trackStatistics() const override {
2915  STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2916  "Number of instructions known to have UB");
2917  BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2918  KnownUBInsts.size();
2919  }
2920 };
2921 } // namespace
2922 
2923 /// ------------------------ Will-Return Attributes ----------------------------
2924 
2925 namespace {
2926 // Helper function that checks whether a function has any cycle which we
2927 // don't know to be bounded.
2928 // Loops with a maximum trip count are considered bounded; any other cycle is not.
2929 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2930  ScalarEvolution *SE =
2931  A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2932  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2933  // If either SCEV or LoopInfo is not available for the function, then we
2934  // assume any cycle to be an unbounded cycle.
2935  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2936  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2937  if (!SE || !LI) {
2938  for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2939  if (SCCI.hasCycle())
2940  return true;
2941  return false;
2942  }
2943 
2944  // If there's irreducible control, the function may contain non-loop cycles.
2945  if (mayContainIrreducibleControl(F, LI))
2946  return true;
2947 
2948  // Any loop that does not have a max trip count is considered an unbounded cycle.
2949  for (auto *L : LI->getLoopsInPreorder()) {
2950  if (!SE->getSmallConstantMaxTripCount(L))
2951  return true;
2952  }
2953  return false;
2954 }
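// For example, a loop like "for (int i = 0; i != 128; ++i)" has a constant
// maximum trip count and is treated as bounded, whereas "while (*p) ++p;"
// has no SCEV-computable maximum trip count and makes this helper return
// true.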
2955 
2956 struct AAWillReturnImpl : public AAWillReturn {
2957  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2958  : AAWillReturn(IRP, A) {}
2959 
2960  /// See AbstractAttribute::initialize(...).
2961  void initialize(Attributor &A) override {
2962  AAWillReturn::initialize(A);
2963 
2964  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2965  indicateOptimisticFixpoint();
2966  return;
2967  }
2968  }
2969 
2970  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2971  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2972  // Check for `mustprogress` in the scope and the associated function which
2973  // might be different if this is a call site.
2974  if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2975  (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2976  return false;
2977 
2978  bool IsKnown;
2979  if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2980  return IsKnown || !KnownOnly;
2981  return false;
2982  }
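 // For example, a function declared as (illustrative)
 //   define i32 @sum(i32* %p) mustprogress readonly { ... }
 // must eventually return: mustprogress rules out indefinite looping
 // without progress and readonly rules out synchronizing side effects, so
 // willreturn follows without inspecting the body.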
2983 
2984  /// See AbstractAttribute::updateImpl(...).
2985  ChangeStatus updateImpl(Attributor &A) override {
2986  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2987  return ChangeStatus::UNCHANGED;
2988 
2989  auto CheckForWillReturn = [&](Instruction &I) {
2990  IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2991  const auto &WillReturnAA =
2992  A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2993  if (WillReturnAA.isKnownWillReturn())
2994  return true;
2995  if (!WillReturnAA.isAssumedWillReturn())
2996  return false;
2997  const auto &NoRecurseAA =
2998  A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2999  return NoRecurseAA.isAssumedNoRecurse();
3000  };
3001 
3002  bool UsedAssumedInformation = false;
3003  if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3004  UsedAssumedInformation))
3005  return indicatePessimisticFixpoint();
3006 
3007  return ChangeStatus::UNCHANGED;
3008  }
3009 
3010  /// See AbstractAttribute::getAsStr()
3011  const std::string getAsStr() const override {
3012  return getAssumed() ? "willreturn" : "may-noreturn";
3013  }
3014 };
3015 
3016 struct AAWillReturnFunction final : AAWillReturnImpl {
3017  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3018  : AAWillReturnImpl(IRP, A) {}
3019 
3020  /// See AbstractAttribute::initialize(...).
3021  void initialize(Attributor &A) override {
3022  AAWillReturnImpl::initialize(A);
3023 
3024  Function *F = getAnchorScope();
3025  if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3026  indicatePessimisticFixpoint();
3027  }
3028 
3029  /// See AbstractAttribute::trackStatistics()
3030  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3031 };
3032 
3033 /// WillReturn attribute deduction for a call site.
3034 struct AAWillReturnCallSite final : AAWillReturnImpl {
3035  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3036  : AAWillReturnImpl(IRP, A) {}
3037 
3038  /// See AbstractAttribute::initialize(...).
3039  void initialize(Attributor &A) override {
3040  AAWillReturnImpl::initialize(A);
3041  Function *F = getAssociatedFunction();
3042  if (!F || !A.isFunctionIPOAmendable(*F))
3043  indicatePessimisticFixpoint();
3044  }
3045 
3046  /// See AbstractAttribute::updateImpl(...).
3047  ChangeStatus updateImpl(Attributor &A) override {
3048  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3049  return ChangeStatus::UNCHANGED;
3050 
3051  // TODO: Once we have call site specific value information we can provide
3052  // call site specific liveness information and then it makes
3053  // sense to specialize attributes for call site arguments instead of
3054  // redirecting requests to the callee argument.
3055  Function *F = getAssociatedFunction();
3056  const IRPosition &FnPos = IRPosition::function(*F);
3057  auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3058  return clampStateAndIndicateChange(getState(), FnAA.getState());
3059  }
3060 
3061  /// See AbstractAttribute::trackStatistics()
3062  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3063 };
3064 } // namespace
3065 
3066 /// -------------------AAReachability Attribute--------------------------
3067 
3068 namespace {
3069 struct AAReachabilityImpl : AAReachability {
3070  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3071  : AAReachability(IRP, A) {}
3072 
3073  const std::string getAsStr() const override {
3074  // TODO: Return the number of reachable queries.
3075  return "reachable";
3076  }
3077 
3078  /// See AbstractAttribute::updateImpl(...).
3079  ChangeStatus updateImpl(Attributor &A) override {
3080  return ChangeStatus::UNCHANGED;
3081  }
3082 };
3083 
3084 struct AAReachabilityFunction final : public AAReachabilityImpl {
3085  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3086  : AAReachabilityImpl(IRP, A) {}
3087 
3088  /// See AbstractAttribute::trackStatistics()
3089  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3090 };
3091 } // namespace
3092 
3093 /// ------------------------ NoAlias Argument Attribute ------------------------
3094 
3095 namespace {
3096 struct AANoAliasImpl : AANoAlias {
3097  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3098  assert(getAssociatedType()->isPointerTy() &&
3099  "Noalias is a pointer attribute");
3100  }
3101 
3102  const std::string getAsStr() const override {
3103  return getAssumed() ? "noalias" : "may-alias";
3104  }
3105 };
3106 
3107 /// NoAlias attribute for a floating value.
3108 struct AANoAliasFloating final : AANoAliasImpl {
3109  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3110  : AANoAliasImpl(IRP, A) {}
3111 
3112  /// See AbstractAttribute::initialize(...).
3113  void initialize(Attributor &A) override {
3114  AANoAliasImpl::initialize(A);
3115  Value *Val = &getAssociatedValue();
3116  do {
3117  CastInst *CI = dyn_cast<CastInst>(Val);
3118  if (!CI)
3119  break;
3120  Value *Base = CI->getOperand(0);
3121  if (!Base->hasOneUse())
3122  break;
3123  Val = Base;
3124  } while (true);
3125 
3126  if (!Val->getType()->isPointerTy()) {
3127  indicatePessimisticFixpoint();
3128  return;
3129  }
3130 
3131  if (isa<AllocaInst>(Val))
3132  indicateOptimisticFixpoint();
3133  else if (isa<ConstantPointerNull>(Val) &&
3134  !NullPointerIsDefined(getAnchorScope(),
3135  Val->getType()->getPointerAddressSpace()))
3136  indicateOptimisticFixpoint();
3137  else if (Val != &getAssociatedValue()) {
3138  const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3139  *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3140  if (ValNoAliasAA.isKnownNoAlias())
3141  indicateOptimisticFixpoint();
3142  }
3143  }
3144 
3145  /// See AbstractAttribute::updateImpl(...).
3146  ChangeStatus updateImpl(Attributor &A) override {
3147  // TODO: Implement this.
3148  return indicatePessimisticFixpoint();
3149  }
3150 
3151  /// See AbstractAttribute::trackStatistics()
3152  void trackStatistics() const override {
3153  STATS_DECLTRACK_FLOATING_ATTR(noalias)
3154  }
3155 };
3156 
3157 /// NoAlias attribute for an argument.
3158 struct AANoAliasArgument final
3159  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3160  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3161  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3162 
3163  /// See AbstractAttribute::initialize(...).
3164  void initialize(Attributor &A) override {
3165  Base::initialize(A);
3166  // See callsite argument attribute and callee argument attribute.
3167  if (hasAttr({Attribute::ByVal}))
3168  indicateOptimisticFixpoint();
3169  }
3170 
3171  /// See AbstractAttribute::updateImpl(...).
3172  ChangeStatus updateImpl(Attributor &A) override {
3173  // We have to make sure no-alias on the argument does not break
3174  // synchronization when this is a callback argument, see also [1] below.
3175  // If synchronization cannot be affected, we delegate to the base updateImpl
3176  // function, otherwise we give up for now.
3177 
3178  // If the function is no-sync, no-alias cannot break synchronization.
3179  const auto &NoSyncAA =
3180  A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3181  DepClassTy::OPTIONAL);
3182  if (NoSyncAA.isAssumedNoSync())
3183  return Base::updateImpl(A);
3184 
3185  // If the argument is read-only, no-alias cannot break synchronization.
3186  bool IsKnown;
3187  if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3188  return Base::updateImpl(A);
3189 
3190  // If the argument is never passed through callbacks, no-alias cannot break
3191  // synchronization.
3192  bool UsedAssumedInformation = false;
3193  if (A.checkForAllCallSites(
3194  [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3195  true, UsedAssumedInformation))
3196  return Base::updateImpl(A);
3197 
3198  // TODO: add no-alias but make sure it doesn't break synchronization by
3199  // introducing fake uses. See:
3200  // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3201  // International Workshop on OpenMP 2018,
3202  // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3203 
3204  return indicatePessimisticFixpoint();
3205  }
3206 
3207  /// See AbstractAttribute::trackStatistics()
3208  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3209 };
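
// Hedged usage sketch of Attributor::checkForAllCallSites as employed in
// AANoAliasArgument::updateImpl above (the wrapper and its name are
// assumptions for illustration): the predicate must hold for every call
// site, `RequireAllCallSites` demands that all call sites are visible,
// and UsedAssumedInformation reports whether the answer rests on assumed
// (i.e., still revisable) facts.
static bool allCallSitesAreDirect(Attributor &A,
                                  AbstractAttribute &QueryingAA) {
  bool UsedAssumedInformation = false;
  return A.checkForAllCallSites(
      [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, QueryingAA,
      /* RequireAllCallSites */ true, UsedAssumedInformation);
}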
3210 
3211 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3212  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3213  : AANoAliasImpl(IRP, A) {}
3214 
3215  /// See AbstractAttribute::initialize(...).
3216  void initialize(Attributor &A) override {
3217  // See callsite argument attribute and callee argument attribute.
3218  const auto &CB = cast<CallBase>(getAnchorValue());
3219  if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3220  indicateOptimisticFixpoint();
3221  Value &Val = getAssociatedValue();
3222  if (isa<ConstantPointerNull>(Val) &&
3223  !NullPointerIsDefined(getAnchorScope(),
3224  Val.getType()->getPointerAddressSpace()))
3225  indicateOptimisticFixpoint();
3226  }
3227 
3228  /// Determine if the underlying value may alias with the call site argument
3229  /// \p OtherArgNo of \p CB (= the underlying call site).
3230  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3231  const AAMemoryBehavior &MemBehaviorAA,
3232  const CallBase &CB, unsigned OtherArgNo) {
3233  // We do not need to worry about aliasing with the underlying IRP.
3234  if (this->getCalleeArgNo() == (int)OtherArgNo)
3235  return false;
3236 
3237  // If it is not a pointer or pointer vector we do not alias.
3238  const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3239  if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3240  return false;
3241 
3242  auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3243  *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3244 
3245  // If the argument is readnone, there is no read-write aliasing.
3246  if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3247  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3248  return false;
3249  }
3250 
3251  // If the argument is readonly and the underlying value is readonly, there
3252  // is no read-write aliasing.
3253  bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3254  if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3255  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3256  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3257  return false;
3258  }
3259 
3260  // We have to utilize actual alias analysis queries so we need the object.
3261  if (!AAR)
3262  AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3263 
3264  // Try to rule it out at the call site.
3265  bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3266  LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3267  "callsite arguments: "
3268  << getAssociatedValue() << " " << *ArgOp << " => "
3269  << (IsAliasing ? "" : "no-") << "alias \n");
3270 
3271  return IsAliasing;
3272  }
3273 
3274  bool
3275  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3276  const AAMemoryBehavior &MemBehaviorAA,
3277  const AANoAlias &NoAliasAA) {
3278  // We can deduce "noalias" if the following conditions hold.
3279  // (i) Associated value is assumed to be noalias in the definition.
3280  // (ii) Associated value is assumed to be no-capture in all the uses
3281  // possibly executed before this callsite.
3282  // (iii) There is no other pointer argument which could alias with the
3283  // value.
3284 
3285  bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3286  if (!AssociatedValueIsNoAliasAtDef) {
3287  LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3288  << " is not no-alias at the definition\n");
3289  return false;
3290  }
3291 
3292  auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3293  const auto &DerefAA = A.getAAFor<AADereferenceable>(
3294  *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3295  return DerefAA.getAssumedDereferenceableBytes();
3296  };
3297 
3298  A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3299 
3300  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3301  const Function *ScopeFn = VIRP.getAnchorScope();
3302  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3303  // Check whether the value is captured in the scope using AANoCapture.
3304  // Look at CFG and check only uses possibly executed before this
3305  // callsite.
3306  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3307  Instruction *UserI = cast<Instruction>(U.getUser());
3308 
3309  // If UserI is the current instruction and there is a single potential use
3310  // of the value in UserI, we allow the use.
3311  // TODO: We should inspect the operands and allow those that cannot alias
3312  // with the value.
3313  if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3314  return true;
3315 
3316  if (ScopeFn) {
3317  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3318  if (CB->isArgOperand(&U)) {
3319 
3320  unsigned ArgNo = CB->getArgOperandNo(&U);
3321 
3322  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3323  *this, IRPosition::callsite_argument(*CB, ArgNo),
3324  DepClassTy::OPTIONAL);
3325 
3326  if (NoCaptureAA.isAssumedNoCapture())
3327  return true;
3328  }
3329  }
3330 
3331  if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3332  return true;
3333  }
3334 
3335  // TODO: We should track the capturing uses in AANoCapture but the problem
3336  // is CGSCC runs. For those we would need to "allow" AANoCapture for
3337  // a value in the module slice.
3338  switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3339  case UseCaptureKind::NO_CAPTURE:
3340  return true;
3341  case UseCaptureKind::MAY_CAPTURE:
3342  LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3343  << "\n");
3344  return false;
3345  case UseCaptureKind::PASSTHROUGH:
3346  Follow = true;
3347  return true;
3348  }
3349  llvm_unreachable("unknown UseCaptureKind");
3350  };
3351 
3352  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3353  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3354  LLVM_DEBUG(
3355  dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3356  << " cannot be noalias as it is potentially captured\n");
3357  return false;
3358  }
3359  }
3360  A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3361 
3362  // Check there is no other pointer argument which could alias with the
3363  // value passed at this call site.
3364  // TODO: AbstractCallSite
3365  const auto &CB = cast<CallBase>(getAnchorValue());
3366  for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3367  if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3368  return false;
3369 
3370  return true;
3371  }
3372 
3373  /// See AbstractAttribute::updateImpl(...).
3374  ChangeStatus updateImpl(Attributor &A) override {
3375  // If the argument is readnone we are done as there are no accesses via the
3376  // argument.
3377  auto &MemBehaviorAA =
3378  A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3379  if (MemBehaviorAA.isAssumedReadNone()) {
3380  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3381  return ChangeStatus::UNCHANGED;
3382  }
3383 
3384  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3385  const auto &NoAliasAA =
3386  A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3387 
3388  AAResults *AAR = nullptr;
3389  if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3390  NoAliasAA)) {
3391  LLVM_DEBUG(
3392  dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3393  return ChangeStatus::UNCHANGED;
3394  }
3395 
3396  return indicatePessimisticFixpoint();
3397  }
3398 
3399  /// See AbstractAttribute::trackStatistics()
3400  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3401 };
3402 
3403 /// NoAlias attribute for function return value.
3404 struct AANoAliasReturned final : AANoAliasImpl {
3405  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3406  : AANoAliasImpl(IRP, A) {}
3407 
3408  /// See AbstractAttribute::initialize(...).
3409  void initialize(Attributor &A) override {
3410  AANoAliasImpl::initialize(A);
3411  Function *F = getAssociatedFunction();
3412  if (!F || F->isDeclaration())
3413  indicatePessimisticFixpoint();
3414  }
3415 
3416  /// See AbstractAttribute::updateImpl(...).
3417  virtual ChangeStatus updateImpl(Attributor &A) override {
3418 
3419  auto CheckReturnValue = [&](Value &RV) -> bool {
3420  if (Constant *C = dyn_cast<Constant>(&RV))
3421  if (C->isNullValue() || isa<UndefValue>(C))
3422  return true;
3423 
3424  /// For now, we can only deduce noalias if we have call sites.
3425  /// FIXME: add more support.
3426  if (!isa<CallBase>(&RV))
3427  return false;
3428 
3429  const IRPosition &RVPos = IRPosition::value(RV);
3430  const auto &NoAliasAA =
3431  A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3432  if (!NoAliasAA.isAssumedNoAlias())
3433  return false;
3434 
3435  const auto &NoCaptureAA =
3436  A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3437  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3438  };
3439 
3440  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3441  return indicatePessimisticFixpoint();
3442 
3443  return ChangeStatus::UNCHANGED;
3444  }
3445 
3446  /// See AbstractAttribute::trackStatistics()
3447  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3448 };
3449 
3450 /// NoAlias attribute deduction for a call site return value.
3451 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3452  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3453  : AANoAliasImpl(IRP, A) {}
3454 
3455  /// See AbstractAttribute::initialize(...).
3456  void initialize(Attributor &A) override {
3457  AANoAliasImpl::initialize(A);
3458  Function *F = getAssociatedFunction();
3459  if (!F || F->isDeclaration())
3460  indicatePessimisticFixpoint();
3461  }
3462 
3463  /// See AbstractAttribute::updateImpl(...).
3464  ChangeStatus updateImpl(Attributor &A) override {
3465  // TODO: Once we have call site specific value information we can provide
3466  // call site specific liveness information and then it makes
3467  // sense to specialize attributes for call site arguments instead of
3468  // redirecting requests to the callee argument.
3469  Function *F = getAssociatedFunction();
3470  const IRPosition &FnPos = IRPosition::returned(*F);
3471  auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3472  return clampStateAndIndicateChange(getState(), FnAA.getState());
3473  }
3474 
3475  /// See AbstractAttribute::trackStatistics()
3476  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3477 };
3478 } // namespace
3479 
3480 /// -------------------AAIsDead Function Attribute-----------------------
3481 
3482 namespace {
3483 struct AAIsDeadValueImpl : public AAIsDead {
3484  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3485 
3486  /// See AbstractAttribute::initialize(...).
3487  void initialize(Attributor &A) override {
3488  if (auto *Scope = getAnchorScope())
3489  if (!A.isRunOn(*Scope))
3490  indicatePessimisticFixpoint();
3491  }
3492 
3493  /// See AAIsDead::isAssumedDead().
3494  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3495 
3496  /// See AAIsDead::isKnownDead().
3497  bool isKnownDead() const override { return isKnown(IS_DEAD); }
3498 
3499  /// See AAIsDead::isAssumedDead(BasicBlock *).
3500  bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3501 
3502  /// See AAIsDead::isKnownDead(BasicBlock *).
3503  bool isKnownDead(const BasicBlock *BB) const override { return false; }
3504 
3505  /// See AAIsDead::isAssumedDead(Instruction *I).
3506  bool isAssumedDead(const Instruction *I) const override {
3507  return I == getCtxI() && isAssumedDead();
3508  }
3509 
3510  /// See AAIsDead::isKnownDead(Instruction *I).
3511  bool isKnownDead(const Instruction *I) const override {
3512  return isAssumedDead(I) && isKnownDead();
3513  }
3514 
3515  /// See AbstractAttribute::getAsStr().
3516  virtual const std::string getAsStr() const override {
3517  return isAssumedDead() ? "assumed-dead" : "assumed-live";
3518  }
3519 
3520  /// Check if all uses are assumed dead.
3521  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3522  // Callers might not check the type; a void value has no uses.
3523  if (V.getType()->isVoidTy() || V.use_empty())
3524  return true;
3525 
3526  // If we replace a value with a constant there are no uses left afterwards.
3527  if (!isa<Constant>(V)) {
3528  if (auto *I = dyn_cast<Instruction>(&V))
3529  if (!A.isRunOn(*I->getFunction()))
3530  return false;
3531  bool UsedAssumedInformation = false;
3532  Optional<Constant *> C =
3533  A.getAssumedConstant(V, *this, UsedAssumedInformation);
3534  if (!C.hasValue() || *C)
3535  return true;
3536  }
3537 
3538  auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3539  // Explicitly set the dependence class to required because we want a long
3540  // chain of N dependent instructions to be considered live as soon as one is
3541  // without going through N update cycles. This is not required for
3542  // correctness.
3543  return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3544  DepClassTy::REQUIRED,
3545  /* IgnoreDroppableUses */ false);
3546  }
3547 
3548  /// Determine if \p I is assumed to be side-effect free.
3549  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3550  if (!I || wouldInstructionBeTriviallyDead(I))
3551  return true;
3552 
3553  auto *CB = dyn_cast<CallBase>(I);
3554  if (!CB || isa<IntrinsicInst>(CB))
3555  return false;
3556 
3557  const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3558  const auto &NoUnwindAA =
3559  A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3560  if (!NoUnwindAA.isAssumedNoUnwind())
3561  return false;
3562  if (!NoUnwindAA.isKnownNoUnwind())
3563  A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3564 
3565  bool IsKnown;
3566  return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3567  }
3568 };
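
// Illustrative query pattern (the helper and its name are assumptions)
// showing how the assumed/known split of AAIsDeadValueImpl above is
// consumed by clients: the "known" state is proven and final, while the
// "assumed" state is optimistic and may be revised in later fixpoint
// iterations.
static bool isProvenOrOptimisticallyDead(Attributor &A,
                                         AbstractAttribute &QueryingAA,
                                         Value &V, bool RequireProven) {
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  // Known implies assumed; callers that need a safe, final answer should
  // insist on the known state.
  return RequireProven ? IsDeadAA.isKnownDead() : IsDeadAA.isAssumedDead();
}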
3569 
3570 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3571  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3572  : AAIsDeadValueImpl(IRP, A) {}
3573 
3574  /// See AbstractAttribute::initialize(...).
3575  void initialize(Attributor &A) override {
3576  AAIsDeadValueImpl::initialize(A);
3577 
3578  if (isa<UndefValue>(getAssociatedValue())) {
3579  indicatePessimisticFixpoint();
3580  return;
3581  }
3582 
3583  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3584  if (!isAssumedSideEffectFree(A, I)) {
3585  if (!isa_and_nonnull<StoreInst>(I))
3586  indicatePessimisticFixpoint();
3587  else
3588  removeAssumedBits(HAS_NO_EFFECT);
3589  }
3590  }
3591 
3592  bool isDeadStore(Attributor &A, StoreInst &SI) {
3593  // The LangRef now states that volatile stores are not UB/dead, so skip them.
3594  if (SI.isVolatile())
3595  return false;
3596 
3597  bool UsedAssumedInformation = false;
3598  SmallSetVector<Value *, 4> PotentialCopies;
3599  if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3600  UsedAssumedInformation))
3601  return false;
3602  return llvm::all_of(PotentialCopies, [&](Value *V) {
3603  return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3604  UsedAssumedInformation);
3605  });
3606  }
3607 
3608  /// See AbstractAttribute::getAsStr().
3609  const std::string getAsStr() const override {
3610  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3611  if (isa_and_nonnull<StoreInst>(I))
3612  if (isValidState())
3613  return "assumed-dead-store";
3614  return AAIsDeadValueImpl::getAsStr();
3615  }
3616 
3617  /// See AbstractAttribute::updateImpl(...).
3618  ChangeStatus updateImpl(Attributor &A) override {
3619  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3620  if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3621  if (!isDeadStore(A, *SI))
3622  return indicatePessimisticFixpoint();
3623  } else {
3624  if (!isAssumedSideEffectFree(A, I))
3625  return indicatePessimisticFixpoint();
3626  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3627  return indicatePessimisticFixpoint();
3628  }
3629  return ChangeStatus::UNCHANGED;
3630  }
3631 
3632  bool isRemovableStore() const override {
3633  return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3634  }
3635 
3636  /// See AbstractAttribute::manifest(...).
3637  ChangeStatus manifest(Attributor &A) override {
3638  Value &V = getAssociatedValue();
3639  if (auto *I = dyn_cast<Instruction>(&V)) {
3640  // If we get here we basically know the users are all dead. We check
3641  // isAssumedSideEffectFree again because it might no longer hold: it is
3642  // possible that only the users are dead while the instruction (= the
3643  // call) is still needed.
3644  if (isa<StoreInst>(I) ||
3645  (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3646  A.deleteAfterManifest(*I);
3647  return ChangeStatus::CHANGED;
3648  }
3649  }
3650  return ChangeStatus::UNCHANGED;
3651  }
3652 
3653  /// See AbstractAttribute::trackStatistics()
3654  void trackStatistics() const override {
3655  STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3656  }
3657 };
3658 
3659 struct AAIsDeadArgument : public AAIsDeadFloating {
3660  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3661  : AAIsDeadFloating(IRP, A) {}
3662 
3663  /// See AbstractAttribute::initialize(...).
3664  void initialize(Attributor &A) override {
3665  AAIsDeadFloating::initialize(A);
3666  if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3667  indicatePessimisticFixpoint();
3668  }
3669 
3670  /// See AbstractAttribute::manifest(...).
3671  ChangeStatus manifest(Attributor &A) override {
3672  Argument &Arg = *getAssociatedArgument();
3673  if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3674  if (A.registerFunctionSignatureRewrite(
3675  Arg, /* ReplacementTypes */ {},
3676  Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3677  Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3678  return ChangeStatus::CHANGED;
3679  }
3680  return ChangeStatus::UNCHANGED;
3681  }
3682 
3683  /// See AbstractAttribute::trackStatistics()
3684  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3685 };
3686 
3687 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3688  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3689  : AAIsDeadValueImpl(IRP, A) {}
3690 
3691  /// See AbstractAttribute::initialize(...).
3692  void initialize(Attributor &A) override {
3693  AAIsDeadValueImpl::initialize(A);
3694  if (isa<UndefValue>(getAssociatedValue()))
3695  indicatePessimisticFixpoint();
3696  }
3697 
3698  /// See AbstractAttribute::updateImpl(...).
3699  ChangeStatus updateImpl(Attributor &A) override {
3700  // TODO: Once we have call site specific value information we can provide
3701  // call site specific liveness information and then it makes
3702  // sense to specialize attributes for call site arguments instead of
3703  // redirecting requests to the callee argument.
3704  Argument *Arg = getAssociatedArgument();
3705  if (!Arg)
3706  return indicatePessimisticFixpoint();
3707  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3708  auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3709  return clampStateAndIndicateChange(getState(), ArgAA.getState());
3710  }
3711 
3712  /// See AbstractAttribute::manifest(...).
3713  ChangeStatus manifest(Attributor &A) override {
3714  CallBase &CB = cast<CallBase>(getAnchorValue());
3715  Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3716  assert(!isa<UndefValue>(U.get()) &&
3717  "Expected undef values to be filtered out!");
3718  UndefValue &UV = *UndefValue::get(U->getType());
3719  if (A.changeUseAfterManifest(U, UV))
3720  return ChangeStatus::CHANGED;
3721  return ChangeStatus::UNCHANGED;
3722  }
3723 
3724  /// See AbstractAttribute::trackStatistics()
3725  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3726 };
3727 
3728 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3729  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3730  : AAIsDeadFloating(IRP, A) {}
3731 
3732  /// See AAIsDead::isAssumedDead().
3733  bool isAssumedDead() const override {
3734  return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3735  }
3736 
3737  /// See AbstractAttribute::initialize(...).
3738  void initialize(Attributor &A) override {
3739  AAIsDeadFloating::initialize(A);
3740  if (isa<UndefValue>(getAssociatedValue())) {
3741  indicatePessimisticFixpoint();
3742  return;
3743  }
3744 
3745  // We track this separately as a secondary state.
3746  IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3747  }
3748 
3749  /// See AbstractAttribute::updateImpl(...).
3750  ChangeStatus updateImpl(Attributor &A) override {
3751  ChangeStatus Changed = ChangeStatus::UNCHANGED;
3752  if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3753  IsAssumedSideEffectFree = false;
3754  Changed = ChangeStatus::CHANGED;
3755  }
3756  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3757  return indicatePessimisticFixpoint();
3758  return Changed;
3759  }
3760 
3761  /// See AbstractAttribute::trackStatistics()
3762  void trackStatistics() const override {
3763  if (IsAssumedSideEffectFree)
3764  STATS_DECLTRACK_CSRET_ATTR(IsDead)
3765  else
3766  STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3767  }
3768 
3769  /// See AbstractAttribute::getAsStr().
3770  const std::string getAsStr() const override {
3771  return isAssumedDead()
3772  ? "assumed-dead"
3773  : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3774  }
3775 
3776 private:
3777  bool IsAssumedSideEffectFree = true;
3778 };
3779 
3780 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3781  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3782  : AAIsDeadValueImpl(IRP, A) {}
3783 
3784  /// See AbstractAttribute::updateImpl(...).
3785  ChangeStatus updateImpl(Attributor &A) override {
3786 
3787  bool UsedAssumedInformation = false;
3788  A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3789  {Instruction::Ret}, UsedAssumedInformation);
3790 
3791  auto PredForCallSite = [&](AbstractCallSite ACS) {
3792  if (ACS.isCallbackCall() || !ACS.getInstruction())
3793  return false;
3794  return areAllUsesAssumedDead(A, *ACS.getInstruction());
3795  };
3796 
3797  if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3798  UsedAssumedInformation))
3799  return indicatePessimisticFixpoint();
3800 
3801  return ChangeStatus::UNCHANGED;
3802  }
3803 
3804  /// See AbstractAttribute::manifest(...).
3805  ChangeStatus manifest(Attributor &A) override {
3806  // TODO: Rewrite the signature to return void?
3807  bool AnyChange = false;
3808  UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3809  auto RetInstPred = [&](Instruction &I) {
3810  ReturnInst &RI = cast<ReturnInst>(I);
3811  if (!isa<UndefValue>(RI.getReturnValue()))
3812  AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3813  return true;
3814  };
3815  bool UsedAssumedInformation = false;
3816  A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3817  UsedAssumedInformation);
3818  return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3819  }
3820 
3821  /// See AbstractAttribute::trackStatistics()
3822  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3823 };
3824 
3825 struct AAIsDeadFunction : public AAIsDead {
3826  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3827 
3828  /// See AbstractAttribute::initialize(...).
3829  void initialize(Attributor &A) override {
3830  Function *F = getAnchorScope();
3831  if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3832  indicatePessimisticFixpoint();
3833  return;
3834  }
3835  ToBeExploredFrom.insert(&F->getEntryBlock().front());
3836  assumeLive(A, F->getEntryBlock());
3837  }
3838 
3839  /// See AbstractAttribute::getAsStr().
3840  const std::string getAsStr() const override {
3841  return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3842  std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3843  std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3844  std::to_string(KnownDeadEnds.size()) + "]";
3845  }
3846 
3847  /// See AbstractAttribute::manifest(...).
3848  ChangeStatus manifest(Attributor &A) override {
3849  assert(getState().isValidState() &&
3850  "Attempted to manifest an invalid state!");
3851 
3852  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3853  Function &F = *getAnchorScope();
3854 
3855  if (AssumedLiveBlocks.empty()) {
3856  A.deleteAfterManifest(F);
3857  return ChangeStatus::CHANGED;
3858  }
3859 
3860  // Flag to determine if we can change an invoke to a call assuming the
3861  // callee is nounwind. This is not possible if the personality of the
3862  // function allows catching asynchronous exceptions.
3863  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3864 
3865  KnownDeadEnds.set_union(ToBeExploredFrom);
3866  for (const Instruction *DeadEndI : KnownDeadEnds) {
3867  auto *CB = dyn_cast<CallBase>(DeadEndI);
3868  if (!CB)
3869  continue;
3870  const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3871  *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3872  bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3873  if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3874  continue;
3875 
3876  if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3877  A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3878  else
3879  A.changeToUnreachableAfterManifest(
3880  const_cast<Instruction *>(DeadEndI->getNextNode()));
3881  HasChanged = ChangeStatus::CHANGED;
3882  }
3883 
3884  STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3885  for (BasicBlock &BB : F)
3886  if (!AssumedLiveBlocks.count(&BB)) {
3887  A.deleteAfterManifest(BB);
3888  ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3889  HasChanged = ChangeStatus::CHANGED;
3890  }
3891 
3892  return HasChanged;
3893  }
3894 
3895  /// See AbstractAttribute::updateImpl(...).
3896  ChangeStatus updateImpl(Attributor &A) override;
3897 
3898  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3899  assert(From->getParent() == getAnchorScope() &&
3900  To->getParent() == getAnchorScope() &&
3901  "Used AAIsDead of the wrong function");
3902  return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3903  }
3904 
3905  /// See AbstractAttribute::trackStatistics()
3906  void trackStatistics() const override {}
3907 
3908  /// Returns true if the function is assumed dead.
3909  bool isAssumedDead() const override { return false; }
3910 
3911  /// See AAIsDead::isKnownDead().
3912  bool isKnownDead() const override { return false; }
3913 
3914  /// See AAIsDead::isAssumedDead(BasicBlock *).
3915  bool isAssumedDead(const BasicBlock *BB) const override {
3916  assert(BB->getParent() == getAnchorScope() &&
3917  "BB must be in the same anchor scope function.");
3918 
3919  if (!getAssumed())
3920  return false;
3921  return !AssumedLiveBlocks.count(BB);
3922  }
3923 
3924  /// See AAIsDead::isKnownDead(BasicBlock *).
3925  bool isKnownDead(const BasicBlock *BB) const override {
3926  return getKnown() && isAssumedDead(BB);
3927  }
3928 
3929  /// See AAIsDead::isAssumed(Instruction *I).
3930  bool isAssumedDead(const Instruction *I) const override {
3931  assert(I->getParent()->getParent() == getAnchorScope() &&
3932  "Instruction must be in the same anchor scope function.");
3933 
3934  if (!getAssumed())
3935  return false;
3936 
3937  // If it is not in AssumedLiveBlocks then it is for sure dead.
3938  // Otherwise, it can still be dead after a noreturn call in a live block.
3939  if (!AssumedLiveBlocks.count(I->getParent()))
3940  return true;
3941 
3942  // If it is not after a liveness barrier it is live.
3943  const Instruction *PrevI = I->getPrevNode();
3944  while (PrevI) {
3945  if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3946  return true;
3947  PrevI = PrevI->getPrevNode();
3948  }
3949  return false;
3950  }
3951 
3952  /// See AAIsDead::isKnownDead(Instruction *I).
3953  bool isKnownDead(const Instruction *I) const override {
3954  return getKnown() && isAssumedDead(I);
3955  }
3956 
3957  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3958  /// that internal functions called from \p BB should now be looked at.
3959  bool assumeLive(Attributor &A, const BasicBlock &BB) {
3960  if (!AssumedLiveBlocks.insert(&BB).second)
3961  return false;
3962 
3963  // We assume that all of BB is (probably) live now and if there are calls to
3964  // internal functions we will assume that those are now live as well. This
3965  // is a performance optimization for blocks with calls to a lot of internal
3966  // functions. It can however cause dead functions to be treated as live.
3967  for (const Instruction &I : BB)
3968  if (const auto *CB = dyn_cast<CallBase>(&I))
3969  if (const Function *F = CB->getCalledFunction())
3970  if (F->hasLocalLinkage())
3971  A.markLiveInternalFunction(*F);
3972  return true;
3973  }
3974 
3975  /// Collection of instructions that need to be explored again, e.g.,
3976  /// because we assumed they do not transfer control to (one of their) successors.
3977  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3978 
3979  /// Collection of instructions that are known to not transfer control.
3980  SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3981 
3982  /// Collection of all assumed live edges
3983  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3984 
3985  /// Collection of all assumed live BasicBlocks.
3986  DenseSet<const BasicBlock *> AssumedLiveBlocks;
3987 };
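
// A self-contained sketch (simplified assumption, standard C++ only;
// assumes <set> and <vector> are available) of the optimistic exploration
// implemented by AAIsDeadFunction above: blocks are live only if reachable
// from the entry block through edges that could not be proven dead. The
// real implementation additionally stops at noreturn calls and re-queues
// instructions whose classification used assumed information.
template <typename BlockT, typename SuccessorsFn>
static std::set<const BlockT *> collectLiveBlocks(const BlockT *Entry,
                                                  SuccessorsFn Successors) {
  std::set<const BlockT *> Live;
  std::vector<const BlockT *> Worklist{Entry};
  while (!Worklist.empty()) {
    const BlockT *BB = Worklist.back();
    Worklist.pop_back();
    if (!Live.insert(BB).second)
      continue; // Already explored this block.
    for (const BlockT *Succ : Successors(BB))
      Worklist.push_back(Succ);
  }
  return Live;
}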
3988 
3989 static bool
3990 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3991  AbstractAttribute &AA,
3992  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3993  const IRPosition &IPos = IRPosition::callsite_function(CB);
3994 
3995  const auto &NoReturnAA =
3996  A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3997  if (NoReturnAA.isAssumedNoReturn())
3998  return !NoReturnAA.isKnownNoReturn();
3999  if (CB.isTerminator())
4000  AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4001  else
4002  AliveSuccessors.push_back(CB.getNextNode());
4003  return false;
4004 }
4005 
4006 static bool
4007 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4008  AbstractAttribute &AA,
4009  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4010  bool UsedAssumedInformation =
4011  identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4012 
4013  // First, determine if we can change an invoke to a call assuming the
4014  // callee is nounwind. This is not possible if the personality of the
4015  // function allows catching asynchronous exceptions.
4016  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4017  AliveSuccessors.push_back(&II.getUnwindDest()->front());
4018  } else {
4019  const IRPosition &IPos = IRPosition::callsite_function(II);
4020  const auto &AANoUnw =
4021  A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4022  if (AANoUnw.isAssumedNoUnwind()) {
4023  UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4024  } else {
4025  AliveSuccessors.push_back(&II.getUnwindDest()->front());
4026  }
4027  }
4028  return UsedAssumedInformation;
4029 }
4030 
4031 static bool
4032 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4033  AbstractAttribute &AA,
4034  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4035  bool UsedAssumedInformation = false;
4036  if (BI.getNumSuccessors() == 1) {
4037  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4038  } else {
4039  Optional<Constant *> C =
4040  A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4041  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4042  // No value yet, assume both edges are dead.
4043  } else if (isa_and_nonnull<ConstantInt>(*C)) {
4044  const BasicBlock *SuccBB =
4045  BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4046  AliveSuccessors.push_back(&SuccBB->front());
4047  } else {
4048  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4049  AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4050  UsedAssumedInformation = false;
4051  }
4052  }
4053  return UsedAssumedInformation;
4054 }
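
// Worked example for the `1 - getZExtValue()` indexing above: for
// `br i1 %c, label %T, label %F`, successor 0 is %T (taken when %c is
// true) and successor 1 is %F. A constant condition of 1 therefore
// selects successor 1 - 1 == 0 and a constant 0 selects successor
// 1 - 0 == 1. (Standalone helper; the name is illustrative only.)
static unsigned takenSuccessorIndex(bool ConditionValue) {
  return 1u - static_cast<unsigned>(ConditionValue);
}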
4055 
4056 static bool
4057 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4058  AbstractAttribute &AA,
4059  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4060  bool UsedAssumedInformation = false;
4061  Optional<Constant *> C =
4062  A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4063  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4064  // No value yet, assume all edges are dead.
4065  } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4066  for (auto &CaseIt : SI.cases()) {
4067  if (CaseIt.getCaseValue() == C.getValue()) {
4068  AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4069  return UsedAssumedInformation;
4070  }
4071  }
4072  AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4073  return UsedAssumedInformation;
4074  } else {
4075  for (const BasicBlock *SuccBB : successors(SI.getParent()))
4076  AliveSuccessors.push_back(&SuccBB->front());
4077  }
4078  return UsedAssumedInformation;
4079 }
4080 
4081 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4082  ChangeStatus Change = ChangeStatus::UNCHANGED;
4083 
4084  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4085  << getAnchorScope()->size() << "] BBs and "
4086  << ToBeExploredFrom.size() << " exploration points and "
4087  << KnownDeadEnds.size() << " known dead ends\n");
4088 
4089  // Copy and clear the list of instructions we need to explore from. It is
4090  // refilled with instructions the next update has to look at.
4091  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4092  ToBeExploredFrom.end());
4093  decltype(ToBeExploredFrom) NewToBeExploredFrom;
4094 
4095  SmallVector<const Instruction *, 8> AliveSuccessors;
4096  while (!Worklist.empty()) {
4097  const Instruction *I = Worklist.pop_back_val();
4098  LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4099 
4100  // Fast forward for uninteresting instructions. We could look for UB here
4101  // though.
4102  while (!I->isTerminator() && !isa<CallBase>(I))
4103  I = I->getNextNode();
4104 
4105  AliveSuccessors.clear();
4106 
4107  bool UsedAssumedInformation = false;
4108  switch (I->getOpcode()) {
4109  // TODO: look for (assumed) UB to backwards propagate "deadness".
4110  default:
4111  assert(I->isTerminator() &&
4112  "Expected non-terminators to be handled already!");
4113  for (const BasicBlock *SuccBB : successors(I->getParent()))
4114  AliveSuccessors.push_back(&SuccBB->front());
4115  break;
4116  case Instruction::Call:
4117  UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4118  *this, AliveSuccessors);
4119  break;
4120  case Instruction::Invoke:
4121  UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4122  *this, AliveSuccessors);
4123  break;
4124  case Instruction::Br:
4125  UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4126  *this, AliveSuccessors);
4127  break;
4128  case Instruction::Switch:
4129  UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4130  *this, AliveSuccessors);
4131  break;
4132  }
4133 
4134  if (UsedAssumedInformation) {
4135  NewToBeExploredFrom.insert(I);
4136  } else if (AliveSuccessors.empty() ||
4137  (I->isTerminator() &&
4138  AliveSuccessors.size() < I->getNumSuccessors())) {
4139  if (KnownDeadEnds.insert(I))
4140  Change = ChangeStatus::CHANGED;
4141  }
4142 
4143  LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4144  << AliveSuccessors.size() << " UsedAssumedInformation: "
4145  << UsedAssumedInformation << "\n");
4146 
4147  for (const Instruction *AliveSuccessor : AliveSuccessors) {
4148  if (!I->isTerminator()) {
4149  assert(AliveSuccessors.size() == 1 &&
4150  "Non-terminator expected to have a single successor!");
4151  Worklist.push_back(AliveSuccessor);
4152  } else {
4153  // record the assumed live edge
4154  auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4155  if (AssumedLiveEdges.insert(Edge).second)
4156  Change = ChangeStatus::CHANGED;
4157  if (assumeLive(A, *AliveSuccessor->getParent()))
4158  Worklist.push_back(AliveSuccessor);
4159  }
4160  }
4161  }
4162 
4163  // Check if the content of ToBeExploredFrom changed, ignore the order.
4164  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4165  llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4166  return !ToBeExploredFrom.count(I);
4167  })) {
4168  Change = ChangeStatus::CHANGED;
4169  ToBeExploredFrom = std::move(NewToBeExploredFrom);
4170  }
4171 
4172  // If we know everything is live there is no need to query for liveness.
4173  // Instead, indicating a pessimistic fixpoint will cause the state to be
4174  // "invalid" and all queries to be answered conservatively without lookups.
4175  // To be in this state we have to (1) have finished the exploration,
4176  // (2) not have ruled unreachable code dead, and (3) not have discovered
4177  // any non-trivial dead end.
4178  if (ToBeExploredFrom.empty() &&
4179  getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4180  llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4181  return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4182  }))
4183  return indicatePessimisticFixpoint();
4184  return Change;
4185 }
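
// Hedged sketch of the order-insensitive comparison performed at the end
// of updateImpl above: two collections hold the same content iff their
// sizes match and every element of one is contained in the other. The
// container parameters are illustrative stand-ins for the SetVector used
// above.
template <typename ContainerT, typename SetT>
static bool sameContent(const ContainerT &New, const SetT &Old) {
  return New.size() == Old.size() &&
         llvm::all_of(New, [&](const auto &E) { return Old.count(E); });
}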
4186 
4187 /// Liveness information for a call site.
4188 struct AAIsDeadCallSite final : AAIsDeadFunction {
4189  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4190  : AAIsDeadFunction(IRP, A) {}
4191 
4192  /// See AbstractAttribute::initialize(...).
4193  void initialize(Attributor &A) override {
4194  // TODO: Once we have call site specific value information we can provide
4195  // call site specific liveness information and then it makes
4196  // sense to specialize attributes for call sites instead of
4197  // redirecting requests to the callee.
4198  llvm_unreachable("Abstract attributes for liveness are not "
4199  "supported for call sites yet!");
4200  }
4201 
4202  /// See AbstractAttribute::updateImpl(...).
4203  ChangeStatus updateImpl(Attributor &A) override {
4204  return indicatePessimisticFixpoint();
4205  }
4206 
4207  /// See AbstractAttribute::trackStatistics()
4208  void trackStatistics() const override {}
4209 };
4210 } // namespace
4211 
4212 /// -------------------- Dereferenceable Argument Attribute --------------------
4213 
4214 namespace {
4215 struct AADereferenceableImpl : AADereferenceable {
4216  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4217  : AADereferenceable(IRP, A) {}
4218  using StateType = DerefState;
4219 
4220  /// See AbstractAttribute::initialize(...).
4221  void initialize(Attributor &A) override {
4222  SmallVector<Attribute, 4> Attrs;
4223  getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4224  Attrs, /* IgnoreSubsumingPositions */ false, &A);
4225  for (const Attribute &Attr : Attrs)
4226  takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4227 
4228  const IRPosition &IRP = this->getIRPosition();
4229  NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4230 
4231  bool CanBeNull, CanBeFreed;
4232  takeKnownDerefBytesMaximum(
4233  IRP.getAssociatedValue().getPointerDereferenceableBytes(
4234  A.getDataLayout(), CanBeNull, CanBeFreed));
4235 
4236  bool IsFnInterface = IRP.isFnInterfaceKind();
4237  Function *FnScope = IRP.getAnchorScope();
4238  if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4239  indicatePessimisticFixpoint();
4240  return;
4241  }
4242 
4243  if (Instruction *CtxI = getCtxI())
4244  followUsesInMBEC(*this, A, getState(), *CtxI);
4245  }
4246 
4247  /// See AbstractAttribute::getState()
4248  /// {
4249  StateType &getState() override { return *this; }
4250  const StateType &getState() const override { return *this; }
4251  /// }
4252 
4253  /// Helper function for collecting accessed bytes in must-be-executed-context
4254  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4255  DerefState &State) {
4256  const Value *UseV = U->get();
4257  if (!UseV->getType()->isPointerTy())
4258  return;
4259 
4260  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4261  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4262  return;
4263 
4264  int64_t Offset;
4265  const Value *Base = GetPointerBaseWithConstantOffset(
4266  Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4267  if (Base && Base == &getAssociatedValue())
4268  State.addAccessedBytes(Offset, Loc->Size.getValue());
4269  }
4270 
4271  /// See followUsesInMBEC
4272  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4273  AADereferenceable::StateType &State) {
4274  bool IsNonNull = false;
4275  bool TrackUse = false;
4276  int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4277  A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4278  LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4279  << " for instruction " << *I << "\n");
4280 
4281  addAccessedBytesForUse(A, U, I, State);
4282  State.takeKnownDerefBytesMaximum(DerefBytes);
4283  return TrackUse;
4284  }
4285 
4286  /// See AbstractAttribute::manifest(...).
4287  ChangeStatus manifest(Attributor &A) override {
4288  ChangeStatus Change = AADereferenceable::manifest(A);
4289  if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4290  removeAttrs({Attribute::DereferenceableOrNull});
4291  return ChangeStatus::CHANGED;
4292  }
4293  return Change;
4294  }
4295 
4296  void getDeducedAttributes(LLVMContext &Ctx,
4297  SmallVectorImpl<Attribute> &Attrs) const override {
4298  // TODO: Add *_globally support
4299  if (isAssumedNonNull())
4300  Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4301  Ctx, getAssumedDereferenceableBytes()));
4302  else
4303  Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4304  Ctx, getAssumedDereferenceableBytes()));
4305  }
4306 
4307  /// See AbstractAttribute::getAsStr().
4308  const std::string getAsStr() const override {
4309  if (!getAssumedDereferenceableBytes())
4310  return "unknown-dereferenceable";
4311  return std::string("dereferenceable") +
4312  (isAssumedNonNull() ? "" : "_or_null") +
4313  (isAssumedGlobal() ? "_globally" : "") + "<" +
4314  std::to_string(getKnownDereferenceableBytes()) + "-" +
4315  std::to_string(getAssumedDereferenceableBytes()) + ">";
4316  }
4317 };
4318 
4319 /// Dereferenceable attribute for a floating value.
4320 struct AADereferenceableFloating : AADereferenceableImpl {
4321  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4322  : AADereferenceableImpl(IRP, A) {}
4323 
4324  /// See AbstractAttribute::updateImpl(...).
4325  ChangeStatus updateImpl(Attributor &A) override {
4326  const DataLayout &DL = A.getDataLayout();
4327 
4328  auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4329  bool Stripped) -> bool {
4330  unsigned IdxWidth =
4331  DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4332  APInt Offset(IdxWidth, 0);
4333  const Value *Base = stripAndAccumulateOffsets(
4334  A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4335  /* AllowNonInbounds */ true);
4336 
4337  const auto &AA = A.getAAFor<AADereferenceable>(
4338  *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4339  int64_t DerefBytes = 0;
4340  if (!Stripped && this == &AA) {
4341  // Use IR information if we did not strip anything.
4342  // TODO: track globally.
4343  bool CanBeNull, CanBeFreed;
4344  DerefBytes =
4345  Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4346  T.GlobalState.indicatePessimisticFixpoint();
4347  } else {
4348  const DerefState &DS = AA.getState();
4349  DerefBytes = DS.DerefBytesState.getAssumed();
4350  T.GlobalState &= DS.GlobalState;
4351  }
4352 
4353  // For now we do not try to "increase" dereferenceability due to negative
4354  // indices, as we would first need code to deal with loops and with
4355  // overflows of the dereferenceable bytes.
4356  int64_t OffsetSExt = Offset.getSExtValue();
4357  if (OffsetSExt < 0)
4358  OffsetSExt = 0;
4359 
4360  T.takeAssumedDerefBytesMinimum(
4361  std::max(int64_t(0), DerefBytes - OffsetSExt));
4362 
4363  if (this == &AA) {
4364  if (!Stripped) {
4365  // If nothing was stripped IR information is all we got.
4366  T.takeKnownDerefBytesMaximum(
4367  std::max(int64_t(0), DerefBytes - OffsetSExt));
4368  T.indicatePessimisticFixpoint();
4369  } else if (OffsetSExt > 0) {
4370  // If something was stripped but there is circular reasoning we look
4371  // at the offset. If it is positive we basically decrease the
4372  // dereferenceable bytes in a circular loop now, which will simply
4373  // drive them down to the known value in a very slow way that we
4374  // can accelerate.
4375  T.indicatePessimisticFixpoint();
4376  }
4377  }
4378 
4379  return T.isValidState();
4380  };
4381 
4382  DerefState T;
4383  bool UsedAssumedInformation = false;
4384  if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4385  VisitValueCB, getCtxI(),
4386  UsedAssumedInformation))
4387  return indicatePessimisticFixpoint();
4388 
4389  return clampStateAndIndicateChange(getState(), T);
4390  }
4391 
4392  /// See AbstractAttribute::trackStatistics()
4393  void trackStatistics() const override {
4394  STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4395  }
4396 };
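
// Worked example (standalone, illustrative helper) of the offset
// adjustment in the visitor above: if the base pointer is dereferenceable
// for DerefBytes bytes and we reason about Base + Offset, only
// max(0, DerefBytes - Offset) bytes remain provably dereferenceable, and
// negative offsets are clamped to 0 rather than used to "grow" the range.
// E.g., DerefBytes = 16 and Offset = 4 leave 12 bytes.
static int64_t remainingDerefBytes(int64_t DerefBytes, int64_t Offset) {
  if (Offset < 0)
    Offset = 0; // Do not increase dereferenceability via negative indices.
  return std::max(int64_t(0), DerefBytes - Offset);
}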
4397 
4398 /// Dereferenceable attribute for a return value.
4399 struct AADereferenceableReturned final
4400  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4401  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4402  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4403  IRP, A) {}
4404 
4405  /// See AbstractAttribute::trackStatistics()
4406  void trackStatistics() const override {
4407  STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4408  }
4409 };
4410 
4411 /// Dereferenceable attribute for an argument
4412 struct AADereferenceableArgument final
4413  : AAArgumentFromCallSiteArguments<AADereferenceable,
4414  AADereferenceableImpl> {
4415  using Base =
4416  AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4417  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4418  : Base(IRP, A) {}
4419 
4420  /// See AbstractAttribute::trackStatistics()
4421  void trackStatistics() const override {
4422  STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4423  }
4424 };
4425 
4426 /// Dereferenceable attribute for a call site argument.
4427 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4428  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4429  : AADereferenceableFloating(IRP, A) {}
4430 
4431  /// See AbstractAttribute::trackStatistics()
4432  void trackStatistics() const override {
4433  STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4434  }
4435 };
4436 
4437 /// Dereferenceable attribute deduction for a call site return value.
4438 struct AADereferenceableCallSiteReturned final
4439  : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4440  using Base =
4441  AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4442  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4443  : Base(IRP, A) {}
4444 
4445  /// See AbstractAttribute::trackStatistics()
4446  void trackStatistics() const override {
4447  STATS_DECLTRACK_CS_ATTR(dereferenceable);
4448  }
4449 };
4450 } // namespace
4451 
4452 // ------------------------ Align Argument Attribute ------------------------
4453 
4454 namespace {
4455 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4456  Value &AssociatedValue, const Use *U,
4457  const Instruction *I, bool &TrackUse) {
4458  // We need to follow common pointer manipulation uses to the accesses they
4459  // feed into.
4460  if (isa<CastInst>(I)) {
4461  // Follow all but ptr2int casts.
4462  TrackUse = !isa<PtrToIntInst>(I);
4463  return 0;
4464  }
4465  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4466  if (GEP->hasAllConstantIndices())
4467  TrackUse = true;
4468  return 0;
4469  }
4470 
4471  MaybeAlign MA;
4472  if (const auto *CB = dyn_cast<CallBase>(I)) {
4473  if (CB->isBundleOperand(U) || CB->isCallee(U))
4474  return 0;
4475 
4476  unsigned ArgNo = CB->getArgOperandNo(U);
4477  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4478  // As long as we only use known information there is no need to track
4479  // dependences here.
4480  auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4481  MA = MaybeAlign(AlignAA.getKnownAlign());
4482  }
4483 
4484  const DataLayout &DL = A.getDataLayout();
4485  const Value *UseV = U->get();
4486  if (auto *SI = dyn_cast<StoreInst>(I)) {
4487  if (SI->getPointerOperand() == UseV)
4488  MA = SI->getAlign();
4489  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4490  if (LI->getPointerOperand() == UseV)
4491  MA = LI->getAlign();
4492  }
4493 
4494  if (!MA || *MA <= QueryingAA.getKnownAlign())
4495  return 0;
4496 
4497  unsigned Alignment = MA->value();
4498  int64_t Offset;
4499 
4500  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4501  if (Base == &AssociatedValue) {
4502  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4503  // So we can say that the maximum power of two which is a divisor of
4504  // gcd(Offset, Alignment) is an alignment.
4505 
4506  uint32_t gcd =
4507  greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4508  Alignment = llvm::PowerOf2Floor(gcd);
4509  }
4510  }
4511 
4512  return Alignment;
4513 }
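
// Worked example (standalone, illustrative helper) of the gcd rule above:
// if BasePointerAddr + Offset is known to be Alignment-aligned, the base
// pointer itself is aligned to the largest power of two dividing
// gcd(|Offset|, Alignment). E.g., Offset = 12 and Alignment = 8 give
// gcd = 4, so the base is at least 4-byte aligned.
static unsigned knownBaseAlignment(int64_t Offset, uint32_t Alignment) {
  // Manual absolute value to stay self-contained.
  uint32_t AbsOffset = uint32_t(Offset < 0 ? -Offset : Offset);
  uint32_t GCD = greatestCommonDivisor(AbsOffset, Alignment);
  return llvm::PowerOf2Floor(GCD);
}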
4514 
4515 struct AAAlignImpl : AAAlign {
4516  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4517 
4518  /// See AbstractAttribute::initialize(...).
4519  void initialize(Attributor &A) override {
4520  SmallVector<Attribute, 4> Attrs;
4521  getAttrs({Attribute::Alignment}, Attrs);
4522  for (const Attribute &Attr : Attrs)
4523  takeKnownMaximum(Attr.getValueAsInt());
4524 
4525  Value &V = getAssociatedValue();
4526  takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4527 
4528  if (getIRPosition().isFnInterfaceKind() &&
4529  (!getAnchorScope() ||
4530  !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4531  indicatePessimisticFixpoint();
4532  return;
4533  }
4534 
4535  if (Instruction *CtxI = getCtxI())
4536  followUsesInMBEC(*this, A, getState(), *CtxI);
4537  }
4538 
4539  /// See AbstractAttribute::manifest(...).
4540  ChangeStatus manifest(Attributor &A) override {
4541  ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4542 
4543  // Check for users that allow alignment annotations.
4544  Value &AssociatedValue = getAssociatedValue();
4545  for (const Use &U : AssociatedValue.uses()) {
4546  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4547  if (SI->getPointerOperand() == &AssociatedValue)
4548  if (SI->getAlignment() < getAssumedAlign()) {
4549  STATS_DECLTRACK(AAAlign, Store,
4550  "Number of times alignment added to a store");
4551  SI->setAlignment(Align(getAssumedAlign()));
4552  LoadStoreChanged = ChangeStatus::CHANGED;
4553  }
4554  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4555  if (LI->getPointerOperand() == &AssociatedValue)
4556  if (LI->getAlignment() < getAssumedAlign()) {
4557  LI->setAlignment(Align(getAssumedAlign()));
4558  STATS_DECLTRACK(AAAlign, Load,
4559  "Number of times alignment added to a load");
4560  LoadStoreChanged = ChangeStatus::CHANGED;
4561  }
4562  }
4563  }
4564 
4565  ChangeStatus Changed = AAAlign::manifest(A);
4566 
4567  Align InheritAlign =
4568  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4569  if (InheritAlign >= getAssumedAlign())
4570  return LoadStoreChanged;
4571  return Changed | LoadStoreChanged;
4572  }
4573 
4574  // TODO: Provide a helper to determine the implied ABI alignment and check
4575  // that value in the existing manifest method and a new one for AAAlignImpl
4576  // to avoid making the alignment explicit if it did not improve.
4577 
4578  /// See AbstractAttribute::getDeducedAttributes
4579  virtual void
4580  getDeducedAttributes(LLVMContext &Ctx,
4581  SmallVectorImpl<Attribute> &Attrs) const override {
4582  if (getAssumedAlign() > 1)
4583  Attrs.emplace_back(
4584  Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4585  }
4586 
4587  /// See followUsesInMBEC
4588  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4589  AAAlign::StateType &State) {
4590  bool TrackUse = false;
4591 
4592  unsigned int KnownAlign =
4593  getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4594  State.takeKnownMaximum(KnownAlign);
4595 
4596  return TrackUse;
4597  }
4598 
4599  /// See AbstractAttribute::getAsStr().
4600  const std::string getAsStr() const override {
4601  return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4602  "-" + std::to_string(getAssumedAlign()) + ">")
4603  : "unknown-align";
4604  }
4605 };
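
// Hedged sketch of the manifest guard above (the helper and its name are
// assumptions): an explicit align attribute is only worth emitting when
// the assumed alignment is non-trivial and beats what the IR already
// implies for the value (its inherited pointer alignment).
static bool shouldEmitAlignAttr(uint64_t InheritedAlign,
                                uint64_t AssumedAlign) {
  return AssumedAlign > 1 && InheritedAlign < AssumedAlign;
}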
4606 
4607 /// Align attribute for a floating value.
4608 struct AAAlignFloating : AAAlignImpl {
4609  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4610 
4611  /// See AbstractAttribute::updateImpl(...).
4612  ChangeStatus updateImpl(Attributor &A) override {
4613  const DataLayout &DL = A.getDataLayout();
4614 
4615  auto VisitValueCB = [&](Value &V, const Instruction *,
4616  AAAlign::StateType &T, bool Stripped) -> bool {
4617  if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4618  return true;
4619  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4620  DepClassTy::REQUIRED);
4621  if (!Stripped && this == &AA) {
4622  int64_t Offset;
4623  unsigned Alignment = 1;
4624  if (const Value *Base =
4625  GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4626  // TODO: Use AAAlign for the base too.
4627  Align PA = Base->getPointerAlignment(DL);
4628  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4629  // So we can say that the maximum power of two which is a divisor of
4630  // gcd(Offset, Alignment) is an alignment.
4631 
4632  uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4633  uint32_t(PA.value()));
4634  Alignment = llvm::PowerOf2Floor(gcd);
4635  } else {
4636  Alignment = V.getPointerAlignment(DL).value();
4637  }
4638  // Use only IR information if we did not strip anything.
4639  T.takeKnownMaximum(Alignment);
4640  T.indicatePessimisticFixpoint();
4641  } else {
4642  // Use abstract attribute information.
4643  const AAAlign::StateType &DS = AA.getState();
4644  T ^= DS;
4645  }
4646  return T.isValidState();
4647  };
4648 
4649  StateType T;
4650  bool UsedAssumedInformation = false;
4651  if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4652  VisitValueCB, getCtxI(),
4653  UsedAssumedInformation))
4654  return indicatePessimisticFixpoint();
4655 
4656  // TODO: If we know we visited all incoming values, and thus none are
4657  // assumed dead, we can take the known information from the state T.
4658  return clampStateAndIndicateChange(getState(), T);
4659  }
4660 
4661  /// See AbstractAttribute::trackStatistics()
4662  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4663 };
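// Illustration of the traversal above (sketch): for
//   %p = phi i32* [ %a, %bb0 ], [ %b, %bb1 ]
// where %a is assumed 16-aligned and %b is assumed 8-aligned, the `^=` merge
// keeps only what both incoming states guarantee, so %p ends up assumed
// 8-aligned.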
4664 
4665 /// Align attribute for function return value.
4666 struct AAAlignReturned final
4667  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4668  using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4669  AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4670 
4671  /// See AbstractAttribute::initialize(...).
4672  void initialize(Attributor &A) override {
4673  Base::initialize(A);
4674  Function *F = getAssociatedFunction();
4675  if (!F || F->isDeclaration())
4676  indicatePessimisticFixpoint();
4677  }
4678 
4679  /// See AbstractAttribute::trackStatistics()
4680  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4681 };
4682 
4683 /// Align attribute for function argument.
4684 struct AAAlignArgument final
4685  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4686  using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4687  AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4688 
4689  /// See AbstractAttribute::manifest(...).
4690  ChangeStatus manifest(Attributor &A) override {
4691  // If the associated argument is involved in a must-tail call we give up
4692  // because we would need to keep the argument alignments of caller and
4693  // callee in sync. It just does not seem worth the trouble right now.
4694  if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4695  return ChangeStatus::UNCHANGED;
4696  return Base::manifest(A);
4697  }
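// Rationale for the bail-out above (sketch): `musttail` calls require the
// caller and callee prototypes, including ABI-affecting parameter attributes
// such as alignment, to stay compatible, so annotating only one side could
// leave the IR in an inconsistent state.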
4698 
4699  /// See AbstractAttribute::trackStatistics()
4700  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4701 };
4702 
4703 struct AAAlignCallSiteArgument final : AAAlignFloating {
4704  AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4705  : AAAlignFloating(IRP, A) {}
4706 
4707  /// See AbstractAttribute::manifest(...).
4708  ChangeStatus manifest(Attributor &A) override {
4709  // If the associated argument is involved in a must-tail call we give up
4710  // because we would need to keep the argument alignments of caller and
4711  // callee in sync. It just does not seem worth the trouble right now.
4712  if (Argument *Arg = getAssociatedArgument())
4713  if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4714  return ChangeStatus::UNCHANGED;
4715  ChangeStatus Changed = AAAlignImpl::manifest(A);
4716  Align InheritAlign =
4717  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4718  if (InheritAlign >= getAssumedAlign())
4719  Changed = ChangeStatus::UNCHANGED;
4720  return Changed;
4721  }
4722 
4723  /// See AbstractAttribute::updateImpl(Attributor &A).
4724  ChangeStatus updateImpl(Attributor &A) override {
4725  ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4726  if (Argument *Arg = getAssociatedArgument()) {
4727  // We only take known information from the argument
4728  // so we do not need to track a dependence.
4729  const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4730  *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4731  takeKnownMaximum(ArgAlignAA.getKnownAlign());
4732  }
4733  return Changed;
4734  }
4735 
4736  /// See AbstractAttribute::trackStatistics()
4737  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4738 };
4739 
4740 /// Align attribute deduction for a call site return value.
4741 struct AAAlignCallSiteReturned final
4742  : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4743  using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4744  AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4745  : Base(IRP, A) {}
4746 
4747  /// See AbstractAttribute::initialize(...).
4748  void initialize(Attributor &A) override {
4749  Base::initialize(A);
4750  Function *F = getAssociatedFunction();
4751  if (!F || F->isDeclaration())
4752  indicatePessimisticFixpoint();
4753  }
4754 
4755  /// See AbstractAttribute::trackStatistics()
4756  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4757 };
4758 } // namespace
4759 
4760 /// ------------------ Function No-Return Attribute ----------------------------
4761 namespace {
4762 struct AANoReturnImpl : public AANoReturn {
4763  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4764 
4765  /// See AbstractAttribute::initialize(...).
4766  void initialize(Attributor &A) override {
4767  AANoReturn::initialize(A);
4768  Function *F = getAssociatedFunction();
4769  if (!F || F->isDeclaration())
4770  indicatePessimisticFixpoint();
4771  }
4772 
4773  /// See AbstractAttribute::getAsStr().
4774  const std::string getAsStr() const override {
4775  return getAssumed() ? "noreturn" : "may-return";
4776  }
4777 
4778  /// See AbstractAttribute::updateImpl(Attributor &A).
4779  virtual ChangeStatus updateImpl(Attributor &A) override {
4780  auto CheckForNoReturn = [](Instruction &) { return false; };
4781  bool UsedAssumedInformation = false;
4782  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4783  {(unsigned)Instruction::Ret},
4784  UsedAssumedInformation))
4785  return indicatePessimisticFixpoint();
4786  return ChangeStatus::UNCHANGED;
4787  }
4788 };
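// How the update above deduces noreturn (sketch): CheckForNoReturn rejects
// every instruction it is shown, so checkForAllInstructions can only succeed
// if the function contains no live Instruction::Ret at all, e.g. (hypothetical
// IR)
//   define void @f() { call void @abort() unreachable }
// Any reachable `ret` makes the check fail and forces the pessimistic
// fixpoint ("may-return").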
4789 
4790 struct AANoReturnFunction final : AANoReturnImpl {
4791  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4792  : AANoReturnImpl(IRP, A) {}
4793 
4794  /// See AbstractAttribute::trackStatistics()
4795  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4796 };
4797 
4798 /// NoReturn attribute deduction for call sites.
4799 struct AANoReturnCallSite final : AANoReturnImpl {
4800  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4801  : AANoReturnImpl(IRP, A) {}
4802 
4803  /// See AbstractAttribute::initialize(...).
4804  void initialize(Attributor &A) override {
4805  AANoReturnImpl::initialize(A);
4806  if (Function *F = getAssociatedFunction()) {
4807  const IRPosition &FnPos = IRPosition::function(*F);
4808  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4809  if (!FnAA.isAssumedNoReturn())
4810  indicatePessimisticFixpoint();
4811  }
4812  }
4813 
4814  /// See AbstractAttribute::updateImpl(...).
4815  ChangeStatus updateImpl(Attributor &A) override {
4816  // TODO: Once we have call site specific value information we can provide
4817  // call site specific liveness information and then it makes
4818  // sense to specialize attributes for call site arguments instead of
4819  // redirecting requests to the callee argument.
4820  Function *F = getAssociatedFunction();
4821  const IRPosition &FnPos = IRPosition::function(*F);
4822  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4823  return clampStateAndIndicateChange(getState(), FnAA.getState());
4824  }
4825 
4826  /// See AbstractAttribute::trackStatistics()
4827  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4828 };
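// Sketch of the propagation above: for a call site `call void @f()` where @f
// is assumed noreturn, initialize and updateImpl simply clamp the call-site
// state to @f's state, so the call-site `noreturn` deduction follows the
// callee's.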
4829 } // namespace
4830 
4831 /// ----------------------- Instance Info ---------------------------------
4832 
4833 namespace {
4834 /// A class to hold the state for instance-info attributes.
4835 struct AAInstanceInfoImpl : public AAInstanceInfo {
4836  AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4837  : AAInstanceInfo(IRP, A) {}
4838 
4839  /// See AbstractAttribute::initialize(...).
4840  void initialize(Attributor &A) override {
4841  Value &V = getAssociatedValue();
4842  if (auto *C = dyn_cast<Constant>(&V)) {
4843  if (C->isThreadDependent())
4844  indicatePessimisticFixpoint();
4845  else
4846  indicateOptimisticFixpoint();
4847  return;
4848  }
4849  if (auto *CB = dyn_cast<CallBase>(&V))
4850  if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4851  !CB->mayReadFromMemory()) {
4852  indicateOptimisticFixpoint();
4853  return;
4854  }
4855  }
4856 
4857  /// See AbstractAttribute::updateImpl(...).
4858  ChangeStatus updateImpl(Attributor &A) override {
4859  ChangeStatus Changed = ChangeStatus::UNCHANGED;
4860 
4861  Value &V = getAssociatedValue();
4862  const Function *Scope = nullptr;
4863  if (auto *I = dyn_cast<Instruction>(&V))
4864  Scope = I->getFunction();
4865  if (auto *A = dyn_cast<Argument>(&V)) {
4866  Scope = A->getParent();
4867  if (!Scope->hasLocalLinkage())
4868  return Changed;
4869  }
4870  if (!Scope)
4871  return indicateOptimisticFixpoint();
4872 
4873  auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4874  *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4875  if (NoRecurseAA.isAssumedNoRecurse())
4876  return Changed;
4877 
4878  auto UsePred = [&](const Use &U, bool &Follow) {
4879  const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4880  if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4881  isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4882  Follow = true;
4883  return true;
4884  }
4885  if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4886  (isa<StoreInst>(UserI) &&
4887  cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4888  return true;
4889  if (auto *CB = dyn_cast<CallBase>(UserI)) {
4890  // This check does not guarantee uniqueness, but for now it ensures we
4891  // cannot end up with two versions of \p U while believing there is one.
4892  if (!CB->getCalledFunction() ||
4893  !CB->getCalledFunction()->hasLocalLinkage())
4894  return true;
4895  if (!CB->isArgOperand(&U))
4896  return false;
4897  const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4898  *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4899  DepClassTy::OPTIONAL);
4900  if (ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4901  return true;
4902  }
4903  return false;
4904  };
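// Illustration for UsePred above (sketch): starting from `%a = alloca i32`,
// GEPs, casts, PHIs, and selects are transparently followed; loads, compares,
// and stores *into* the pointer (value operand != %a) are harmless; passing
// %a to a local-linkage callee defers to that argument's AAInstanceInfo.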
4905 
4906  auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4907  if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4908  auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4909  if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4910  return true;
4911  auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4912  *SI->getFunction());
4913  if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4914  return true;
4915  }
4916  return false;
4917  };
4918 
4919  if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4920  DepClassTy::OPTIONAL,
4921  /* IgnoreDroppableUses */ true, EquivalentUseCB))
4922  return indicatePessimisticFixpoint();
4923 
4924  return Changed;
4925  }
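// Why recursion matters above (sketch): in
//   void rec(int n) { int x; if (n) rec(n - 1); use(&x); }
// every active frame has its own instance of `x`. A norecurse scope rules
// such overlap out immediately; otherwise the use walk below must prove that
// two instances of the value can never be confused with one another.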
4926 
4927  /// See AbstractState::getAsStr().
4928  const std::string getAsStr() const override {
4929  return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4930  }
4931 
4932  /// See AbstractAttribute::trackStatistics()
4933  void trackStatistics() const override {}
4934 };
4935 
4936 /// InstanceInfo attribute for floating values.
4937 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4938  AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4939  : AAInstanceInfoImpl(IRP, A) {}
4940 };
4941 
4942 /// InstanceInfo attribute for function arguments.
4943 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4944  AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4945  : AAInstanceInfoFloating(IRP, A) {}
4946 };
4947 
4948 /// InstanceInfo attribute for call site arguments.
4949 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4950  AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4951  : AAInstanceInfoImpl(IRP, A) {}
4952 
4953  /// See AbstractAttribute::updateImpl(...).
4954  ChangeStatus updateImpl(Attributor &A) override {
4955  // TODO: Once we have call site specific value information we can provide
4956  // call site specific liveness information and then it makes
4957  // sense to specialize attributes for call sites arguments instead of
4958  // redirecting requests to the callee argument.
4959  Argument *Arg = getAssociatedArgument();
4960  if (!Arg)
4961  return indicatePessimisticFixpoint();
4962  const IRPosition &ArgPos = IRPosition::argument(*Arg);
4963  auto &ArgAA =
4964  A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4965  return clampStateAndIndicateChange(getState(), ArgAA.getState());
4966  }
4967 };
4968 
4969 /// InstanceInfo attribute for function return value.
4970 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4971  AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4972  : AAInstanceInfoImpl(IRP, A) {
4973  llvm_unreachable("InstanceInfo is not applicable to function returns!");
4974  }
4975 
4976  /// See AbstractAttribute::initialize(...).
4977  void initialize(Attributor &A) override {
4978  llvm_unreachable("InstanceInfo is not applicable to function returns!");
4979  }
4980 
4981  /// See AbstractAttribute::updateImpl(...).
4982  ChangeStatus updateImpl(Attributor &A) override {
4983  llvm_unreachable("InstanceInfo is not applicable to function returns!");
4984  }
4985 };
4986 
4987 /// InstanceInfo attribute deduction for a call site return value.
4988 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4989  AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4990  : AAInstanceInfoFloating(IRP, A) {}
4991 };
4992 } // namespace
4993 
4994 /// ----------------------- Variable Capturing ---------------------------------
4995 
4996 namespace {
4997 /// A class to hold the state for no-capture attributes.
4998 struct AANoCaptureImpl : public AANoCapture {
4999  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5000 
5001  /// See AbstractAttribute::initialize(...).
5002  void initialize(Attributor &A) override {
5003  if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5004  indicateOptimisticFixpoint();
5005  return;
5006  }
5007  Function *AnchorScope = getAnchorScope();
5008  if (isFnInterfaceKind() &&
5009  (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5010  indicatePessimisticFixpoint();
5011  return;
5012  }
5013 
5014  // You cannot "capture" null in the default address space.
5015  if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5016  getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5017  indicateOptimisticFixpoint();
5018  return;
5019  }
5020 
5021  const Function *F =
5022  isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5023 
5024  // Check what state the associated function can actually capture.
5025  if (F)
5026  determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5027  else
5028  indicatePessimisticFixpoint();
5029  }
5030 
5031  /// See AbstractAttribute::updateImpl(...).
5032  ChangeStatus updateImpl(Attributor &A) override;
5033 
5034  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5035  virtual void
5036  getDeducedAttributes(LLVMContext &Ctx,
5037  SmallVectorImpl<Attribute> &Attrs) const override {
5038  if (!isAssumedNoCaptureMaybeReturned())
5039  return;
5040 
5041  if (isArgumentPosition()) {
5042  if (isAssumedNoCapture())
5043  Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5044  else if (ManifestInternal)
5045  Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5046  }
5047  }
5048 
5049  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
5050  /// depending on the ability of the function associated with \p IRP to
5051  /// capture state in memory and through "returning/throwing", respectively.
5052  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5053  const Function &F,
5054  BitIntegerState &State) {
5055  // TODO: Once we have memory behavior attributes we should use them here.
5056 
5057  // If we know we cannot communicate or write to memory, we do not care about
5058  // ptr2int anymore.
5059  if (F.onlyReadsMemory() && F.doesNotThrow() &&
5060  F.getReturnType()->isVoidTy()) {
5061  State.addKnownBits(NO_CAPTURE);
5062  return;
5063  }
5064 
5065  // A function cannot capture state in memory if it only reads memory; it
5066  // can, however, return/throw state, and that state might be influenced by
5067  // the pointer value, e.g., loading from a returned pointer might reveal a bit.
5068  if (F.onlyReadsMemory())
5069  State.addKnownBits(NOT_CAPTURED_IN_MEM);
5070 
5071  // A function cannot communicate state back if it does not throw
5072  // exceptions and does not return values.
5073  if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5074  State.addKnownBits(NOT_CAPTURED_IN_RET);
5075 
5076  // Check existing "returned" attributes.
5077  int ArgNo = IRP.getCalleeArgNo();
5078  if (F.doesNotThrow() && ArgNo >= 0) {
5079  for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5080  if (F.hasParamAttribute(u, Attribute::Returned)) {
5081  if (u == unsigned(ArgNo))
5082  State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5083  else if (F.onlyReadsMemory())
5084  State.addKnownBits(NO_CAPTURE);
5085  else
5086  State.addKnownBits(NOT_CAPTURED_IN_RET);
5087  break;
5088  }
5089  }
5090  }
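// Worked example for the capability analysis above (sketch): for
//   declare void @g(i8* %p) readonly nounwind
// the function only reads memory, does not throw, and returns void, so the
// very first check adds NO_CAPTURE as known and no use inspection is needed.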
5091 
5092  /// See AbstractState::getAsStr().
5093  const std::string getAsStr() const override {
5094  if (isKnownNoCapture())
5095  return "known not-captured";
5096  if (isAssumedNoCapture())
5097  return "assumed not-captured";
5098  if (isKnownNoCaptureMaybeReturned())
5099  return "known not-captured-maybe-returned";
5100  if (isAssumedNoCaptureMaybeReturned())
5101  return "assumed not-captured-maybe-returned";
5102  return "assumed-captured";
5103  }
5104 
5105  /// Check the use \p U and update \p State accordingly. Return true if we
5106  /// should continue to update the state.
5107  bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5108  bool &Follow) {
5109  Instruction *UInst = cast<Instruction>(U.getUser());
5110  LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5111  << *UInst << "\n");
5112 
5113  // Deal with ptr2int by following uses.
5114  if (isa<PtrToIntInst>(UInst)) {
5115  LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5116  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5117  /* Return */ true);
5118  }
5119 
5120  // For stores we already checked if we can follow them; if they make it
5121  // here, we give up.
5122  if (isa<StoreInst>(UInst))
5123  return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5124  /* Return */ false);
5125 
5126  // Explicitly catch return instructions.
5127  if (isa<ReturnInst>(UInst)) {
5128  if (UInst->getFunction() == getAnchorScope())
5129  return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5130  /* Return */ true);
5131  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5132  /* Return */ true);
5133  }
5134 
5135  // For now we only use special logic for call sites. However, the tracker
5136  // itself knows about a lot of other non-capturing cases already.
5137  auto *CB = dyn_cast<CallBase>(UInst);
5138  if (!CB || !CB->isArgOperand(&U))
5139  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5140  /* Return */ true);
5141 
5142  unsigned ArgNo = CB->getArgOperandNo(&U);
5143  const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5144  // If we have an abstract no-capture attribute for the argument we can use
5145  // it to justify a no-capture attribute here. This allows recursion!
5146  auto &ArgNoCaptureAA =
5147  A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5148  if (ArgNoCaptureAA.isAssumedNoCapture())
5149  return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5150  /* Return */ false);
5151  if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5152  Follow = true;
5153  return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5154  /* Return */ false);
5155  }
5156 
5157  // Lastly, we could not find a reason to assume no-capture, so we do not.
5158  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5159  /* Return */ true);
5160  }
5161 
5162  /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5163  /// \p CapturedInRet, then return true if we should continue updating the
5164  /// state.
5165  static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5166  bool CapturedInInt, bool CapturedInRet) {
5167  LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5168  << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5169  if (CapturedInMem)
5170  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5171  if (CapturedInInt)
5172  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5173  if (CapturedInRet)
5174  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5175  return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5176  }
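// Example (sketch): checkUse maps a `ptrtoint` user to
// isCapturedIn(State, true, true, true), which clears all three assumed
// NOT_CAPTURED_* bits; NO_CAPTURE_MAYBE_RETURNED can then no longer be
// assumed, and the returned `false` stops the use traversal early.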
5177 };
5178 
5179 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5180  const IRPosition &IRP = getIRPosition();
5181  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5182  : &IRP.getAssociatedValue();
5183  if (!V)
5184  return indicatePessimisticFixpoint();
5185 
5186  const Function *F =
5187  isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5188  assert(F && "Expected a function!");
5189  const IRPosition &FnPos = IRPosition::function(*F);
5190 
5191  AANoCapture::StateType T;
5192 
5193  // Readonly means we cannot capture through memory.
5194  bool IsKnown;
5195  if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5196  T.addKnownBits(NOT_CAPTURED_IN_MEM);
5197  if (IsKnown)
5198  addKnownBits(NOT_CAPTURED_IN_MEM);
5199  }
5200 
5201  // Make sure all returned values are different from the underlying value.
5202  // TODO: we could do this in a more sophisticated way inside
5203  // AAReturnedValues, e.g., track all values that escape through returns
5204  // directly somehow.
5205  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5206  bool SeenConstant = false;
5207  for (auto &It : RVAA.returned_values()) {
5208  if (isa<Constant>(It.first)) {
5209  if (SeenConstant)
5210  return false;
5211  SeenConstant = true;
5212  } else if (!isa<Argument>(It.first) ||
5213  It.first == getAssociatedArgument())
5214  return false;
5215  }
5216  return true;
5217  };
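// Example for CheckReturnedArgs above (sketch): for (hypothetical IR)
//   define i8* @id(i8* %p) { ret i8* %p }
// the returned-values set contains the argument %p itself, so the predicate
// returns false for %p and NOT_CAPTURED_IN_RET is not added below.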
5218 
5219  const auto &NoUnwindAA =
5220  A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5221  if (NoUnwindAA.isAssumedNoUnwind()) {
5222  bool IsVoidTy = F->getReturnType()->isVoidTy();
5223  const AAReturnedValues *RVAA =
5224  IsVoidTy ? nullptr
5225  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5227  DepClassTy::OPTIONAL);
5228  if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5229  T.addKnownBits(NOT_CAPTURED_IN_RET);
5230  if (T.isKnown(NOT_CAPTURED_IN_MEM))
5231  return ChangeStatus::UNCHANGED;
5232  if (NoUnwindAA.isKnownNoUnwind() &&
5233  (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5234  addKnownBits(NOT_CAPTURED_IN_RET);
5235  if (isKnown(NOT_CAPTURED_IN_MEM))
5236  return indicateOptimisticFixpoint();
5237  }
5238  }
5239  }
5240 
5241  auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5242  const auto &DerefAA = A.getAAFor<AADereferenceable>(
5243  *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5244  return DerefAA.getAssumedDereferenceableBytes();
5245  };
5246 
5247  auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5248  switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5249  case UseCaptureKind::NO_CAPTURE:
5250  return true;
5251  case UseCaptureKind::MAY_CAPTURE:
5252  return checkUse(