LLVM  16.0.0git
AttributorAttributes.cpp
1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/MapVector.h"
19 #include "llvm/ADT/SCCIterator.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetOperations.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
37 #include "llvm/IR/Argument.h"
38 #include "llvm/IR/Assumptions.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/GlobalValue.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/NoFolder.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/IR/ValueHandle.h"
53 #include "llvm/Support/Alignment.h"
54 #include "llvm/Support/Casting.h"
62 #include <cassert>
63 #include <numeric>
64 
65 using namespace llvm;
66 
67 #define DEBUG_TYPE "attributor"
68 
69 static cl::opt<bool> ManifestInternal(
70  "attributor-manifest-internal", cl::Hidden,
71  cl::desc("Manifest Attributor internal string attributes."),
72  cl::init(false));
73 
74 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
75  cl::Hidden);
76 
77 template <>
79 
81 
82 static cl::opt<unsigned, true> MaxPotentialValues(
83  "attributor-max-potential-values", cl::Hidden,
84  cl::desc("Maximum number of potential values to be "
85  "tracked for each position."),
86  cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
87  cl::init(7));
88 
89 static cl::opt<unsigned> MaxPotentialValuesIterations(
90  "attributor-max-potential-values-iterations", cl::Hidden,
91  cl::desc(
92  "Maximum number of iterations we keep dismantling potential values."),
93  cl::init(64));
94 
95 static cl::opt<unsigned> MaxInterferingAccesses(
96  "attributor-max-interfering-accesses", cl::Hidden,
97  cl::desc("Maximum number of interfering accesses to "
98  "check before assuming all might interfere."),
99  cl::init(6));
100 
101 STATISTIC(NumAAs, "Number of abstract attributes created");
102 
103 // Some helper macros to deal with statistics tracking.
104 //
105 // Usage:
106 // For simple IR attribute tracking overload trackStatistics in the abstract
107 // attribute and choose the right STATS_DECLTRACK_********* macro,
108 // e.g.,:
109 // void trackStatistics() const override {
110 // STATS_DECLTRACK_ARG_ATTR(returned)
111 // }
112 // If there is a single "increment" site one can use the macro
113 // STATS_DECLTRACK with a custom message. If there are multiple increment
114 // sites, STATS_DECL and STATS_TRACK can also be used separately.
115 //
116 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
117  ("Number of " #TYPE " marked '" #NAME "'")
118 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
119 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
120 #define STATS_DECL(NAME, TYPE, MSG) \
121  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
122 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
123 #define STATS_DECLTRACK(NAME, TYPE, MSG) \
124  { \
125  STATS_DECL(NAME, TYPE, MSG) \
126  STATS_TRACK(NAME, TYPE) \
127  }
128 #define STATS_DECLTRACK_ARG_ATTR(NAME) \
129  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
130 #define STATS_DECLTRACK_CSARG_ATTR(NAME) \
131  STATS_DECLTRACK(NAME, CSArguments, \
132  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
133 #define STATS_DECLTRACK_FN_ATTR(NAME) \
134  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
135 #define STATS_DECLTRACK_CS_ATTR(NAME) \
136  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
137 #define STATS_DECLTRACK_FNRET_ATTR(NAME) \
138  STATS_DECLTRACK(NAME, FunctionReturn, \
139  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
140 #define STATS_DECLTRACK_CSRET_ATTR(NAME) \
141  STATS_DECLTRACK(NAME, CSReturn, \
142  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
143 #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
144  STATS_DECLTRACK(NAME, Floating, \
145  ("Number of floating values known to be '" #NAME "'"))
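// Illustration (informal expansion of the macros above): a use such as
//   STATS_DECLTRACK_ARG_ATTR(returned)
// roughly becomes
//   { STATISTIC(NumIRArguments_returned, "Number of arguments marked 'returned'");
//     ++NumIRArguments_returned; }
// i.e. one statistic is declared per (attribute, position-kind) pair and
// incremented at the tracking site.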
146 
147 // Specialization of the operator<< for abstract attribute subclasses. This
148 // disambiguates situations where multiple operators are applicable.
149 namespace llvm {
150 #define PIPE_OPERATOR(CLASS) \
151  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \
152  return OS << static_cast<const AbstractAttribute &>(AA); \
153  }
154 
184 
185 #undef PIPE_OPERATOR
186 
187 template <>
189  const DerefState &R) {
190  ChangeStatus CS0 =
191  clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
192  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
193  return CS0 | CS1;
194 }
195 
196 } // namespace llvm
197 
198 /// Checks if a type could have padding bytes.
199 static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
200  // There is no size information, so be conservative.
201  if (!Ty->isSized())
202  return false;
203 
204  // If the alloc size is not equal to the storage size, then there are padding
205  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
206  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
207  return false;
208 
209  // FIXME: This isn't the right way to check for padding in vectors with
210  // non-byte-size elements.
211  if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
212  return isDenselyPacked(SeqTy->getElementType(), DL);
213 
214  // For array types, check for padding within members.
215  if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
216  return isDenselyPacked(SeqTy->getElementType(), DL);
217 
218  if (!isa<StructType>(Ty))
219  return true;
220 
221  // Check for padding within and between elements of a struct.
222  StructType *StructTy = cast<StructType>(Ty);
223  const StructLayout *Layout = DL.getStructLayout(StructTy);
224  uint64_t StartPos = 0;
225  for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
226  Type *ElTy = StructTy->getElementType(I);
227  if (!isDenselyPacked(ElTy, DL))
228  return false;
229  if (StartPos != Layout->getElementOffsetInBits(I))
230  return false;
231  StartPos += DL.getTypeAllocSizeInBits(ElTy);
232  }
233 
234  return true;
235 }
236 
237 /// Get pointer operand of memory accessing instruction. If \p I is
238 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile
239 /// is set to false and the instruction is volatile, return nullptr.
240 static const Value *getPointerOperand(const Instruction *I,
241  bool AllowVolatile) {
242  if (!AllowVolatile && I->isVolatile())
243  return nullptr;
244 
245  if (auto *LI = dyn_cast<LoadInst>(I)) {
246  return LI->getPointerOperand();
247  }
248 
249  if (auto *SI = dyn_cast<StoreInst>(I)) {
250  return SI->getPointerOperand();
251  }
252 
253  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
254  return CXI->getPointerOperand();
255  }
256 
257  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
258  return RMWI->getPointerOperand();
259  }
260 
261  return nullptr;
262 }
263 
264 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
265 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
266 /// getelementptr instructions that traverse the natural type of \p Ptr if
267 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
268 /// through a cast to i8*.
269 ///
270 /// TODO: This could probably live somewhere more prominently if it doesn't
271 /// already exist.
272 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
273  int64_t Offset, IRBuilder<NoFolder> &IRB,
274  const DataLayout &DL) {
275  assert(Offset >= 0 && "Negative offset not supported yet!");
276  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
277  << "-bytes as " << *ResTy << "\n");
278 
279  if (Offset) {
280  Type *Ty = PtrElemTy;
281  APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
282  SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
283 
284  SmallVector<Value *, 4> ValIndices;
285  std::string GEPName = Ptr->getName().str();
286  for (const APInt &Index : IntIndices) {
287  ValIndices.push_back(IRB.getInt(Index));
288  GEPName += "." + std::to_string(Index.getZExtValue());
289  }
290 
291  // Create a GEP for the indices collected above.
292  Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
293 
294  // If an offset is left we use byte-wise adjustment.
295  if (IntOffset != 0) {
296  Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
297  Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
298  GEPName + ".b" + Twine(IntOffset.getZExtValue()));
299  }
300  }
301 
302  // Ensure the result has the requested type.
304  Ptr->getName() + ".cast");
305 
306  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
307  return Ptr;
308 }
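// Illustration (informal; exact value names depend on the input): with
// PtrElemTy = { i32, [4 x i8] } and Offset = 5, getGEPIndicesForOffset yields
// the indices [0, 1, 1], so the helper emits roughly
//   %p.0.1.1 = getelementptr { i32, [4 x i8] }, ptr %p, i64 0, i32 1, i64 1
// and only falls back to the byte-wise i8* GEP if a residual offset remains.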
309 
310 static bool getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
311  SmallSetVector<Value *, 8> &Objects,
312  const AbstractAttribute &QueryingAA,
313  const Instruction *CtxI,
314  bool &UsedAssumedInformation,
315  AA::ValueScope S,
316  SmallPtrSetImpl<Value *> *SeenObjects) {
317  SmallPtrSet<Value *, 8> LocalSeenObjects;
318  if (!SeenObjects)
319  SeenObjects = &LocalSeenObjects;
320 
322  if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values,
323  S, UsedAssumedInformation)) {
324  Objects.insert(const_cast<Value *>(&Ptr));
325  return true;
326  }
327 
328  for (auto &VAC : Values) {
329  Value *UO = getUnderlyingObject(VAC.getValue());
330  if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) {
331  if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA,
332  VAC.getCtxI(), UsedAssumedInformation, S,
333  SeenObjects))
334  return false;
335  continue;
336  }
337  Objects.insert(VAC.getValue());
338  }
339  return true;
340 }
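// Illustration (informal): if \p Ptr simplifies to, e.g., a PHI over two
// allocas, both allocas end up in \p Objects; if no simplified values can be
// obtained, the helper conservatively inserts \p Ptr itself and still reports
// success.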
341 
342 static const Value *
343 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
344  const Value *Val, const DataLayout &DL, APInt &Offset,
345  bool GetMinOffset, bool AllowNonInbounds,
346  bool UseAssumed = false) {
347 
348  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
349  const IRPosition &Pos = IRPosition::value(V);
350  // Only track dependence if we are going to use the assumed info.
351  const AAValueConstantRange &ValueConstantRangeAA =
352  A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
353  UseAssumed ? DepClassTy::OPTIONAL
354  : DepClassTy::NONE);
355  ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
356  : ValueConstantRangeAA.getKnown();
357  if (Range.isFullSet())
358  return false;
359 
360  // We can only use the lower part of the range because the upper part can
361  // be higher than what the value can really be.
362  if (GetMinOffset)
363  ROffset = Range.getSignedMin();
364  else
365  ROffset = Range.getSignedMax();
366  return true;
367  };
368 
369  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
370  /* AllowInvariant */ true,
371  AttributorAnalysis);
372 }
373 
374 static const Value *
376  const Value *Ptr, int64_t &BytesOffset,
377  const DataLayout &DL, bool AllowNonInbounds = false) {
378  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
379  const Value *Base =
380  stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
381  /* GetMinOffset */ true, AllowNonInbounds);
382 
383  BytesOffset = OffsetAPInt.getSExtValue();
384  return Base;
385 }
386 
387 /// Clamp the information known for all returned values of a function
388 /// (identified by \p QueryingAA) into \p S.
389 template <typename AAType, typename StateType = typename AAType::StateType>
390 static void clampReturnedValueStates(
391  Attributor &A, const AAType &QueryingAA, StateType &S,
392  const IRPosition::CallBaseContext *CBContext = nullptr) {
393  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
394  << QueryingAA << " into " << S << "\n");
395 
396  assert((QueryingAA.getIRPosition().getPositionKind() ==
398  QueryingAA.getIRPosition().getPositionKind() ==
400  "Can only clamp returned value states for a function returned or call "
401  "site returned position!");
402 
403  // Use an optional state as there might not be any return values and we want
404  // to join (IntegerState::operator&) the states of all that exist.
405  Optional<StateType> T;
406 
407  // Callback for each possibly returned value.
408  auto CheckReturnValue = [&](Value &RV) -> bool {
409  const IRPosition &RVPos = IRPosition::value(RV, CBContext);
410  const AAType &AA =
411  A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
412  LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
413  << " @ " << RVPos << "\n");
414  const StateType &AAS = AA.getState();
415  if (!T)
416  T = StateType::getBestState(AAS);
417  *T &= AAS;
418  LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
419  << "\n");
420  return T->isValidState();
421  };
422 
423  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
424  S.indicatePessimisticFixpoint();
425  else if (T)
426  S ^= *T;
427 }
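// Illustration (informal): when used for a boolean-style AA such as AANonNull
// on a function-returned position, T is the join (operator&) over the states
// of all potentially returned values, i.e. the returned position is only
// assumed nonnull if every returned value's position is assumed nonnull.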
428 
429 namespace {
430 /// Helper class for generic deduction: return value -> returned position.
431 template <typename AAType, typename BaseType,
432  typename StateType = typename BaseType::StateType,
433  bool PropagateCallBaseContext = false>
434 struct AAReturnedFromReturnedValues : public BaseType {
435  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
436  : BaseType(IRP, A) {}
437 
438  /// See AbstractAttribute::updateImpl(...).
439  ChangeStatus updateImpl(Attributor &A) override {
440  StateType S(StateType::getBestState(this->getState()));
441  clampReturnedValueStates<AAType, StateType>(
442  A, *this, S,
443  PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
444  // TODO: If we know we visited all returned values, thus none are assumed
445  // dead, we can take the known information from the state T.
446  return clampStateAndIndicateChange<StateType>(this->getState(), S);
447  }
448 };
449 
450 /// Clamp the information known at all call sites for a given argument
451 /// (identified by \p QueryingAA) into \p S.
452 template <typename AAType, typename StateType = typename AAType::StateType>
453 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
454  StateType &S) {
455  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
456  << QueryingAA << " into " << S << "\n");
457 
458  assert(QueryingAA.getIRPosition().getPositionKind() ==
460  "Can only clamp call site argument states for an argument position!");
461 
462  // Use an optional state as there might not be any call sites and we want
463  // to join (IntegerState::operator&) the states of all that exist.
464  Optional<StateType> T;
465 
466  // The argument number which is also the call site argument number.
467  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
468 
469  auto CallSiteCheck = [&](AbstractCallSite ACS) {
470  const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
471  // Check if a corresponding argument was found or if it is not associated
472  // (which can happen for callback calls).
473  if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
474  return false;
475 
476  const AAType &AA =
477  A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
478  LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
479  << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
480  const StateType &AAS = AA.getState();
481  if (!T)
482  T = StateType::getBestState(AAS);
483  *T &= AAS;
484  LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
485  << "\n");
486  return T->isValidState();
487  };
488 
489  bool UsedAssumedInformation = false;
490  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
491  UsedAssumedInformation))
492  S.indicatePessimisticFixpoint();
493  else if (T)
494  S ^= *T;
495 }
496 
497 /// This function is the bridge between argument position and the call base
498 /// context.
499 template <typename AAType, typename BaseType,
500  typename StateType = typename AAType::StateType>
501 bool getArgumentStateFromCallBaseContext(Attributor &A,
502  BaseType &QueryingAttribute,
503  IRPosition &Pos, StateType &State) {
505  "Expected an 'argument' position !");
506  const CallBase *CBContext = Pos.getCallBaseContext();
507  if (!CBContext)
508  return false;
509 
510  int ArgNo = Pos.getCallSiteArgNo();
511  assert(ArgNo >= 0 && "Invalid Arg No!");
512 
513  const auto &AA = A.getAAFor<AAType>(
514  QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
516  const StateType &CBArgumentState =
517  static_cast<const StateType &>(AA.getState());
518 
519  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument"
520  << "Position:" << Pos << "CB Arg state:" << CBArgumentState
521  << "\n");
522 
523  // NOTE: If we want to do call site grouping it should happen here.
524  State ^= CBArgumentState;
525  return true;
526 }
527 
528 /// Helper class for generic deduction: call site argument -> argument position.
529 template <typename AAType, typename BaseType,
530  typename StateType = typename AAType::StateType,
531  bool BridgeCallBaseContext = false>
532 struct AAArgumentFromCallSiteArguments : public BaseType {
533  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
534  : BaseType(IRP, A) {}
535 
536  /// See AbstractAttribute::updateImpl(...).
537  ChangeStatus updateImpl(Attributor &A) override {
538  StateType S = StateType::getBestState(this->getState());
539 
540  if (BridgeCallBaseContext) {
541  bool Success =
542  getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
543  A, *this, this->getIRPosition(), S);
544  if (Success)
545  return clampStateAndIndicateChange<StateType>(this->getState(), S);
546  }
547  clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
548 
549  // TODO: If we know we visited all incoming values, thus none are assumed
550  // dead, we can take the known information from the state T.
551  return clampStateAndIndicateChange<StateType>(this->getState(), S);
552  }
553 };
554 
555 /// Helper class for generic replication: function returned -> cs returned.
556 template <typename AAType, typename BaseType,
557  typename StateType = typename BaseType::StateType,
558  bool IntroduceCallBaseContext = false>
559 struct AACallSiteReturnedFromReturned : public BaseType {
560  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
561  : BaseType(IRP, A) {}
562 
563  /// See AbstractAttribute::updateImpl(...).
564  ChangeStatus updateImpl(Attributor &A) override {
565  assert(this->getIRPosition().getPositionKind() ==
567  "Can only wrap function returned positions for call site returned "
568  "positions!");
569  auto &S = this->getState();
570 
571  const Function *AssociatedFunction =
572  this->getIRPosition().getAssociatedFunction();
573  if (!AssociatedFunction)
574  return S.indicatePessimisticFixpoint();
575 
576  CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
577  if (IntroduceCallBaseContext)
578  LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
579  << CBContext << "\n");
580 
582  *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
583  const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
584  return clampStateAndIndicateChange(S, AA.getState());
585  }
586 };
587 
588 /// Helper function to accumulate uses.
589 template <class AAType, typename StateType = typename AAType::StateType>
590 static void followUsesInContext(AAType &AA, Attributor &A,
592  const Instruction *CtxI,
594  StateType &State) {
595  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
596  for (unsigned u = 0; u < Uses.size(); ++u) {
597  const Use *U = Uses[u];
598  if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
599  bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
600  if (Found && AA.followUseInMBEC(A, U, UserI, State))
601  for (const Use &Us : UserI->uses())
602  Uses.insert(&Us);
603  }
604  }
605 }
606 
607 /// Use the must-be-executed-context around \p I to add information into \p S.
608 /// The AAType class is required to have a `followUseInMBEC` method with the
609 /// following signature and behaviour:
610 ///
611 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
612 /// U - Underlying use.
613 /// I - The user of \p U.
614 /// Returns true if the value should be tracked transitively.
615 ///
616 template <class AAType, typename StateType = typename AAType::StateType>
617 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
618  Instruction &CtxI) {
619 
620  // Container for (transitive) uses of the associated value.
622  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
623  Uses.insert(&U);
624 
626  A.getInfoCache().getMustBeExecutedContextExplorer();
627 
628  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
629 
630  if (S.isAtFixpoint())
631  return;
632 
634  auto Pred = [&](const Instruction *I) {
635  if (const BranchInst *Br = dyn_cast<BranchInst>(I))
636  if (Br->isConditional())
637  BrInsts.push_back(Br);
638  return true;
639  };
640 
641  // Here, accumulate conditional branch instructions in the context. We
642  // explore the child paths and collect the known states. The disjunction of
643  // those states can be merged to its own state. Let ParentState_i be a state
644  // to indicate the known information for an i-th branch instruction in the
645  // context. ChildStates are created for its successors respectively.
646  //
647  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
648  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
649  // ...
650  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
651  //
652  // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
653  //
654  // FIXME: Currently, recursive branches are not handled. For example, we
655  // can't deduce that ptr must be dereferenced in the function below.
656  //
657  // void f(int a, int b, int *ptr) {
658  // if(a)
659  // if (b) {
660  // *ptr = 0;
661  // } else {
662  // *ptr = 1;
663  // }
664  // else {
665  // if (b) {
666  // *ptr = 0;
667  // } else {
668  // *ptr = 1;
669  // }
670  // }
671  // }
672 
673  Explorer.checkForAllContext(&CtxI, Pred);
674  for (const BranchInst *Br : BrInsts) {
675  StateType ParentState;
676 
677  // The known state of the parent is a conjunction of its children's
678  // known states, so it is initialized with the best state.
679  ParentState.indicateOptimisticFixpoint();
680 
681  for (const BasicBlock *BB : Br->successors()) {
682  StateType ChildState;
683 
684  size_t BeforeSize = Uses.size();
685  followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
686 
687  // Erase uses which only appear in the child.
688  for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
689  It = Uses.erase(It);
690 
691  ParentState &= ChildState;
692  }
693 
694  // Use only known state.
695  S += ParentState;
696  }
697 }
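// Illustration (informal): for AADereferenceable on a pointer argument, if the
// must-be-executed context ends at a conditional branch and *both* successors
// dereference the pointer, the child states are joined (&=) into ParentState
// and then added to S, so the dereferenceability becomes known even though
// neither access dominates or post-dominates the branch on its own.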
698 } // namespace
699 
700 /// ------------------------ PointerInfo ---------------------------------------
701 
702 namespace llvm {
703 namespace AA {
704 namespace PointerInfo {
705 
706 struct State;
707 
708 } // namespace PointerInfo
709 } // namespace AA
710 
711 /// Helper for AA::PointerInfo::Access DenseMap/Set usage.
712 template <>
713 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
715  static inline Access getEmptyKey();
716  static inline Access getTombstoneKey();
717  static unsigned getHashValue(const Access &A);
718  static bool isEqual(const Access &LHS, const Access &RHS);
719 };
720 
721 /// Helper that allows OffsetAndSize as a key in a DenseMap.
722 template <>
723 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
724  : DenseMapInfo<std::pair<int64_t, int64_t>> {};
725 
726 /// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
727 /// but the instruction.
728 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
731  static inline Access getEmptyKey();
732  static inline Access getTombstoneKey();
733  static unsigned getHashValue(const Access &A);
734  static bool isEqual(const Access &LHS, const Access &RHS);
735 };
736 
737 } // namespace llvm
738 
739 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
741 
742  ~State() {
743  // We do not delete the Accesses objects, but we still need to destroy them.
744  for (auto &It : AccessBins)
745  It.second->~Accesses();
746  }
747 
748  /// Return the best possible representable state.
749  static State getBestState(const State &SIS) { return State(); }
750 
751  /// Return the worst possible representable state.
752  static State getWorstState(const State &SIS) {
753  State R;
754  R.indicatePessimisticFixpoint();
755  return R;
756  }
757 
758  State() = default;
759  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
760  SIS.AccessBins.clear();
761  }
762 
763  const State &getAssumed() const { return *this; }
764 
765  /// See AbstractState::isValidState().
766  bool isValidState() const override { return BS.isValidState(); }
767 
768  /// See AbstractState::isAtFixpoint().
769  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
770 
771  /// See AbstractState::indicateOptimisticFixpoint().
773  BS.indicateOptimisticFixpoint();
775  }
776 
777  /// See AbstractState::indicatePessimisticFixpoint().
779  BS.indicatePessimisticFixpoint();
780  return ChangeStatus::CHANGED;
781  }
782 
783  State &operator=(const State &R) {
784  if (this == &R)
785  return *this;
786  BS = R.BS;
787  AccessBins = R.AccessBins;
788  return *this;
789  }
790 
792  if (this == &R)
793  return *this;
794  std::swap(BS, R.BS);
795  std::swap(AccessBins, R.AccessBins);
796  return *this;
797  }
798 
799  bool operator==(const State &R) const {
800  if (BS != R.BS)
801  return false;
802  if (AccessBins.size() != R.AccessBins.size())
803  return false;
804  auto It = begin(), RIt = R.begin(), E = end();
805  while (It != E) {
806  if (It->getFirst() != RIt->getFirst())
807  return false;
808  auto &Accs = It->getSecond();
809  auto &RAccs = RIt->getSecond();
810  if (Accs->size() != RAccs->size())
811  return false;
812  for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
813  if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
814  return false;
815  ++It;
816  ++RIt;
817  }
818  return true;
819  }
820  bool operator!=(const State &R) const { return !(*this == R); }
821 
822  /// We store accesses in a set with the instruction as key.
823  struct Accesses {
826 
827  unsigned size() const { return Accesses.size(); }
828 
829  using vec_iterator = decltype(Accesses)::iterator;
831  vec_iterator end() { return Accesses.end(); }
832 
833  using iterator = decltype(Map)::const_iterator;
835  return Map.find(Acc.getRemoteInst());
836  }
837  iterator find_end() { return Map.end(); }
838 
840  return Accesses[It->getSecond()];
841  }
842 
844  Map[Acc.getRemoteInst()] = Accesses.size();
845  Accesses.push_back(Acc);
846  }
847  };
848 
849  /// We store all accesses in bins denoted by their offset and size.
851 
852  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
853  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
854 
855 protected:
856  /// The bins with all the accesses for the associated pointer.
858 
859  /// Add a new access to the state at offset \p Offset and with size \p Size.
860  /// The access is associated with \p I, writes \p Content (if anything), and
861  /// is of kind \p Kind.
862  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
863  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
866  Instruction *RemoteI = nullptr,
867  Accesses *BinPtr = nullptr) {
868  AAPointerInfo::OffsetAndSize Key{Offset, Size};
869  Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
870  if (!Bin)
871  Bin = new (A.Allocator) Accesses;
872  AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
873  // Check if we have an access for this instruction in this bin, if not,
874  // simply add it.
875  auto It = Bin->find(Acc);
876  if (It == Bin->find_end()) {
877  Bin->insert(Acc);
878  return ChangeStatus::CHANGED;
879  }
880  // If the existing access is the same as the new one, nothing changed.
881  AAPointerInfo::Access &Current = Bin->get(It);
882  AAPointerInfo::Access Before = Current;
883  // The new one will be combined with the existing one.
884  Current &= Acc;
885  return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
886  }
887 
888  /// See AAPointerInfo::forallInterferingAccesses.
891  function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
892  if (!isValidState())
893  return false;
894 
895  for (const auto &It : AccessBins) {
896  AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
897  if (!OAS.mayOverlap(ItOAS))
898  continue;
899  bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
900  for (auto &Access : *It.getSecond())
901  if (!CB(Access, IsExact))
902  return false;
903  }
904  return true;
905  }
906 
907  /// See AAPointerInfo::forallInterferingAccesses.
909  Instruction &I,
910  function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
911  if (!isValidState())
912  return false;
913 
914  // First find the offset and size of I.
915  AAPointerInfo::OffsetAndSize OAS(-1, -1);
916  for (const auto &It : AccessBins) {
917  for (auto &Access : *It.getSecond()) {
918  if (Access.getRemoteInst() == &I) {
919  OAS = It.getFirst();
920  break;
921  }
922  }
923  if (OAS.getSize() != -1)
924  break;
925  }
926  // No access for I was found, we are done.
927  if (OAS.getSize() == -1)
928  return true;
929 
930  // Now that we have an offset and size, find all overlapping ones and use
931  // the callback on the accesses.
932  return forallInterferingAccesses(OAS, CB);
933  }
934 
935 private:
936  /// State to track fixpoint and validity.
937  BooleanState BS;
938 };
939 
940 namespace {
941 struct AAPointerInfoImpl
942  : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
944  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
945 
946  /// See AbstractAttribute::getAsStr().
947  const std::string getAsStr() const override {
948  return std::string("PointerInfo ") +
949  (isValidState() ? (std::string("#") +
950  std::to_string(AccessBins.size()) + " bins")
951  : "<invalid>");
952  }
953 
954  /// See AbstractAttribute::manifest(...).
955  ChangeStatus manifest(Attributor &A) override {
956  return AAPointerInfo::manifest(A);
957  }
958 
959  bool forallInterferingAccesses(
960  OffsetAndSize OAS,
961  function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
962  const override {
963  return State::forallInterferingAccesses(OAS, CB);
964  }
965 
966  bool
967  forallInterferingAccesses(Attributor &A, const AbstractAttribute &QueryingAA,
968  Instruction &I,
969  function_ref<bool(const Access &, bool)> UserCB,
970  bool &HasBeenWrittenTo) const override {
971  HasBeenWrittenTo = false;
972 
973  SmallPtrSet<const Access *, 8> DominatingWrites;
974  SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
975 
976  Function &Scope = *I.getFunction();
977  const auto &NoSyncAA = A.getAAFor<AANoSync>(
979  const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
981  const bool NoSync = NoSyncAA.isAssumedNoSync();
982 
983  // Helper to determine if we need to consider threading, which we cannot
984  // handle right now. However, if the function is (assumed) nosync or all
985  // instructions are executed by the initial thread only, we can ignore
986  // threading.
987  auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
988  if (NoSync)
989  return true;
990  if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
991  return true;
992  return false;
993  };
994 
995  // Helper to determine if the access is executed by the same thread as the
996  // load; for now it is sufficient to avoid any potential threading effects
997  // as we cannot deal with them anyway.
998  auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
999  return CanIgnoreThreading(*Acc.getLocalInst());
1000  };
1001 
1002  // TODO: Use inter-procedural reachability and dominance.
1003  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1005 
1006  const bool FindInterferingWrites = I.mayReadFromMemory();
1007  const bool FindInterferingReads = I.mayWriteToMemory();
1008  const bool UseDominanceReasoning =
1009  FindInterferingWrites && NoRecurseAA.isKnownNoRecurse();
1010  const bool CanUseCFGResoning = CanIgnoreThreading(I);
1011  InformationCache &InfoCache = A.getInfoCache();
1012  const DominatorTree *DT =
1014 
1015  enum GPUAddressSpace : unsigned {
1016  Generic = 0,
1017  Global = 1,
1018  Shared = 3,
1019  Constant = 4,
1020  Local = 5,
1021  };
1022 
1023  // Helper to check if a value has "kernel lifetime", that is it will not
1024  // outlive a GPU kernel. This is true for shared, constant, and local
1025  // globals on AMD and NVIDIA GPUs.
1026  auto HasKernelLifetime = [&](Value *V, Module &M) {
1027  Triple T(M.getTargetTriple());
1028  if (!(T.isAMDGPU() || T.isNVPTX()))
1029  return false;
1030  switch (V->getType()->getPointerAddressSpace()) {
1031  case GPUAddressSpace::Shared:
1033  case GPUAddressSpace::Local:
1034  return true;
1035  default:
1036  return false;
1037  };
1038  };
1039 
1040  // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1041  // to determine if we should look at reachability from the callee. For
1042  // certain pointers we know the lifetime and we do not have to step into the
1043  // callee to determine reachability as the pointer would be dead in the
1044  // callee. See the conditional initialization below.
1045  std::function<bool(const Function &)> IsLiveInCalleeCB;
1046 
1047  if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1048  // If the alloca containing function is not recursive the alloca
1049  // must be dead in the callee.
1050  const Function *AIFn = AI->getFunction();
1051  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1053  if (NoRecurseAA.isAssumedNoRecurse()) {
1054  IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1055  }
1056  } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1057  // If the global has kernel lifetime we can stop if we reach a kernel
1058  // as it is "dead" in the (unknown) callees.
1059  if (HasKernelLifetime(GV, *GV->getParent()))
1060  IsLiveInCalleeCB = [](const Function &Fn) {
1061  return !Fn.hasFnAttribute("kernel");
1062  };
1063  }
1064 
1065  auto AccessCB = [&](const Access &Acc, bool Exact) {
1066  if ((!FindInterferingWrites || !Acc.isWrite()) &&
1067  (!FindInterferingReads || !Acc.isRead()))
1068  return true;
1069 
1070  bool Dominates = DT && Exact && Acc.isMustAccess() &&
1071  (Acc.getLocalInst()->getFunction() == &Scope) &&
1072  DT->dominates(Acc.getRemoteInst(), &I);
1073  if (FindInterferingWrites && Dominates)
1074  HasBeenWrittenTo = true;
1075 
1076  // For now we only filter accesses based on CFG reasoning which does not
1077  // work yet if we have threading effects, or the access is complicated.
1078  if (CanUseCFGResoning && Dominates && UseDominanceReasoning &&
1079  IsSameThreadAsLoad(Acc))
1080  DominatingWrites.insert(&Acc);
1081 
1082  InterferingAccesses.push_back({&Acc, Exact});
1083  return true;
1084  };
1085  if (!State::forallInterferingAccesses(I, AccessCB))
1086  return false;
1087 
1088  if (HasBeenWrittenTo) {
1089  const Function *ScopePtr = &Scope;
1090  IsLiveInCalleeCB = [ScopePtr](const Function &Fn) {
1091  return ScopePtr != &Fn;
1092  };
1093  }
1094 
1095  // Helper to determine if we can skip a specific write access. This is in
1096  // the worst case quadratic as we are looking for another write that will
1097  // hide the effect of this one.
1098  auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1099  if ((!Acc.isWrite() ||
1100  !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1101  IsLiveInCalleeCB)) &&
1102  (!Acc.isRead() ||
1103  !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1104  IsLiveInCalleeCB)))
1105  return true;
1106 
1107  if (!DT || !UseDominanceReasoning)
1108  return false;
1109  if (!IsSameThreadAsLoad(Acc))
1110  return false;
1111  if (!DominatingWrites.count(&Acc))
1112  return false;
1113  for (const Access *DomAcc : DominatingWrites) {
1114  assert(Acc.getLocalInst()->getFunction() ==
1115  DomAcc->getLocalInst()->getFunction() &&
1116  "Expected dominating writes to be in the same function!");
1117 
1118  if (DomAcc != &Acc &&
1119  DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1120  return true;
1121  }
1122  }
1123  return false;
1124  };
1125 
1126  // Run the user callback on all accesses we cannot skip and return if that
1127  // succeeded for all or not.
1128  unsigned NumInterferingAccesses = InterferingAccesses.size();
1129  for (auto &It : InterferingAccesses) {
1130  if (NumInterferingAccesses > MaxInterferingAccesses ||
1131  !CanSkipAccess(*It.first, It.second)) {
1132  if (!UserCB(*It.first, It.second))
1133  return false;
1134  }
1135  }
1136  return true;
1137  }
1138 
1139  ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1140  int64_t Offset, CallBase &CB,
1141  bool FromCallee = false) {
1142  using namespace AA::PointerInfo;
1143  if (!OtherAA.getState().isValidState() || !isValidState())
1144  return indicatePessimisticFixpoint();
1145 
1146  const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1147  bool IsByval =
1148  FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1149 
1150  // Combine the accesses bin by bin.
1151  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1152  for (const auto &It : OtherAAImpl.getState()) {
1153  OffsetAndSize OAS = OffsetAndSize::getUnknown();
1154  if (Offset != OffsetAndSize::Unknown)
1155  OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1156  Accesses *Bin = AccessBins.lookup(OAS);
1157  for (const AAPointerInfo::Access &RAcc : *It.second) {
1158  if (IsByval && !RAcc.isRead())
1159  continue;
1160  bool UsedAssumedInformation = false;
1161  AccessKind AK = RAcc.getKind();
1163  if (FromCallee) {
1164  Content = A.translateArgumentToCallSiteContent(
1165  RAcc.getContent(), CB, *this, UsedAssumedInformation);
1166  AK =
1167  AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
1168  AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
1169  }
1170  Changed =
1171  Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1172  AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1173  }
1174  }
1175  return Changed;
1176  }
1177 
1178  /// Statistic tracking for all AAPointerInfo implementations.
1179  /// See AbstractAttribute::trackStatistics().
1180  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1181 
1182  /// Dump the state into \p O.
1183  void dumpState(raw_ostream &O) {
1184  for (auto &It : AccessBins) {
1185  O << "[" << It.first.getOffset() << "-"
1186  << It.first.getOffset() + It.first.getSize()
1187  << "] : " << It.getSecond()->size() << "\n";
1188  for (auto &Acc : *It.getSecond()) {
1189  O << " - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
1190  if (Acc.getLocalInst() != Acc.getRemoteInst())
1191  O << " --> " << *Acc.getRemoteInst()
1192  << "\n";
1193  if (!Acc.isWrittenValueYetUndetermined()) {
1194  if (Acc.getWrittenValue())
1195  O << " - c: " << *Acc.getWrittenValue() << "\n";
1196  else
1197  O << " - c: <unknown>\n";
1198  }
1199  }
1200  }
1201  }
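  // Illustration (informal): a pointer with a single 4-byte access at offset 0
  // prints one bin header, "[0-4] : 1", followed by one line per access that
  // shows the numeric AccessKind, the local instruction, the remote
  // instruction (if different), and the written value or "<unknown>" for
  // writes whose value is known to exist but not known itself.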
1202 };
1203 
1204 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1206  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1207  : AAPointerInfoImpl(IRP, A) {}
1208 
1209  /// Deal with an access and signal if it was handled successfully.
1210  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1211  Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1212  ChangeStatus &Changed, Type *Ty,
1213  int64_t Size = OffsetAndSize::Unknown) {
1214  using namespace AA::PointerInfo;
1215  // No need to find a size if one is given or the offset is unknown.
1216  if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1217  Ty) {
1218  const DataLayout &DL = A.getDataLayout();
1219  TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1220  if (!AccessSize.isScalable())
1221  Size = AccessSize.getFixedSize();
1222  }
1223  Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1224  return true;
1225  };
1226 
1227  /// Helper struct, will support ranges eventually.
1228  struct OffsetInfo {
1229  int64_t Offset = OffsetAndSize::Unassigned;
1230 
1231  bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1232  };
1233 
1234  /// See AbstractAttribute::updateImpl(...).
1235  ChangeStatus updateImpl(Attributor &A) override {
1236  using namespace AA::PointerInfo;
1237  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1238  Value &AssociatedValue = getAssociatedValue();
1239 
1240  const DataLayout &DL = A.getDataLayout();
1241  DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1242  OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1243 
1244  auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1245  bool &Follow) {
1246  assert(PtrOI.Offset != OffsetAndSize::Unassigned &&
1247  "Cannot pass through if the input Ptr was not visited!");
1248  OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1249  UsrOI = PtrOI;
1250  Follow = true;
1251  return true;
1252  };
1253 
1254  const auto *TLI = getAnchorScope()
1255  ? A.getInfoCache().getTargetLibraryInfoForFunction(
1256  *getAnchorScope())
1257  : nullptr;
1258  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1259  Value *CurPtr = U.get();
1260  User *Usr = U.getUser();
1261  LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1262  << *Usr << "\n");
1263  assert(OffsetInfoMap.count(CurPtr) &&
1264  "The current pointer offset should have been seeded!");
1265 
1266  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1267  if (CE->isCast())
1268  return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1269  if (CE->isCompare())
1270  return true;
1271  if (!isa<GEPOperator>(CE)) {
1272  LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1273  << "\n");
1274  return false;
1275  }
1276  }
1277  if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1278  // Note the order here, the Usr access might change the map, CurPtr is
1279  // already in it though.
1280  OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1281  OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1282  UsrOI = PtrOI;
1283 
1284  // TODO: Use range information.
1285  APInt GEPOffset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
1286  if (PtrOI.Offset == OffsetAndSize::Unknown ||
1287  !GEP->accumulateConstantOffset(DL, GEPOffset)) {
1288  LLVM_DEBUG(dbgs() << "[AAPointerInfo] GEP offset not constant "
1289  << *GEP << "\n");
1290  UsrOI.Offset = OffsetAndSize::Unknown;
1291  Follow = true;
1292  return true;
1293  }
1294 
1295  LLVM_DEBUG(dbgs() << "[AAPointerInfo] GEP offset is constant " << *GEP
1296  << "\n");
1297  UsrOI.Offset = PtrOI.Offset + GEPOffset.getZExtValue();
1298  Follow = true;
1299  return true;
1300  }
1301  if (isa<PtrToIntInst>(Usr))
1302  return false;
1303  if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
1304  return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1305 
1306  // For PHIs we need to take care of the recurrence explicitly as the value
1307  // might change while we iterate through a loop. For now, we give up if
1308  // the PHI is not invariant.
1309  if (isa<PHINode>(Usr)) {
1310  // Note the order here, the Usr access might change the map, CurPtr is
1311  // already in it though.
1312  bool IsFirstPHIUser = !OffsetInfoMap.count(Usr);
1313  OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1314  OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1315 
1316  // Check if the PHI operand has already an unknown offset as we can't
1317  // improve on that anymore.
1318  if (PtrOI.Offset == OffsetAndSize::Unknown) {
1319  LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown "
1320  << *CurPtr << " in " << *Usr << "\n");
1321  Follow = UsrOI.Offset != OffsetAndSize::Unknown;
1322  UsrOI = PtrOI;
1323  return true;
1324  }
1325 
1326  // Check if the PHI is invariant (so far).
1327  if (UsrOI == PtrOI) {
1328  assert(PtrOI.Offset != OffsetAndSize::Unassigned &&
1329  "Cannot assign if the current Ptr was not visited!");
1330  LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)");
1331  return true;
1332  }
1333 
1334  // Check if the PHI operand is not dependent on the PHI itself.
1335  APInt Offset(
1336  DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1337  0);
1338  Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
1339  DL, Offset, /* AllowNonInbounds */ true);
1340  auto It = OffsetInfoMap.find(CurPtrBase);
1341  if (It != OffsetInfoMap.end()) {
1342  Offset += It->getSecond().Offset;
1343  if (IsFirstPHIUser || Offset == UsrOI.Offset)
1344  return HandlePassthroughUser(Usr, PtrOI, Follow);
1345  LLVM_DEBUG(dbgs()
1346  << "[AAPointerInfo] PHI operand pointer offset mismatch "
1347  << *CurPtr << " in " << *Usr << "\n");
1348  } else {
1349  LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1350  << *CurPtr << " in " << *Usr << "\n");
1351  }
1352 
1353  // TODO: Approximate in case we know the direction of the recurrence.
1354  UsrOI = PtrOI;
1355  UsrOI.Offset = OffsetAndSize::Unknown;
1356  Follow = true;
1357  return true;
1358  }
1359 
1360  if (auto *LoadI = dyn_cast<LoadInst>(Usr)) {
1361  // If the access is to a pointer that may or may not be the associated
1362  // value, e.g. due to a PHI, we cannot assume it will be read.
1363  AccessKind AK = AccessKind::AK_R;
1364  if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1365  AK = AccessKind(AK | AccessKind::AK_MUST);
1366  else
1367  AK = AccessKind(AK | AccessKind::AK_MAY);
1368  return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr, AK,
1369  OffsetInfoMap[CurPtr].Offset, Changed,
1370  LoadI->getType());
1371  }
1372 
1373  if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1374  if (StoreI->getValueOperand() == CurPtr) {
1375  LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1376  << *StoreI << "\n");
1377  return false;
1378  }
1379  // If the access is to a pointer that may or may not be the associated
1380  // value, e.g. due to a PHI, we cannot assume it will be written.
1381  AccessKind AK = AccessKind::AK_W;
1382  if (getUnderlyingObject(CurPtr) == &AssociatedValue)
1383  AK = AccessKind(AK | AccessKind::AK_MUST);
1384  else
1385  AK = AccessKind(AK | AccessKind::AK_MAY);
1386  bool UsedAssumedInformation = false;
1388  A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
1389  UsedAssumedInformation, AA::Interprocedural);
1390  return handleAccess(A, *StoreI, *CurPtr, Content, AK,
1391  OffsetInfoMap[CurPtr].Offset, Changed,
1392  StoreI->getValueOperand()->getType());
1393  }
1394  if (auto *CB = dyn_cast<CallBase>(Usr)) {
1395  if (CB->isLifetimeStartOrEnd())
1396  return true;
1397  if (getFreedOperand(CB, TLI) == U)
1398  return true;
1399  if (CB->isArgOperand(&U)) {
1400  unsigned ArgNo = CB->getArgOperandNo(&U);
1401  const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1402  *this, IRPosition::callsite_argument(*CB, ArgNo),
1404  Changed = translateAndAddState(A, CSArgPI,
1405  OffsetInfoMap[CurPtr].Offset, *CB) |
1406  Changed;
1407  return isValidState();
1408  }
1409  LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1410  << "\n");
1411  // TODO: Allow some call uses
1412  return false;
1413  }
1414 
1415  LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1416  return false;
1417  };
1418  auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1419  if (OffsetInfoMap.count(NewU)) {
1420  LLVM_DEBUG({
1421  if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
1422  dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
1423  << OffsetInfoMap[NewU].Offset << " vs "
1424  << OffsetInfoMap[OldU].Offset << "\n";
1425  }
1426  });
1427  return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1428  }
1429  OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1430  return true;
1431  };
1432  if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1433  /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1434  /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
1435  LLVM_DEBUG(
1436  dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
1437  return indicatePessimisticFixpoint();
1438  }
1439 
1440  LLVM_DEBUG({
1441  dbgs() << "Accesses by bin after update:\n";
1442  dumpState(dbgs());
1443  });
1444 
1445  return Changed;
1446  }
1447 
1448  /// See AbstractAttribute::trackStatistics()
1449  void trackStatistics() const override {
1450  AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1451  }
1452 };
1453 
1454 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1455  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1456  : AAPointerInfoImpl(IRP, A) {}
1457 
1458  /// See AbstractAttribute::updateImpl(...).
1459  ChangeStatus updateImpl(Attributor &A) override {
1460  return indicatePessimisticFixpoint();
1461  }
1462 
1463  /// See AbstractAttribute::trackStatistics()
1464  void trackStatistics() const override {
1465  AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1466  }
1467 };
1468 
1469 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1470  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1471  : AAPointerInfoFloating(IRP, A) {}
1472 
1473  /// See AbstractAttribute::initialize(...).
1474  void initialize(Attributor &A) override {
1476  if (getAnchorScope()->isDeclaration())
1477  indicatePessimisticFixpoint();
1478  }
1479 
1480  /// See AbstractAttribute::trackStatistics()
1481  void trackStatistics() const override {
1482  AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1483  }
1484 };
1485 
1486 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1487  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1488  : AAPointerInfoFloating(IRP, A) {}
1489 
1490  /// See AbstractAttribute::updateImpl(...).
1491  ChangeStatus updateImpl(Attributor &A) override {
1492  using namespace AA::PointerInfo;
1493  // We handle memory intrinsics explicitly, at least the first (=
1494  // destination) and second (=source) arguments as we know how they are
1495  // accessed.
1496  if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1497  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1498  int64_t LengthVal = OffsetAndSize::Unknown;
1499  if (Length)
1500  LengthVal = Length->getSExtValue();
1501  Value &Ptr = getAssociatedValue();
1502  unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1503  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1504  if (ArgNo == 0) {
1505  handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_WRITE, 0,
1506  Changed, nullptr, LengthVal);
1507  } else if (ArgNo == 1) {
1508  handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_MUST_READ, 0, Changed,
1509  nullptr, LengthVal);
1510  } else {
1511  LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1512  << *MI << "\n");
1513  return indicatePessimisticFixpoint();
1514  }
1515 
1516  LLVM_DEBUG({
1517  dbgs() << "Accesses by bin after update:\n";
1518  dumpState(dbgs());
1519  });
1520 
1521  return Changed;
1522  }
1523 
1524  // TODO: Once we have call site specific value information we can provide
1525  // call site specific liveness information and then it makes
1526  // sense to specialize attributes for call site arguments instead of
1527  // redirecting requests to the callee argument.
1528  Argument *Arg = getAssociatedArgument();
1529  if (!Arg)
1530  return indicatePessimisticFixpoint();
1531  const IRPosition &ArgPos = IRPosition::argument(*Arg);
1532  auto &ArgAA =
1533  A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1534  return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1535  /* FromCallee */ true);
1536  }
1537 
1538  /// See AbstractAttribute::trackStatistics()
1539  void trackStatistics() const override {
1540  AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1541  }
1542 };
1543 
1544 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1545  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1546  : AAPointerInfoFloating(IRP, A) {}
1547 
1548  /// See AbstractAttribute::trackStatistics()
1549  void trackStatistics() const override {
1550  AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1551  }
1552 };
1553 } // namespace
1554 
1555 /// -----------------------NoUnwind Function Attribute--------------------------
1556 
1557 namespace {
1558 struct AANoUnwindImpl : AANoUnwind {
1559  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1560 
1561  const std::string getAsStr() const override {
1562  return getAssumed() ? "nounwind" : "may-unwind";
1563  }
1564 
1565  /// See AbstractAttribute::updateImpl(...).
1566  ChangeStatus updateImpl(Attributor &A) override {
1567  auto Opcodes = {
1568  (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
1569  (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet,
1570  (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1571 
1572  auto CheckForNoUnwind = [&](Instruction &I) {
1573  if (!I.mayThrow())
1574  return true;
1575 
1576  if (const auto *CB = dyn_cast<CallBase>(&I)) {
1577  const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1579  return NoUnwindAA.isAssumedNoUnwind();
1580  }
1581  return false;
1582  };
1583 
1584  bool UsedAssumedInformation = false;
1585  if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1586  UsedAssumedInformation))
1587  return indicatePessimisticFixpoint();
1588 
1589  return ChangeStatus::UNCHANGED;
1590  }
1591 };
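// Illustration (informal): a function whose body contains only arithmetic and
// calls to callees that are themselves (assumed) nounwind is deduced nounwind,
// because every instruction with one of the opcodes listed above either does
// not mayThrow() or is a call site whose callee position is assumed nounwind.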
1592 
1593 struct AANoUnwindFunction final : public AANoUnwindImpl {
1594  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1595  : AANoUnwindImpl(IRP, A) {}
1596 
1597  /// See AbstractAttribute::trackStatistics()
1598  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1599 };
1600 
1601 /// NoUnwind attribute deduction for a call site.
1602 struct AANoUnwindCallSite final : AANoUnwindImpl {
1603  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1604  : AANoUnwindImpl(IRP, A) {}
1605 
1606  /// See AbstractAttribute::initialize(...).
1607  void initialize(Attributor &A) override {
1609  Function *F = getAssociatedFunction();
1610  if (!F || F->isDeclaration())
1611  indicatePessimisticFixpoint();
1612  }
1613 
1614  /// See AbstractAttribute::updateImpl(...).
1615  ChangeStatus updateImpl(Attributor &A) override {
1616  // TODO: Once we have call site specific value information we can provide
1617  // call site specific liveness information and then it makes
1618  // sense to specialize attributes for call site arguments instead of
1619  // redirecting requests to the callee argument.
1620  Function *F = getAssociatedFunction();
1621  const IRPosition &FnPos = IRPosition::function(*F);
1622  auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1623  return clampStateAndIndicateChange(getState(), FnAA.getState());
1624  }
1625 
1626  /// See AbstractAttribute::trackStatistics()
1627  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1628 };
1629 } // namespace
1630 
1631 /// --------------------- Function Return Values -------------------------------
1632 
1633 namespace {
1634 /// "Attribute" that collects all potential returned values and the return
1635 /// instructions that they arise from.
1636 ///
1637 /// If there is a unique returned value R, the manifest method will:
1638 /// - mark R with the "returned" attribute, if R is an argument.
1639 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1640 
1641  /// Mapping of values potentially returned by the associated function to the
1642  /// return instructions that might return them.
1643  SmallMapVector<Value *, SmallSetVector<ReturnInst *, 4>, 8> ReturnedValues;
1644 
1645  /// State flags
1646  ///
1647  ///{
1648  bool IsFixed = false;
1649  bool IsValidState = true;
1650  ///}
1651 
1652 public:
1653  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1654  : AAReturnedValues(IRP, A) {}
1655 
1656  /// See AbstractAttribute::initialize(...).
1657  void initialize(Attributor &A) override {
1658  // Reset the state.
1659  IsFixed = false;
1660  IsValidState = true;
1661  ReturnedValues.clear();
1662 
1663  Function *F = getAssociatedFunction();
1664  if (!F || F->isDeclaration()) {
1665  indicatePessimisticFixpoint();
1666  return;
1667  }
1668  assert(!F->getReturnType()->isVoidTy() &&
1669  "Did not expect a void return type!");
1670 
1671  // The map from instruction opcodes to those instructions in the function.
1672  auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1673 
1674  // Look through all arguments, if one is marked as returned we are done.
1675  for (Argument &Arg : F->args()) {
1676  if (Arg.hasReturnedAttr()) {
1677  auto &ReturnInstSet = ReturnedValues[&Arg];
1678  if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1679  for (Instruction *RI : *Insts)
1680  ReturnInstSet.insert(cast<ReturnInst>(RI));
1681 
1682  indicateOptimisticFixpoint();
1683  return;
1684  }
1685  }
1686 
1687  if (!A.isFunctionIPOAmendable(*F))
1688  indicatePessimisticFixpoint();
1689  }
1690 
1691  /// See AbstractAttribute::manifest(...).
1692  ChangeStatus manifest(Attributor &A) override;
1693 
1694  /// See AbstractAttribute::getState(...).
1695  AbstractState &getState() override { return *this; }
1696 
1697  /// See AbstractAttribute::getState(...).
1698  const AbstractState &getState() const override { return *this; }
1699 
1700  /// See AbstractAttribute::updateImpl(Attributor &A).
1701  ChangeStatus updateImpl(Attributor &A) override;
1702 
1703  llvm::iterator_range<iterator> returned_values() override {
1704  return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1705  }
1706 
1707  llvm::iterator_range<const_iterator> returned_values() const override {
1708  return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1709  }
1710 
1711  /// Return the number of potential return values, -1 if unknown.
1712  size_t getNumReturnValues() const override {
1713  return isValidState() ? ReturnedValues.size() : -1;
1714  }
1715 
1716  /// Return an assumed unique return value if a single candidate is found. If
1717  /// there cannot be one, return a nullptr. If it is not clear yet, return the
1718  /// Optional::NoneType.
1719  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1720 
1721  /// See AbstractState::checkForAllReturnedValues(...).
1722  bool checkForAllReturnedValuesAndReturnInsts(
1723  function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1724  const override;
1725 
1726  /// Pretty print the attribute similar to the IR representation.
1727  const std::string getAsStr() const override;
1728 
1729  /// See AbstractState::isAtFixpoint().
1730  bool isAtFixpoint() const override { return IsFixed; }
1731 
1732  /// See AbstractState::isValidState().
1733  bool isValidState() const override { return IsValidState; }
1734 
1735  /// See AbstractState::indicateOptimisticFixpoint(...).
1736  ChangeStatus indicateOptimisticFixpoint() override {
1737  IsFixed = true;
1738  return ChangeStatus::UNCHANGED;
1739  }
1740 
1741  ChangeStatus indicatePessimisticFixpoint() override {
1742  IsFixed = true;
1743  IsValidState = false;
1744  return ChangeStatus::CHANGED;
1745  }
1746 };
1747 
1748 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1749  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1750 
1751  // Bookkeeping.
1752  assert(isValidState());
1753  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1754  "Number of functions with known return values");
1755 
1756  // Check if we have an assumed unique return value that we could manifest.
1757  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1758 
1759  if (!UniqueRV || !UniqueRV.value())
1760  return Changed;
1761 
1762  // Bookkeeping.
1763  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1764  "Number of functions with unique return");
1765  // If the assumed unique return value is an argument, annotate it.
1766  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) {
1767  if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1768  getAssociatedFunction()->getReturnType())) {
1769  getIRPosition() = IRPosition::argument(*UniqueRVArg);
1770  Changed = IRAttribute::manifest(A);
1771  }
1772  }
1773  return Changed;
1774 }
1775 
1776 const std::string AAReturnedValuesImpl::getAsStr() const {
1777  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1778  (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1779 }
1780 
1781 Optional<Value *>
1782 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1783  // If checkForAllReturnedValues provides a unique value, ignoring potential
1784  // undef values that can also be present, it is assumed to be the actual
1785  // return value and forwarded to the caller of this method. If there are
1786  // multiple, a nullptr is returned indicating there cannot be a unique
1787  // returned value.
1788  Optional<Value *> UniqueRV;
1789  Type *Ty = getAssociatedFunction()->getReturnType();
1790 
1791  auto Pred = [&](Value &RV) -> bool {
1792  UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1793  return UniqueRV != Optional<Value *>(nullptr);
1794  };
1795 
1796  if (!A.checkForAllReturnedValues(Pred, *this))
1797  UniqueRV = nullptr;
1798 
1799  return UniqueRV;
1800 }
1801 
1802 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1803  function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1804  const {
1805  if (!isValidState())
1806  return false;
1807 
1808  // Check all returned values but ignore call sites as long as we have not
1809  // encountered an overdefined one during an update.
1810  for (const auto &It : ReturnedValues) {
1811  Value *RV = It.first;
1812  if (!Pred(*RV, It.second))
1813  return false;
1814  }
1815 
1816  return true;
1817 }
1818 
1819 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1820  ChangeStatus Changed = ChangeStatus::UNCHANGED;
1821 
1822  SmallVector<AA::ValueAndContext> Values;
1823  bool UsedAssumedInformation = false;
1824  auto ReturnInstCB = [&](Instruction &I) {
1825  ReturnInst &Ret = cast<ReturnInst>(I);
1826  Values.clear();
1827  if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()),
1828  *this, Values, AA::Intraprocedural,
1829  UsedAssumedInformation))
1830  Values.push_back({*Ret.getReturnValue(), Ret});
1831 
1832  for (auto &VAC : Values) {
1833  assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) &&
1834  "Assumed returned value should be valid in function scope!");
1835  if (ReturnedValues[VAC.getValue()].insert(&Ret))
1836  Changed = ChangeStatus::CHANGED;
1837  }
1838  return true;
1839  };
1840 
1841  // Discover returned values from all live return instructions in the
1842  // associated function.
1843  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1844  UsedAssumedInformation))
1845  return indicatePessimisticFixpoint();
1846  return Changed;
1847 }
1848 
1849 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1850  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1851  : AAReturnedValuesImpl(IRP, A) {}
1852 
1853  /// See AbstractAttribute::trackStatistics()
1854  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1855 };
1856 
1857 /// Returned values information for a call site.
1858 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1859  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1860  : AAReturnedValuesImpl(IRP, A) {}
1861 
1862  /// See AbstractAttribute::initialize(...).
1863  void initialize(Attributor &A) override {
1864  // TODO: Once we have call site specific value information we can provide
1865  // call site specific liveness information and then it makes
1866  // sense to specialize attributes for call sites instead of
1867  // redirecting requests to the callee.
1868  llvm_unreachable("Abstract attributes for returned values are not "
1869  "supported for call sites yet!");
1870  }
1871 
1872  /// See AbstractAttribute::updateImpl(...).
1873  ChangeStatus updateImpl(Attributor &A) override {
1874  return indicatePessimisticFixpoint();
1875  }
1876 
1877  /// See AbstractAttribute::trackStatistics()
1878  void trackStatistics() const override {}
1879 };
1880 } // namespace
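
// Illustrative example (hypothetical IR): if every return instruction returns
// the same argument, e.g.
//
//   define ptr @identity(ptr %p) { ret ptr %p }
//
// then getAssumedUniqueReturnValue() yields %p, and since the argument type
// losslessly bitcasts to the return type, manifest() re-targets the position
// to the argument and adds the `returned` attribute to %p.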
1881 
1882 /// ------------------------ NoSync Function Attribute -------------------------
1883 
1884 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1885  if (!I->isAtomic())
1886  return false;
1887 
1888  if (auto *FI = dyn_cast<FenceInst>(I))
1889  // All legal orderings for fence are stronger than monotonic.
1890  return FI->getSyncScopeID() != SyncScope::SingleThread;
1891  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1892  // Unordered is not a legal ordering for cmpxchg.
1893  return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1894  AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1895  }
1896 
1897  AtomicOrdering Ordering;
1898  switch (I->getOpcode()) {
1899  case Instruction::AtomicRMW:
1900  Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1901  break;
1902  case Instruction::Store:
1903  Ordering = cast<StoreInst>(I)->getOrdering();
1904  break;
1905  case Instruction::Load:
1906  Ordering = cast<LoadInst>(I)->getOrdering();
1907  break;
1908  default:
1909  llvm_unreachable(
1910  "New atomic operations need to be known in the attributor.");
1911  }
1912 
1913  return (Ordering != AtomicOrdering::Unordered &&
1914  Ordering != AtomicOrdering::Monotonic);
1915 }
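
// A few illustrative cases for the check above (hypothetical IR):
//   load atomic i32, ptr %p monotonic, align 4   -> relaxed, returns false
//   load atomic i32, ptr %p acquire, align 4     -> returns true
//   fence syncscope("singlethread") seq_cst      -> returns false
//   fence seq_cst                                -> returns true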
1916 
1917 /// Return true if this intrinsic is nosync. This is only used for intrinsics
1918 /// which would be nosync except that they have a volatile flag. All other
1919 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1920 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
1921  if (auto *MI = dyn_cast<MemIntrinsic>(I))
1922  return !MI->isVolatile();
1923  return false;
1924 }
1925 
1926 namespace {
1927 struct AANoSyncImpl : AANoSync {
1928  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1929 
1930  const std::string getAsStr() const override {
1931  return getAssumed() ? "nosync" : "may-sync";
1932  }
1933 
1934  /// See AbstractAttribute::updateImpl(...).
1935  ChangeStatus updateImpl(Attributor &A) override;
1936 };
1937 
1938 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1939 
1940  auto CheckRWInstForNoSync = [&](Instruction &I) {
1941  return AA::isNoSyncInst(A, I, *this);
1942  };
1943 
1944  auto CheckForNoSync = [&](Instruction &I) {
1945  // At this point we handled all read/write effects and they are all
1946  // nosync, so they can be skipped.
1947  if (I.mayReadOrWriteMemory())
1948  return true;
1949 
1950  // non-convergent and readnone imply nosync.
1951  return !cast<CallBase>(I).isConvergent();
1952  };
1953 
1954  bool UsedAssumedInformation = false;
1955  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1956  UsedAssumedInformation) ||
1957  !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1958  UsedAssumedInformation))
1959  return indicatePessimisticFixpoint();
1960 
1961  return ChangeStatus::UNCHANGED;
1962 }
1963 
1964 struct AANoSyncFunction final : public AANoSyncImpl {
1965  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1966  : AANoSyncImpl(IRP, A) {}
1967 
1968  /// See AbstractAttribute::trackStatistics()
1969  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1970 };
1971 
1972 /// NoSync attribute deduction for a call site.
1973 struct AANoSyncCallSite final : AANoSyncImpl {
1974  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1975  : AANoSyncImpl(IRP, A) {}
1976 
1977  /// See AbstractAttribute::initialize(...).
1978  void initialize(Attributor &A) override {
1979  AANoSyncImpl::initialize(A);
1980  Function *F = getAssociatedFunction();
1981  if (!F || F->isDeclaration())
1982  indicatePessimisticFixpoint();
1983  }
1984 
1985  /// See AbstractAttribute::updateImpl(...).
1986  ChangeStatus updateImpl(Attributor &A) override {
1987  // TODO: Once we have call site specific value information we can provide
1988  // call site specific liveness information and then it makes
1989  // sense to specialize attributes for call site arguments instead of
1990  // redirecting requests to the callee argument.
1991  Function *F = getAssociatedFunction();
1992  const IRPosition &FnPos = IRPosition::function(*F);
1993  auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1994  return clampStateAndIndicateChange(getState(), FnAA.getState());
1995  }
1996 
1997  /// See AbstractAttribute::trackStatistics()
1998  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1999 };
2000 } // namespace
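
// Roughly, the deduction above marks a function `nosync` if every read/write
// instruction is nosync (no non-relaxed atomics, no volatile memory
// intrinsics, and calls only to nosync callees; see AA::isNoSyncInst) and
// every remaining call-like instruction that does not touch memory is
// non-convergent.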
2001 
2002 /// ------------------------ No-Free Attributes ----------------------------
2003 
2004 namespace {
2005 struct AANoFreeImpl : public AANoFree {
2006  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2007 
2008  /// See AbstractAttribute::updateImpl(...).
2009  ChangeStatus updateImpl(Attributor &A) override {
2010  auto CheckForNoFree = [&](Instruction &I) {
2011  const auto &CB = cast<CallBase>(I);
2012  if (CB.hasFnAttr(Attribute::NoFree))
2013  return true;
2014 
2015  const auto &NoFreeAA = A.getAAFor<AANoFree>(
2016  *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2017  return NoFreeAA.isAssumedNoFree();
2018  };
2019 
2020  bool UsedAssumedInformation = false;
2021  if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2022  UsedAssumedInformation))
2023  return indicatePessimisticFixpoint();
2024  return ChangeStatus::UNCHANGED;
2025  }
2026 
2027  /// See AbstractAttribute::getAsStr().
2028  const std::string getAsStr() const override {
2029  return getAssumed() ? "nofree" : "may-free";
2030  }
2031 };
2032 
2033 struct AANoFreeFunction final : public AANoFreeImpl {
2034  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2035  : AANoFreeImpl(IRP, A) {}
2036 
2037  /// See AbstractAttribute::trackStatistics()
2038  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2039 };
2040 
2041 /// NoFree attribute deduction for a call site.
2042 struct AANoFreeCallSite final : AANoFreeImpl {
2043  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2044  : AANoFreeImpl(IRP, A) {}
2045 
2046  /// See AbstractAttribute::initialize(...).
2047  void initialize(Attributor &A) override {
2048  AANoFreeImpl::initialize(A);
2049  Function *F = getAssociatedFunction();
2050  if (!F || F->isDeclaration())
2051  indicatePessimisticFixpoint();
2052  }
2053 
2054  /// See AbstractAttribute::updateImpl(...).
2055  ChangeStatus updateImpl(Attributor &A) override {
2056  // TODO: Once we have call site specific value information we can provide
2057  // call site specific liveness information and then it makes
2058  // sense to specialize attributes for call site arguments instead of
2059  // redirecting requests to the callee argument.
2060  Function *F = getAssociatedFunction();
2061  const IRPosition &FnPos = IRPosition::function(*F);
2062  auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2063  return clampStateAndIndicateChange(getState(), FnAA.getState());
2064  }
2065 
2066  /// See AbstractAttribute::trackStatistics()
2067  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2068 };
2069 
2070 /// NoFree attribute for floating values.
2071 struct AANoFreeFloating : AANoFreeImpl {
2072  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2073  : AANoFreeImpl(IRP, A) {}
2074 
2075  /// See AbstractAttribute::trackStatistics()
2076  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
2077 
2078  /// See AbstractAttribute::updateImpl(...).
2079  ChangeStatus updateImpl(Attributor &A) override {
2080  const IRPosition &IRP = getIRPosition();
2081 
2082  const auto &NoFreeAA = A.getAAFor<AANoFree>(
2083  *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2084  if (NoFreeAA.isAssumedNoFree())
2085  return ChangeStatus::UNCHANGED;
2086 
2087  Value &AssociatedValue = getIRPosition().getAssociatedValue();
2088  auto Pred = [&](const Use &U, bool &Follow) -> bool {
2089  Instruction *UserI = cast<Instruction>(U.getUser());
2090  if (auto *CB = dyn_cast<CallBase>(UserI)) {
2091  if (CB->isBundleOperand(&U))
2092  return false;
2093  if (!CB->isArgOperand(&U))
2094  return true;
2095  unsigned ArgNo = CB->getArgOperandNo(&U);
2096 
2097  const auto &NoFreeArg = A.getAAFor<AANoFree>(
2098  *this, IRPosition::callsite_argument(*CB, ArgNo),
2099  DepClassTy::REQUIRED);
2100  return NoFreeArg.isAssumedNoFree();
2101  }
2102 
2103  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2104  isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2105  Follow = true;
2106  return true;
2107  }
2108  if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2109  isa<ReturnInst>(UserI))
2110  return true;
2111 
2112  // Unknown user.
2113  return false;
2114  };
2115  if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2116  return indicatePessimisticFixpoint();
2117 
2118  return ChangeStatus::UNCHANGED;
2119  }
2120 };
2121 
2122 /// NoFree attribute for a call site argument.
2123 struct AANoFreeArgument final : AANoFreeFloating {
2124  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2125  : AANoFreeFloating(IRP, A) {}
2126 
2127  /// See AbstractAttribute::trackStatistics()
2128  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2129 };
2130 
2131 /// NoFree attribute for call site arguments.
2132 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2133  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2134  : AANoFreeFloating(IRP, A) {}
2135 
2136  /// See AbstractAttribute::updateImpl(...).
2137  ChangeStatus updateImpl(Attributor &A) override {
2138  // TODO: Once we have call site specific value information we can provide
2139  // call site specific liveness information and then it makes
2140  // sense to specialize attributes for call site arguments instead of
2141  // redirecting requests to the callee argument.
2142  Argument *Arg = getAssociatedArgument();
2143  if (!Arg)
2144  return indicatePessimisticFixpoint();
2145  const IRPosition &ArgPos = IRPosition::argument(*Arg);
2146  auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2147  return clampStateAndIndicateChange(getState(), ArgAA.getState());
2148  }
2149 
2150  /// See AbstractAttribute::trackStatistics()
2151  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
2152 };
2153 
2154 /// NoFree attribute for function return value.
2155 struct AANoFreeReturned final : AANoFreeFloating {
2156  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2157  : AANoFreeFloating(IRP, A) {
2158  llvm_unreachable("NoFree is not applicable to function returns!");
2159  }
2160 
2161  /// See AbstractAttribute::initialize(...).
2162  void initialize(Attributor &A) override {
2163  llvm_unreachable("NoFree is not applicable to function returns!");
2164  }
2165 
2166  /// See AbstractAttribute::updateImpl(...).
2167  ChangeStatus updateImpl(Attributor &A) override {
2168  llvm_unreachable("NoFree is not applicable to function returns!");
2169  }
2170 
2171  /// See AbstractAttribute::trackStatistics()
2172  void trackStatistics() const override {}
2173 };
2174 
2175 /// NoFree attribute deduction for a call site return value.
2176 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2177  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2178  : AANoFreeFloating(IRP, A) {}
2179 
2180  ChangeStatus manifest(Attributor &A) override {
2181  return ChangeStatus::UNCHANGED;
2182  }
2183  /// See AbstractAttribute::trackStatistics()
2184  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2185 };
2186 } // namespace
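
// Illustrative example: for a pointer value, AANoFreeFloating follows its uses
// through GEPs, casts, PHIs, and selects. Loads, stores, and returns are fine;
// a call-site argument is fine only if that argument position is (assumed)
// nofree. Any other user, e.g. passing the pointer to a deallocation routine,
// leads to a pessimistic fixpoint and no `nofree`.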
2187 
2188 /// ------------------------ NonNull Argument Attribute ------------------------
2189 namespace {
2190 static int64_t getKnownNonNullAndDerefBytesForUse(
2191  Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2192  const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2193  TrackUse = false;
2194 
2195  const Value *UseV = U->get();
2196  if (!UseV->getType()->isPointerTy())
2197  return 0;
2198 
2199  // We need to follow common pointer manipulation uses to the accesses they
2200  // feed into. We can try to be smart to avoid looking through things we do not
2201  // like for now, e.g., non-inbounds GEPs.
2202  if (isa<CastInst>(I)) {
2203  TrackUse = true;
2204  return 0;
2205  }
2206 
2207  if (isa<GetElementPtrInst>(I)) {
2208  TrackUse = true;
2209  return 0;
2210  }
2211 
2212  Type *PtrTy = UseV->getType();
2213  const Function *F = I->getFunction();
2214  bool NullPointerIsDefined =
2215  F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2216  const DataLayout &DL = A.getInfoCache().getDL();
2217  if (const auto *CB = dyn_cast<CallBase>(I)) {
2218  if (CB->isBundleOperand(U)) {
2219  if (RetainedKnowledge RK = getKnowledgeFromUse(
2220  U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2221  IsNonNull |=
2222  (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2223  return RK.ArgValue;
2224  }
2225  return 0;
2226  }
2227 
2228  if (CB->isCallee(U)) {
2229  IsNonNull |= !NullPointerIsDefined;
2230  return 0;
2231  }
2232 
2233  unsigned ArgNo = CB->getArgOperandNo(U);
2234  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2235  // As long as we only use known information there is no need to track
2236  // dependences here.
2237  auto &DerefAA =
2238  A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2239  IsNonNull |= DerefAA.isKnownNonNull();
2240  return DerefAA.getKnownDereferenceableBytes();
2241  }
2242 
2243  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2244  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2245  return 0;
2246 
2247  int64_t Offset;
2248  const Value *Base =
2249  getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2250  if (Base && Base == &AssociatedValue) {
2251  int64_t DerefBytes = Loc->Size.getValue() + Offset;
2252  IsNonNull |= !NullPointerIsDefined;
2253  return std::max(int64_t(0), DerefBytes);
2254  }
2255 
2256  /// Corner case when an offset is 0.
2257  Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2258  /*AllowNonInbounds*/ true);
2259  if (Base && Base == &AssociatedValue && Offset == 0) {
2260  int64_t DerefBytes = Loc->Size.getValue();
2261  IsNonNull |= !NullPointerIsDefined;
2262  return std::max(int64_t(0), DerefBytes);
2263  }
2264 
2265  return 0;
2266 }
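
// Illustrative example (hypothetical IR): for an associated value %arg used as
//
//   %gep = getelementptr inbounds i8, ptr %arg, i64 8
//   %val = load i64, ptr %gep, align 8
//
// the GEP use is tracked, and the load gives a precise 8-byte location at
// offset 8 from %arg, i.e. 16 known dereferenceable bytes; IsNonNull is also
// set when null is not a defined address in the enclosing function.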
2267 
2268 struct AANonNullImpl : AANonNull {
2269  AANonNullImpl(const IRPosition &IRP, Attributor &A)
2270  : AANonNull(IRP, A),
2271  NullIsDefined(NullPointerIsDefined(
2272  getAnchorScope(),
2273  getAssociatedValue().getType()->getPointerAddressSpace())) {}
2274 
2275  /// See AbstractAttribute::initialize(...).
2276  void initialize(Attributor &A) override {
2277  Value &V = *getAssociatedValue().stripPointerCasts();
2278  if (!NullIsDefined &&
2279  hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2280  /* IgnoreSubsumingPositions */ false, &A)) {
2281  indicateOptimisticFixpoint();
2282  return;
2283  }
2284 
2285  if (isa<ConstantPointerNull>(V)) {
2286  indicatePessimisticFixpoint();
2287  return;
2288  }
2289 
2290  AANonNull::initialize(A);
2291 
2292  bool CanBeNull, CanBeFreed;
2293  if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2294  CanBeFreed)) {
2295  if (!CanBeNull) {
2296  indicateOptimisticFixpoint();
2297  return;
2298  }
2299  }
2300 
2301  if (isa<GlobalValue>(V)) {
2302  indicatePessimisticFixpoint();
2303  return;
2304  }
2305 
2306  if (Instruction *CtxI = getCtxI())
2307  followUsesInMBEC(*this, A, getState(), *CtxI);
2308  }
2309 
2310  /// See followUsesInMBEC
2311  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2312  AANonNull::StateType &State) {
2313  bool IsNonNull = false;
2314  bool TrackUse = false;
2315  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2316  IsNonNull, TrackUse);
2317  State.setKnown(IsNonNull);
2318  return TrackUse;
2319  }
2320 
2321  /// See AbstractAttribute::getAsStr().
2322  const std::string getAsStr() const override {
2323  return getAssumed() ? "nonnull" : "may-null";
2324  }
2325 
2326  /// Flag to determine if the underlying value can be null and still allow
2327  /// valid accesses.
2328  const bool NullIsDefined;
2329 };
2330 
2331 /// NonNull attribute for a floating value.
2332 struct AANonNullFloating : public AANonNullImpl {
2333  AANonNullFloating(const IRPosition &IRP, Attributor &A)
2334  : AANonNullImpl(IRP, A) {}
2335 
2336  /// See AbstractAttribute::updateImpl(...).
2337  ChangeStatus updateImpl(Attributor &A) override {
2338  const DataLayout &DL = A.getDataLayout();
2339 
2340  bool Stripped;
2341  bool UsedAssumedInformation = false;
2342  SmallVector<AA::ValueAndContext> Values;
2343  if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2344  AA::AnyScope, UsedAssumedInformation)) {
2345  Values.push_back({getAssociatedValue(), getCtxI()});
2346  Stripped = false;
2347  } else {
2348  Stripped = Values.size() != 1 ||
2349  Values.front().getValue() != &getAssociatedValue();
2350  }
2351 
2352  DominatorTree *DT = nullptr;
2353  AssumptionCache *AC = nullptr;
2354  InformationCache &InfoCache = A.getInfoCache();
2355  if (const Function *Fn = getAnchorScope()) {
2356  DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2357  AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2358  }
2359 
2360  AANonNull::StateType T;
2361  auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
2362  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2363  DepClassTy::REQUIRED);
2364  if (!Stripped && this == &AA) {
2365  if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2366  T.indicatePessimisticFixpoint();
2367  } else {
2368  // Use abstract attribute information.
2369  const AANonNull::StateType &NS = AA.getState();
2370  T ^= NS;
2371  }
2372  return T.isValidState();
2373  };
2374 
2375  for (const auto &VAC : Values)
2376  if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
2377  return indicatePessimisticFixpoint();
2378 
2379  return clampStateAndIndicateChange(getState(), T);
2380  }
2381 
2382  /// See AbstractAttribute::trackStatistics()
2383  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2384 };
2385 
2386 /// NonNull attribute for function return value.
2387 struct AANonNullReturned final
2388  : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2389  AANonNullReturned(const IRPosition &IRP, Attributor &A)
2390  : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2391 
2392  /// See AbstractAttribute::getAsStr().
2393  const std::string getAsStr() const override {
2394  return getAssumed() ? "nonnull" : "may-null";
2395  }
2396 
2397  /// See AbstractAttribute::trackStatistics()
2398  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2399 };
2400 
2401 /// NonNull attribute for function argument.
2402 struct AANonNullArgument final
2403  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2404  AANonNullArgument(const IRPosition &IRP, Attributor &A)
2405  : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2406 
2407  /// See AbstractAttribute::trackStatistics()
2408  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2409 };
2410 
2411 struct AANonNullCallSiteArgument final : AANonNullFloating {
2412  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2413  : AANonNullFloating(IRP, A) {}
2414 
2415  /// See AbstractAttribute::trackStatistics()
2416  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2417 };
2418 
2419 /// NonNull attribute for a call site return position.
2420 struct AANonNullCallSiteReturned final
2421  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2422  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2423  : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2424 
2425  /// See AbstractAttribute::trackStatistics()
2426  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2427 };
2428 } // namespace
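
// Illustrative example: an argument that is unconditionally dereferenced in
// the must-be-executed context of the function entry is deduced `nonnull` via
// followUsesInMBEC and getKnownNonNullAndDerefBytesForUse, provided null is
// not a defined address in its address space. Floating values instead rely on
// value simplification plus isKnownNonZero in the update above.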
2429 
2430 /// ------------------------ No-Recurse Attributes ----------------------------
2431 
2432 namespace {
2433 struct AANoRecurseImpl : public AANoRecurse {
2434  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2435 
2436  /// See AbstractAttribute::getAsStr()
2437  const std::string getAsStr() const override {
2438  return getAssumed() ? "norecurse" : "may-recurse";
2439  }
2440 };
2441 
2442 struct AANoRecurseFunction final : AANoRecurseImpl {
2443  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2444  : AANoRecurseImpl(IRP, A) {}
2445 
2446  /// See AbstractAttribute::updateImpl(...).
2447  ChangeStatus updateImpl(Attributor &A) override {
2448 
2449  // If all live call sites are known to be no-recurse, we are as well.
2450  auto CallSitePred = [&](AbstractCallSite ACS) {
2451  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2452  *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2453  DepClassTy::NONE);
2454  return NoRecurseAA.isKnownNoRecurse();
2455  };
2456  bool UsedAssumedInformation = false;
2457  if (A.checkForAllCallSites(CallSitePred, *this, true,
2458  UsedAssumedInformation)) {
2459  // If we know all call sites and all are known no-recurse, we are done.
2460  // If all known call sites, which might not be all that exist, are known
2461  // to be no-recurse, we are not done but we can continue to assume
2462  // no-recurse. If one of the call sites we have not visited will become
2463  // live, another update is triggered.
2464  if (!UsedAssumedInformation)
2465  indicateOptimisticFixpoint();
2466  return ChangeStatus::UNCHANGED;
2467  }
2468 
2469  const AAFunctionReachability &EdgeReachability =
2470  A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2471  DepClassTy::REQUIRED);
2472  if (EdgeReachability.canReach(A, *getAnchorScope()))
2473  return indicatePessimisticFixpoint();
2474  return ChangeStatus::UNCHANGED;
2475  }
2476 
2477  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2478 };
2479 
2480 /// NoRecurse attribute deduction for a call site.
2481 struct AANoRecurseCallSite final : AANoRecurseImpl {
2482  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2483  : AANoRecurseImpl(IRP, A) {}
2484 
2485  /// See AbstractAttribute::initialize(...).
2486  void initialize(Attributor &A) override {
2487  AANoRecurseImpl::initialize(A);
2488  Function *F = getAssociatedFunction();
2489  if (!F || F->isDeclaration())
2490  indicatePessimisticFixpoint();
2491  }
2492 
2493  /// See AbstractAttribute::updateImpl(...).
2494  ChangeStatus updateImpl(Attributor &A) override {
2495  // TODO: Once we have call site specific value information we can provide
2496  // call site specific liveness information and then it makes
2497  // sense to specialize attributes for call site arguments instead of
2498  // redirecting requests to the callee argument.
2499  Function *F = getAssociatedFunction();
2500  const IRPosition &FnPos = IRPosition::function(*F);
2501  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2502  return clampStateAndIndicateChange(getState(), FnAA.getState());
2503  }
2504 
2505  /// See AbstractAttribute::trackStatistics()
2506  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2507 };
2508 } // namespace
2509 
2510 /// -------------------- Undefined-Behavior Attributes ------------------------
2511 
2512 namespace {
2513 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2514  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2515  : AAUndefinedBehavior(IRP, A) {}
2516 
2517  /// See AbstractAttribute::updateImpl(...).
2518  // through a pointer (i.e. also branches etc.)
2519  ChangeStatus updateImpl(Attributor &A) override {
2520  const size_t UBPrevSize = KnownUBInsts.size();
2521  const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2522 
2523  auto InspectMemAccessInstForUB = [&](Instruction &I) {
2524  // The LangRef now states that volatile stores are not UB, so let's skip them.
2525  if (I.isVolatile() && I.mayWriteToMemory())
2526  return true;
2527 
2528  // Skip instructions that are already saved.
2529  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2530  return true;
2531 
2532  // If we reach here, we know we have an instruction
2533  // that accesses memory through a pointer operand,
2534  // for which getPointerOperand() should give it to us.
2535  Value *PtrOp =
2536  const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2537  assert(PtrOp &&
2538  "Expected pointer operand of memory accessing instruction");
2539 
2540  // Either we stopped and the appropriate action was taken,
2541  // or we got back a simplified value to continue.
2542  Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2543  if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
2544  return true;
2545  const Value *PtrOpVal = SimplifiedPtrOp.value();
2546 
2547  // A memory access through a pointer is considered UB
2548  // only if the pointer has constant null value.
2549  // TODO: Expand it to not only check constant values.
2550  if (!isa<ConstantPointerNull>(PtrOpVal)) {
2551  AssumedNoUBInsts.insert(&I);
2552  return true;
2553  }
2554  const Type *PtrTy = PtrOpVal->getType();
2555 
2556  // Because we only consider instructions inside functions,
2557  // assume that a parent function exists.
2558  const Function *F = I.getFunction();
2559 
2560  // A memory access using constant null pointer is only considered UB
2561  // if null pointer is _not_ defined for the target platform.
2562  if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2563  AssumedNoUBInsts.insert(&I);
2564  else
2565  KnownUBInsts.insert(&I);
2566  return true;
2567  };
2568 
2569  auto InspectBrInstForUB = [&](Instruction &I) {
2570  // A conditional branch instruction is considered UB if it has `undef`
2571  // condition.
2572 
2573  // Skip instructions that are already saved.
2574  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2575  return true;
2576 
2577  // We know we have a branch instruction.
2578  auto *BrInst = cast<BranchInst>(&I);
2579 
2580  // Unconditional branches are never considered UB.
2581  if (BrInst->isUnconditional())
2582  return true;
2583 
2584  // Either we stopped and the appropriate action was taken,
2585  // or we got back a simplified value to continue.
2586  Optional<Value *> SimplifiedCond =
2587  stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2588  if (!SimplifiedCond || !*SimplifiedCond)
2589  return true;
2590  AssumedNoUBInsts.insert(&I);
2591  return true;
2592  };
2593 
2594  auto InspectCallSiteForUB = [&](Instruction &I) {
2595  // Check whether a call site always causes UB or not.
2596 
2597  // Skip instructions that are already saved.
2598  if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2599  return true;
2600 
2601  // Check nonnull and noundef argument attribute violation for each
2602  // callsite.
2603  CallBase &CB = cast<CallBase>(I);
2604  const Function *Callee = CB.getCalledFunction();
2605  if (!Callee)
2606  return true;
2607  for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2608  // If the current argument is known to be simplified to a null pointer and
2609  // the corresponding argument position is known to have the nonnull
2610  // attribute, the argument is poison. Furthermore, if the argument is
2611  // poison and the position is known to have the noundef attribute, this
2612  // call site is considered UB.
2613  if (idx >= Callee->arg_size())
2614  break;
2615  Value *ArgVal = CB.getArgOperand(idx);
2616  if (!ArgVal)
2617  continue;
2618  // Here, we handle three cases.
2619  // (1) Not having a value means it is dead. (we can replace the value
2620  // with undef)
2621  // (2) Simplified to undef. The argument violates the noundef attribute.
2622  // (3) Simplified to a null pointer where it is known to be nonnull.
2623  // The argument is a poison value and violates the noundef attribute.
2624  IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2625  auto &NoUndefAA =
2626  A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2627  if (!NoUndefAA.isKnownNoUndef())
2628  continue;
2629  bool UsedAssumedInformation = false;
2630  Optional<Value *> SimplifiedVal =
2631  A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
2632  UsedAssumedInformation, AA::Interprocedural);
2633  if (UsedAssumedInformation)
2634  continue;
2635  if (SimplifiedVal && !SimplifiedVal.value())
2636  return true;
2637  if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
2638  KnownUBInsts.insert(&I);
2639  continue;
2640  }
2641  if (!ArgVal->getType()->isPointerTy() ||
2642  !isa<ConstantPointerNull>(*SimplifiedVal.value()))
2643  continue;
2644  auto &NonNullAA =
2645  A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2646  if (NonNullAA.isKnownNonNull())
2647  KnownUBInsts.insert(&I);
2648  }
2649  return true;
2650  };
2651 
2652  auto InspectReturnInstForUB = [&](Instruction &I) {
2653  auto &RI = cast<ReturnInst>(I);
2654  // Either we stopped and the appropriate action was taken,
2655  // or we got back a simplified return value to continue.
2656  Optional<Value *> SimplifiedRetValue =
2657  stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2658  if (!SimplifiedRetValue || !*SimplifiedRetValue)
2659  return true;
2660 
2661  // Check if a return instruction always causes UB or not.
2662  // Note: It is guaranteed that the returned position of the anchor
2663  // scope has noundef attribute when this is called.
2664  // We also ensure the return position is not "assumed dead"
2665  // because the returned value was then potentially simplified to
2666  // `undef` in AAReturnedValues without removing the `noundef`
2667  // attribute yet.
2668 
2669  // When the returned position has the noundef attribute, UB occurs in the
2670  // following cases.
2671  // (1) Returned value is known to be undef.
2672  // (2) The value is known to be a null pointer and the returned
2673  // position has nonnull attribute (because the returned value is
2674  // poison).
2675  if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2676  auto &NonNullAA = A.getAAFor<AANonNull>(
2677  *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2678  if (NonNullAA.isKnownNonNull())
2679  KnownUBInsts.insert(&I);
2680  }
2681 
2682  return true;
2683  };
2684 
2685  bool UsedAssumedInformation = false;
2686  A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2687  {Instruction::Load, Instruction::Store,
2688  Instruction::AtomicCmpXchg,
2689  Instruction::AtomicRMW},
2690  UsedAssumedInformation,
2691  /* CheckBBLivenessOnly */ true);
2692  A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2693  UsedAssumedInformation,
2694  /* CheckBBLivenessOnly */ true);
2695  A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2696  UsedAssumedInformation);
2697 
2698  // If the returned position of the anchor scope has the noundef attribute, check
2699  // all returned instructions.
2700  if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2701  const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2702  if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2703  auto &RetPosNoUndefAA =
2704  A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2705  if (RetPosNoUndefAA.isKnownNoUndef())
2706  A.checkForAllInstructions(InspectReturnInstForUB, *this,
2707  {Instruction::Ret}, UsedAssumedInformation,
2708  /* CheckBBLivenessOnly */ true);
2709  }
2710  }
2711 
2712  if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2713  UBPrevSize != KnownUBInsts.size())
2714  return ChangeStatus::CHANGED;
2715  return ChangeStatus::UNCHANGED;
2716  }
2717 
2718  bool isKnownToCauseUB(Instruction *I) const override {
2719  return KnownUBInsts.count(I);
2720  }
2721 
2722  bool isAssumedToCauseUB(Instruction *I) const override {
2723  // In simple words, if an instruction is not in the set of instructions
2724  // assumed to _not_ cause UB, then it is assumed to cause UB (that
2725  // includes those in the KnownUBInsts set). The rest of the boilerplate
2726  // is to ensure that it is one of the instructions we test for UB.
2728 
2729  switch (I->getOpcode()) {
2730  case Instruction::Load:
2731  case Instruction::Store:
2732  case Instruction::AtomicCmpXchg:
2733  case Instruction::AtomicRMW:
2734  return !AssumedNoUBInsts.count(I);
2735  case Instruction::Br: {
2736  auto *BrInst = cast<BranchInst>(I);
2737  if (BrInst->isUnconditional())
2738  return false;
2739  return !AssumedNoUBInsts.count(I);
2740  } break;
2741  default:
2742  return false;
2743  }
2744  return false;
2745  }
2746 
2747  ChangeStatus manifest(Attributor &A) override {
2748  if (KnownUBInsts.empty())
2749  return ChangeStatus::UNCHANGED;
2750  for (Instruction *I : KnownUBInsts)
2751  A.changeToUnreachableAfterManifest(I);
2752  return ChangeStatus::CHANGED;
2753  }
2754 
2755  /// See AbstractAttribute::getAsStr()
2756  const std::string getAsStr() const override {
2757  return getAssumed() ? "undefined-behavior" : "no-ub";
2758  }
2759 
2760  /// Note: The correctness of this analysis depends on the fact that the
2761  /// following 2 sets will stop changing after some point.
2762  /// "Change" here means that their size changes.
2763  /// The size of each set is monotonically increasing
2764  /// (we only add items to them) and it is upper bounded by the number of
2765  /// instructions in the processed function (we can never save more
2766  /// elements in either set than this number). Hence, at some point,
2767  /// they will stop increasing.
2768  /// Consequently, at some point, both sets will have stopped
2769  /// changing, effectively making the analysis reach a fixpoint.
2770 
2771  /// Note: These 2 sets are disjoint and an instruction can be considered
2772  /// one of 3 things:
2773  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2774  /// the KnownUBInsts set.
2775  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2776  /// has a reason to assume it).
2777  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2778  /// could not find a reason to assume or prove that it can cause UB,
2779  /// hence it assumes it doesn't. We have a set for these instructions
2780  /// so that we don't reprocess them in every update.
2781  /// Note however that instructions in this set may cause UB.
2782 
2783 protected:
2784  /// A set of all live instructions _known_ to cause UB.
2785  SmallPtrSet<Instruction *, 8> KnownUBInsts;
2786 
2787 private:
2788  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2789  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2790 
2791  // Should be called during updates when we are processing an instruction
2792  // \p I that depends on a value \p V; one of the following has to happen:
2793  // - If the value is assumed, then stop.
2794  // - If the value is known but undef, then consider it UB.
2795  // - Otherwise, do specific processing with the simplified value.
2796  // We return None in the first 2 cases to signify that an appropriate
2797  // action was taken and the caller should stop.
2798  // Otherwise, we return the simplified value that the caller should
2799  // use for specific processing.
2800  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2801  Instruction *I) {
2802  bool UsedAssumedInformation = false;
2803  Optional<Value *> SimplifiedV =
2804  A.getAssumedSimplified(IRPosition::value(*V), *this,
2805  UsedAssumedInformation, AA::Interprocedural);
2806  if (!UsedAssumedInformation) {
2807  // Don't depend on assumed values.
2808  if (!SimplifiedV) {
2809  // If it is known (which we tested above) but it doesn't have a value,
2810  // then we can assume `undef` and hence the instruction is UB.
2811  KnownUBInsts.insert(I);
2812  return llvm::None;
2813  }
2814  if (!*SimplifiedV)
2815  return nullptr;
2816  V = *SimplifiedV;
2817  }
2818  if (isa<UndefValue>(V)) {
2819  KnownUBInsts.insert(I);
2820  return llvm::None;
2821  }
2822  return V;
2823  }
2824 };
2825 
2826 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2827  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2828  : AAUndefinedBehaviorImpl(IRP, A) {}
2829 
2830  /// See AbstractAttribute::trackStatistics()
2831  void trackStatistics() const override {
2832  STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2833  "Number of instructions known to have UB");
2834  BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2835  KnownUBInsts.size();
2836  }
2837 };
2838 } // namespace
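
// Illustrative example (hypothetical IR): in a function where null is not a
// defined address,
//
//   store i32 0, ptr null
//
// ends up in KnownUBInsts via InspectMemAccessInstForUB, and manifest() turns
// it into an `unreachable` after the fixpoint iteration.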
2839 
2840 /// ------------------------ Will-Return Attributes ----------------------------
2841 
2842 namespace {
2843 // Helper function that checks whether a function has any cycle that we do
2844 // not know to be bounded.
2845 // Loops with a known maximum trip count are considered bounded; any other cycle is not.
2846 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2847  ScalarEvolution *SE =
2848  A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2849  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2850  // If either SCEV or LoopInfo is not available for the function, then we
2851  // assume any cycle to be an unbounded cycle.
2852  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2853  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2854  if (!SE || !LI) {
2855  for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2856  if (SCCI.hasCycle())
2857  return true;
2858  return false;
2859  }
2860 
2861  // If there's irreducible control, the function may contain non-loop cycles.
2862  if (mayContainIrreducibleControl(F, LI))
2863  return true;
2864 
2865  // Any loop that does not have a max trip count is considered an unbounded cycle.
2866  for (auto *L : LI->getLoopsInPreorder()) {
2867  if (!SE->getSmallConstantMaxTripCount(L))
2868  return true;
2869  }
2870  return false;
2871 }
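
// Illustrative example: a loop like `for (int i = 0; i < 8; ++i)` has a small
// constant maximum trip count and is treated as bounded, whereas a
// pointer-chasing loop such as `while (p) p = p->next;` has no constant
// maximum trip count, so the function is conservatively considered to contain
// an unbounded cycle.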
2872 
2873 struct AAWillReturnImpl : public AAWillReturn {
2874  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2875  : AAWillReturn(IRP, A) {}
2876 
2877  /// See AbstractAttribute::initialize(...).
2878  void initialize(Attributor &A) override {
2879  AAWillReturn::initialize(A);
2880 
2881  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2882  indicateOptimisticFixpoint();
2883  return;
2884  }
2885  }
2886 
2887  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2888  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2889  // Check for `mustprogress` in the scope and the associated function which
2890  // might be different if this is a call site.
2891  if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2892  (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2893  return false;
2894 
2895  bool IsKnown;
2896  if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2897  return IsKnown || !KnownOnly;
2898  return false;
2899  }
2900 
2901  /// See AbstractAttribute::updateImpl(...).
2902  ChangeStatus updateImpl(Attributor &A) override {
2903  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2904  return ChangeStatus::UNCHANGED;
2905 
2906  auto CheckForWillReturn = [&](Instruction &I) {
2907  IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2908  const auto &WillReturnAA =
2909  A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2910  if (WillReturnAA.isKnownWillReturn())
2911  return true;
2912  if (!WillReturnAA.isAssumedWillReturn())
2913  return false;
2914  const auto &NoRecurseAA =
2915  A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2916  return NoRecurseAA.isAssumedNoRecurse();
2917  };
2918 
2919  bool UsedAssumedInformation = false;
2920  if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2921  UsedAssumedInformation))
2922  return indicatePessimisticFixpoint();
2923 
2924  return ChangeStatus::UNCHANGED;
2925  }
2926 
2927  /// See AbstractAttribute::getAsStr()
2928  const std::string getAsStr() const override {
2929  return getAssumed() ? "willreturn" : "may-noreturn";
2930  }
2931 };
2932 
2933 struct AAWillReturnFunction final : AAWillReturnImpl {
2934  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2935  : AAWillReturnImpl(IRP, A) {}
2936 
2937  /// See AbstractAttribute::initialize(...).
2938  void initialize(Attributor &A) override {
2939  AAWillReturnImpl::initialize(A);
2940 
2941  Function *F = getAnchorScope();
2942  if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2943  indicatePessimisticFixpoint();
2944  }
2945 
2946  /// See AbstractAttribute::trackStatistics()
2947  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2948 };
2949 
2950 /// WillReturn attribute deduction for a call site.
2951 struct AAWillReturnCallSite final : AAWillReturnImpl {
2952  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2953  : AAWillReturnImpl(IRP, A) {}
2954 
2955  /// See AbstractAttribute::initialize(...).
2956  void initialize(Attributor &A) override {
2957  AAWillReturnImpl::initialize(A);
2958  Function *F = getAssociatedFunction();
2959  if (!F || !A.isFunctionIPOAmendable(*F))
2960  indicatePessimisticFixpoint();
2961  }
2962 
2963  /// See AbstractAttribute::updateImpl(...).
2964  ChangeStatus updateImpl(Attributor &A) override {
2965  if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2966  return ChangeStatus::UNCHANGED;
2967 
2968  // TODO: Once we have call site specific value information we can provide
2969  // call site specific liveness information and then it makes
2970  // sense to specialize attributes for call site arguments instead of
2971  // redirecting requests to the callee argument.
2972  Function *F = getAssociatedFunction();
2973  const IRPosition &FnPos = IRPosition::function(*F);
2974  auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2975  return clampStateAndIndicateChange(getState(), FnAA.getState());
2976  }
2977 
2978  /// See AbstractAttribute::trackStatistics()
2979  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2980 };
2981 } // namespace
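
// In short, `willreturn` is deduced either directly from `mustprogress`
// combined with (assumed) readonly behavior, or from the absence of unbounded
// cycles in the body together with every call-like instruction being (assumed)
// willreturn and, unless already known, norecurse.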
2982 
2983 /// -------------------AAReachability Attribute--------------------------
2984 
2985 namespace {
2986 struct AAReachabilityImpl : AAReachability {
2987  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2988  : AAReachability(IRP, A) {}
2989 
2990  const std::string getAsStr() const override {
2991  // TODO: Return the number of reachable queries.
2992  return "reachable";
2993  }
2994 
2995  /// See AbstractAttribute::updateImpl(...).
2996  ChangeStatus updateImpl(Attributor &A) override {
2997  return ChangeStatus::UNCHANGED;
2998  }
2999 };
3000 
3001 struct AAReachabilityFunction final : public AAReachabilityImpl {
3002  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3003  : AAReachabilityImpl(IRP, A) {}
3004 
3005  /// See AbstractAttribute::trackStatistics()
3006  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3007 };
3008 } // namespace
3009 
3010 /// ------------------------ NoAlias Argument Attribute ------------------------
3011 
3012 namespace {
3013 struct AANoAliasImpl : AANoAlias {
3014  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3015  assert(getAssociatedType()->isPointerTy() &&
3016  "Noalias is a pointer attribute");
3017  }
3018 
3019  const std::string getAsStr() const override {
3020  return getAssumed() ? "noalias" : "may-alias";
3021  }
3022 };
3023 
3024 /// NoAlias attribute for a floating value.
3025 struct AANoAliasFloating final : AANoAliasImpl {
3026  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3027  : AANoAliasImpl(IRP, A) {}
3028 
3029  /// See AbstractAttribute::initialize(...).
3030  void initialize(Attributor &A) override {
3031  AANoAliasImpl::initialize(A);
3032  Value *Val = &getAssociatedValue();
3033  do {
3034  CastInst *CI = dyn_cast<CastInst>(Val);
3035  if (!CI)
3036  break;
3037  Value *Base = CI->getOperand(0);
3038  if (!Base->hasOneUse())
3039  break;
3040  Val = Base;
3041  } while (true);
3042 
3043  if (!Val->getType()->isPointerTy()) {
3044  indicatePessimisticFixpoint();
3045  return;
3046  }
3047 
3048  if (isa<AllocaInst>(Val))
3049  indicateOptimisticFixpoint();
3050  else if (isa<ConstantPointerNull>(Val) &&
3051  !NullPointerIsDefined(getAnchorScope(),
3052  Val->getType()->getPointerAddressSpace()))
3053  indicateOptimisticFixpoint();
3054  else if (Val != &getAssociatedValue()) {
3055  const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3056  *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3057  if (ValNoAliasAA.isKnownNoAlias())
3058  indicateOptimisticFixpoint();
3059  }
3060  }
3061 
3062  /// See AbstractAttribute::updateImpl(...).
3063  ChangeStatus updateImpl(Attributor &A) override {
3064  // TODO: Implement this.
3065  return indicatePessimisticFixpoint();
3066  }
3067 
3068  /// See AbstractAttribute::trackStatistics()
3069  void trackStatistics() const override {
3070  STATS_DECLTRACK_FLOATING_ATTR(noalias)
3071  }
3072 };
3073 
3074 /// NoAlias attribute for an argument.
3075 struct AANoAliasArgument final
3076  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3077  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3078  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3079 
3080  /// See AbstractAttribute::initialize(...).
3081  void initialize(Attributor &A) override {
3082  Base::initialize(A);
3083  // See callsite argument attribute and callee argument attribute.
3084  if (hasAttr({Attribute::ByVal}))
3085  indicateOptimisticFixpoint();
3086  }
3087 
3088  /// See AbstractAttribute::update(...).
3089  ChangeStatus updateImpl(Attributor &A) override {
3090  // We have to make sure no-alias on the argument does not break
3091  // synchronization when this is a callback argument, see also [1] below.
3092  // If synchronization cannot be affected, we delegate to the base updateImpl
3093  // function, otherwise we give up for now.
3094 
3095  // If the function is no-sync, no-alias cannot break synchronization.
3096  const auto &NoSyncAA =
3097  A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3098  DepClassTy::OPTIONAL);
3099  if (NoSyncAA.isAssumedNoSync())
3100  return Base::updateImpl(A);
3101 
3102  // If the argument is read-only, no-alias cannot break synchronization.
3103  bool IsKnown;
3104  if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3105  return Base::updateImpl(A);
3106 
3107  // If the argument is never passed through callbacks, no-alias cannot break
3108  // synchronization.
3109  bool UsedAssumedInformation = false;
3110  if (A.checkForAllCallSites(
3111  [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3112  true, UsedAssumedInformation))
3113  return Base::updateImpl(A);
3114 
3115  // TODO: add no-alias but make sure it doesn't break synchronization by
3116  // introducing fake uses. See:
3117  // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3118  // International Workshop on OpenMP 2018,
3119  // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3120 
3121  return indicatePessimisticFixpoint();
3122  }
3123 
3124  /// See AbstractAttribute::trackStatistics()
3125  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3126 };
3127 
3128 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3129  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3130  : AANoAliasImpl(IRP, A) {}
3131 
3132  /// See AbstractAttribute::initialize(...).
3133  void initialize(Attributor &A) override {
3134  // See callsite argument attribute and callee argument attribute.
3135  const auto &CB = cast<CallBase>(getAnchorValue());
3136  if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3137  indicateOptimisticFixpoint();
3138  Value &Val = getAssociatedValue();
3139  if (isa<ConstantPointerNull>(Val) &&
3140  !NullPointerIsDefined(getAnchorScope(),
3141  Val.getType()->getPointerAddressSpace()))
3142  indicateOptimisticFixpoint();
3143  }
3144 
3145  /// Determine if the underlying value may alias with the call site argument
3146  /// \p OtherArgNo of \p ICS (= the underlying call site).
3147  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3148  const AAMemoryBehavior &MemBehaviorAA,
3149  const CallBase &CB, unsigned OtherArgNo) {
3150  // We do not need to worry about aliasing with the underlying IRP.
3151  if (this->getCalleeArgNo() == (int)OtherArgNo)
3152  return false;
3153 
3154  // If it is not a pointer or pointer vector we do not alias.
3155  const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3156  if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3157  return false;
3158 
3159  auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3160  *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3161 
3162  // If the argument is readnone, there is no read-write aliasing.
3163  if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3164  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3165  return false;
3166  }
3167 
3168  // If the argument is readonly and the underlying value is readonly, there
3169  // is no read-write aliasing.
3170  bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3171  if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3172  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3173  A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3174  return false;
3175  }
3176 
3177  // We have to utilize actual alias analysis queries so we need the object.
3178  if (!AAR)
3179  AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3180 
3181  // Try to rule it out at the call site.
3182  bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3183  LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3184  "callsite arguments: "
3185  << getAssociatedValue() << " " << *ArgOp << " => "
3186  << (IsAliasing ? "" : "no-") << "alias \n");
3187 
3188  return IsAliasing;
3189  }
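// Editor's note (illustrative summary, not upstream code): mayAliasWithArgument
// only reports a problem when a read-write conflict is still possible. For
// example, if the other call site argument is assumed readnone, or both the
// underlying value and the other argument are assumed readonly, a no-alias
// assumption cannot introduce a new read-write race, so the query returns
// false without consulting AAResults.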
3190 
3191  bool
3192  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3193  const AAMemoryBehavior &MemBehaviorAA,
3194  const AANoAlias &NoAliasAA) {
3195  // We can deduce "noalias" if the following conditions hold.
3196  // (i) Associated value is assumed to be noalias in the definition.
3197  // (ii) Associated value is assumed to be no-capture in all the uses
3198  // possibly executed before this callsite.
3199  // (iii) There is no other pointer argument which could alias with the
3200  // value.
3201 
3202  bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3203  if (!AssociatedValueIsNoAliasAtDef) {
3204  LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3205  << " is not no-alias at the definition\n");
3206  return false;
3207  }
3208 
3209  auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3210  const auto &DerefAA = A.getAAFor<AADereferenceable>(
3211  *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3212  return DerefAA.getAssumedDereferenceableBytes();
3213  };
3214 
3215  A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3216 
3217  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3218  const Function *ScopeFn = VIRP.getAnchorScope();
3219  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3220  // Check whether the value is captured in the scope using AANoCapture.
3221  // Look at CFG and check only uses possibly executed before this
3222  // callsite.
3223  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3224  Instruction *UserI = cast<Instruction>(U.getUser());
3225 
3226  // If UserI is the current instruction and there is a single potential use of
3227  // the value in UserI we allow the use.
3228  // TODO: We should inspect the operands and allow those that cannot alias
3229  // with the value.
3230  if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3231  return true;
3232 
3233  if (ScopeFn) {
3234  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3235  if (CB->isArgOperand(&U)) {
3236 
3237  unsigned ArgNo = CB->getArgOperandNo(&U);
3238 
3239  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3240  *this, IRPosition::callsite_argument(*CB, ArgNo),
3241  DepClassTy::OPTIONAL);
3242 
3243  if (NoCaptureAA.isAssumedNoCapture())
3244  return true;
3245  }
3246  }
3247 
3248  if (!AA::isPotentiallyReachable(
3249  A, *UserI, *getCtxI(), *this,
3250  [ScopeFn](const Function &Fn) { return &Fn != ScopeFn; }))
3251  return true;
3252  }
3253 
3254  // TODO: We should track the capturing uses in AANoCapture but the problem
3255  // is CGSCC runs. For those we would need to "allow" AANoCapture for
3256  // a value in the module slice.
3257  switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3258  case UseCaptureKind::NO_CAPTURE:
3259  return true;
3260  case UseCaptureKind::MAY_CAPTURE:
3261  LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3262  << "\n");
3263  return false;
3264  case UseCaptureKind::PASSTHROUGH:
3265  Follow = true;
3266  return true;
3267  }
3268  llvm_unreachable("unknown UseCaptureKind");
3269  };
3270 
3271  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3272  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3273  LLVM_DEBUG(
3274  dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3275  << " cannot be noalias as it is potentially captured\n");
3276  return false;
3277  }
3278  }
3279  A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3280 
3281  // Check there is no other pointer argument which could alias with the
3282  // value passed at this call site.
3283  // TODO: AbstractCallSite
3284  const auto &CB = cast<CallBase>(getAnchorValue());
3285  for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3286  if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3287  return false;
3288 
3289  return true;
3290  }
3291 
3292  /// See AbstractAttribute::updateImpl(...).
3293  ChangeStatus updateImpl(Attributor &A) override {
3294  // If the argument is readnone we are done as there are no accesses via the
3295  // argument.
3296  auto &MemBehaviorAA =
3297  A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3298  if (MemBehaviorAA.isAssumedReadNone()) {
3299  A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3300  return ChangeStatus::UNCHANGED;
3301  }
3302 
3303  const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3304  const auto &NoAliasAA =
3305  A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3306 
3307  AAResults *AAR = nullptr;
3308  if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3309  NoAliasAA)) {
3310  LLVM_DEBUG(
3311  dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3312  return ChangeStatus::UNCHANGED;
3313  }
3314 
3315  return indicatePessimisticFixpoint();
3316  }
3317 
3318  /// See AbstractAttribute::trackStatistics()
3319  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3320 };
3321 
3322 /// NoAlias attribute for function return value.
3323 struct AANoAliasReturned final : AANoAliasImpl {
3324  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3325  : AANoAliasImpl(IRP, A) {}
3326 
3327  /// See AbstractAttribute::initialize(...).
3328  void initialize(Attributor &A) override {
3329  AANoAliasImpl::initialize(A);
3330  Function *F = getAssociatedFunction();
3331  if (!F || F->isDeclaration())
3332  indicatePessimisticFixpoint();
3333  }
3334 
3335  /// See AbstractAttribute::updateImpl(...).
3336  ChangeStatus updateImpl(Attributor &A) override {
3337 
3338  auto CheckReturnValue = [&](Value &RV) -> bool {
3339  if (Constant *C = dyn_cast<Constant>(&RV))
3340  if (C->isNullValue() || isa<UndefValue>(C))
3341  return true;
3342 
3343  /// For now, we can only deduce noalias if we have call sites.
3344  /// FIXME: add more support.
3345  if (!isa<CallBase>(&RV))
3346  return false;
3347 
3348  const IRPosition &RVPos = IRPosition::value(RV);
3349  const auto &NoAliasAA =
3350  A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3351  if (!NoAliasAA.isAssumedNoAlias())
3352  return false;
3353 
3354  const auto &NoCaptureAA =
3355  A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3356  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3357  };
3358 
3359  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3360  return indicatePessimisticFixpoint();
3361 
3362  return ChangeStatus::UNCHANGED;
3363  }
3364 
3365  /// See AbstractAttribute::trackStatistics()
3366  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3367 };
3368 
3369 /// NoAlias attribute deduction for a call site return value.
3370 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3371  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3372  : AANoAliasImpl(IRP, A) {}
3373 
3374  /// See AbstractAttribute::initialize(...).
3375  void initialize(Attributor &A) override {
3376  AANoAliasImpl::initialize(A);
3377  Function *F = getAssociatedFunction();
3378  if (!F || F->isDeclaration())
3379  indicatePessimisticFixpoint();
3380  }
3381 
3382  /// See AbstractAttribute::updateImpl(...).
3383  ChangeStatus updateImpl(Attributor &A) override {
3384  // TODO: Once we have call site specific value information we can provide
3385  // call site specific liveness information and then it makes
3386  // sense to specialize attributes for call site arguments instead of
3387  // redirecting requests to the callee argument.
3388  Function *F = getAssociatedFunction();
3389  const IRPosition &FnPos = IRPosition::returned(*F);
3390  auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3391  return clampStateAndIndicateChange(getState(), FnAA.getState());
3392  }
3393 
3394  /// See AbstractAttribute::trackStatistics()
3395  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3396 };
3397 } // namespace
3398 
3399 /// -------------------AAIsDead Function Attribute-----------------------
3400 
3401 namespace {
3402 struct AAIsDeadValueImpl : public AAIsDead {
3403  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3404 
3405  /// See AbstractAttribute::initialize(...).
3406  void initialize(Attributor &A) override {
3407  if (auto *Scope = getAnchorScope())
3408  if (!A.isRunOn(*Scope))
3409  indicatePessimisticFixpoint();
3410  }
3411 
3412  /// See AAIsDead::isAssumedDead().
3413  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3414 
3415  /// See AAIsDead::isKnownDead().
3416  bool isKnownDead() const override { return isKnown(IS_DEAD); }
3417 
3418  /// See AAIsDead::isAssumedDead(BasicBlock *).
3419  bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3420 
3421  /// See AAIsDead::isKnownDead(BasicBlock *).
3422  bool isKnownDead(const BasicBlock *BB) const override { return false; }
3423 
3424  /// See AAIsDead::isAssumedDead(Instruction *I).
3425  bool isAssumedDead(const Instruction *I) const override {
3426  return I == getCtxI() && isAssumedDead();
3427  }
3428 
3429  /// See AAIsDead::isKnownDead(Instruction *I).
3430  bool isKnownDead(const Instruction *I) const override {
3431  return isAssumedDead(I) && isKnownDead();
3432  }
3433 
3434  /// See AbstractAttribute::getAsStr().
3435  const std::string getAsStr() const override {
3436  return isAssumedDead() ? "assumed-dead" : "assumed-live";
3437  }
3438 
3439  /// Check if all uses are assumed dead.
3440  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3441  // Callers might not check the type, void has no uses.
3442  if (V.getType()->isVoidTy() || V.use_empty())
3443  return true;
3444 
3445  // If we replace a value with a constant there are no uses left afterwards.
3446  if (!isa<Constant>(V)) {
3447  if (auto *I = dyn_cast<Instruction>(&V))
3448  if (!A.isRunOn(*I->getFunction()))
3449  return false;
3450  bool UsedAssumedInformation = false;
3451  std::optional<Constant *> C =
3452  A.getAssumedConstant(V, *this, UsedAssumedInformation);
3453  if (!C || *C)
3454  return true;
3455  }
3456 
3457  auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3458  // Explicitly set the dependence class to required because we want a long
3459  // chain of N dependent instructions to be considered live as soon as one is
3460  // without going through N update cycles. This is not required for
3461  // correctness.
3462  return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3463  DepClassTy::REQUIRED,
3464  /* IgnoreDroppableUses */ false);
3465  }
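// Editor's note: the use predicate above rejects every use it is asked about,
// so checkForAllUses can only succeed when the liveness machinery already
// classifies every remaining use as dead (or the value is void, has no uses,
// or may still be replaced by a constant). In other words, this helper never
// proves a use dead by itself; it only aggregates what other AAs assume.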
3466 
3467  /// Determine if \p I is assumed to be side-effect free.
3468  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3469  if (!I || wouldInstructionBeTriviallyDead(I))
3470  return true;
3471 
3472  auto *CB = dyn_cast<CallBase>(I);
3473  if (!CB || isa<IntrinsicInst>(CB))
3474  return false;
3475 
3476  const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3477  const auto &NoUnwindAA =
3478  A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3479  if (!NoUnwindAA.isAssumedNoUnwind())
3480  return false;
3481  if (!NoUnwindAA.isKnownNoUnwind())
3482  A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3483 
3484  bool IsKnown;
3485  return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3486  }
3487 };
3488 
3489 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3490  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3491  : AAIsDeadValueImpl(IRP, A) {}
3492 
3493  /// See AbstractAttribute::initialize(...).
3494  void initialize(Attributor &A) override {
3495  AAIsDeadValueImpl::initialize(A);
3496 
3497  if (isa<UndefValue>(getAssociatedValue())) {
3498  indicatePessimisticFixpoint();
3499  return;
3500  }
3501 
3502  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3503  if (!isAssumedSideEffectFree(A, I)) {
3504  if (!isa_and_nonnull<StoreInst>(I))
3505  indicatePessimisticFixpoint();
3506  else
3507  removeAssumedBits(HAS_NO_EFFECT);
3508  }
3509  }
3510 
3511  bool isDeadStore(Attributor &A, StoreInst &SI) {
3512  // The LangRef now states that volatile stores are not UB/dead, so skip them.
3513  if (SI.isVolatile())
3514  return false;
3515 
3516  bool UsedAssumedInformation = false;
3517  SmallSetVector<Value *, 4> PotentialCopies;
3518  if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3519  UsedAssumedInformation)) {
3520  LLVM_DEBUG(
3521  dbgs()
3522  << "[AAIsDead] Could not determine potential copies of store!\n");
3523  return false;
3524  }
3525  LLVM_DEBUG(dbgs() << "[AAIsDead] Store has " << PotentialCopies.size()
3526  << " potential copies.\n");
3527  return llvm::all_of(PotentialCopies, [&](Value *V) {
3528  if (A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3529  UsedAssumedInformation))
3530  return true;
3531  LLVM_DEBUG(dbgs() << "[AAIsDead] Potential copy " << *V
3532  << " is assumed live!\n");
3533  return false;
3534  });
3535  }
3536 
3537  /// See AbstractAttribute::getAsStr().
3538  const std::string getAsStr() const override {
3539  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3540  if (isa_and_nonnull<StoreInst>(I))
3541  if (isValidState())
3542  return "assumed-dead-store";
3543  return AAIsDeadValueImpl::getAsStr();
3544  }
3545 
3546  /// See AbstractAttribute::updateImpl(...).
3547  ChangeStatus updateImpl(Attributor &A) override {
3548  Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3549  if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3550  if (!isDeadStore(A, *SI))
3551  return indicatePessimisticFixpoint();
3552  } else {
3553  if (!isAssumedSideEffectFree(A, I))
3554  return indicatePessimisticFixpoint();
3555  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3556  return indicatePessimisticFixpoint();
3557  }
3558  return ChangeStatus::UNCHANGED;
3559  }
3560 
3561  bool isRemovableStore() const override {
3562  return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3563  }
3564 
3565  /// See AbstractAttribute::manifest(...).
3566  ChangeStatus manifest(Attributor &A) override {
3567  Value &V = getAssociatedValue();
3568  if (auto *I = dyn_cast<Instruction>(&V)) {
3569  // If we get here we basically know the users are all dead. We check if
3570  // isAssumedSideEffectFree returns true here again because it might not be
3571  // the case and only the users are dead but the instruction (=call) is
3572  // still needed.
3573  if (isa<StoreInst>(I) ||
3574  (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3575  A.deleteAfterManifest(*I);
3576  return ChangeStatus::CHANGED;
3577  }
3578  }
3579  return ChangeStatus::UNCHANGED;
3580  }
3581 
3582  /// See AbstractAttribute::trackStatistics()
3583  void trackStatistics() const override {
3584  STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3585  }
3586 };
3587 
3588 struct AAIsDeadArgument : public AAIsDeadFloating {
3589  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3590  : AAIsDeadFloating(IRP, A) {}
3591 
3592  /// See AbstractAttribute::initialize(...).
3593  void initialize(Attributor &A) override {
3594  AAIsDeadFloating::initialize(A);
3595  if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3596  indicatePessimisticFixpoint();
3597  }
3598 
3599  /// See AbstractAttribute::manifest(...).
3600  ChangeStatus manifest(Attributor &A) override {
3601  Argument &Arg = *getAssociatedArgument();
3602  if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3603  if (A.registerFunctionSignatureRewrite(
3604  Arg, /* ReplacementTypes */ {},
3605  Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3606  Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3607  return ChangeStatus::CHANGED;
3608  }
3609  return ChangeStatus::UNCHANGED;
3610  }
3611 
3612  /// See AbstractAttribute::trackStatistics()
3613  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3614 };
3615 
3616 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3617  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3618  : AAIsDeadValueImpl(IRP, A) {}
3619 
3620  /// See AbstractAttribute::initialize(...).
3621  void initialize(Attributor &A) override {
3622  AAIsDeadValueImpl::initialize(A);
3623  if (isa<UndefValue>(getAssociatedValue()))
3624  indicatePessimisticFixpoint();
3625  }
3626 
3627  /// See AbstractAttribute::updateImpl(...).
3628  ChangeStatus updateImpl(Attributor &A) override {
3629  // TODO: Once we have call site specific value information we can provide
3630  // call site specific liveness information and then it makes
3631  // sense to specialize attributes for call site arguments instead of
3632  // redirecting requests to the callee argument.
3633  Argument *Arg = getAssociatedArgument();
3634  if (!Arg)
3635  return indicatePessimisticFixpoint();
3636  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3637  auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3638  return clampStateAndIndicateChange(getState(), ArgAA.getState());
3639  }
3640 
3641  /// See AbstractAttribute::manifest(...).
3642  ChangeStatus manifest(Attributor &A) override {
3643  CallBase &CB = cast<CallBase>(getAnchorValue());
3644  Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3645  assert(!isa<UndefValue>(U.get()) &&
3646  "Expected undef values to be filtered out!");
3647  UndefValue &UV = *UndefValue::get(U->getType());
3648  if (A.changeUseAfterManifest(U, UV))
3649  return ChangeStatus::CHANGED;
3650  return ChangeStatus::UNCHANGED;
3651  }
3652 
3653  /// See AbstractAttribute::trackStatistics()
3654  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3655 };
3656 
3657 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3658  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3659  : AAIsDeadFloating(IRP, A) {}
3660 
3661  /// See AAIsDead::isAssumedDead().
3662  bool isAssumedDead() const override {
3663  return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3664  }
3665 
3666  /// See AbstractAttribute::initialize(...).
3667  void initialize(Attributor &A) override {
3668  AAIsDeadFloating::initialize(A);
3669  if (isa<UndefValue>(getAssociatedValue())) {
3670  indicatePessimisticFixpoint();
3671  return;
3672  }
3673 
3674  // We track this separately as a secondary state.
3675  IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3676  }
3677 
3678  /// See AbstractAttribute::updateImpl(...).
3679  ChangeStatus updateImpl(Attributor &A) override {
3680  ChangeStatus Changed = ChangeStatus::UNCHANGED;
3681  if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3682  IsAssumedSideEffectFree = false;
3683  Changed = ChangeStatus::CHANGED;
3684  }
3685  if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3686  return indicatePessimisticFixpoint();
3687  return Changed;
3688  }
3689 
3690  /// See AbstractAttribute::trackStatistics()
3691  void trackStatistics() const override {
3692  if (IsAssumedSideEffectFree)
3693  STATS_DECLTRACK_CSRET_ATTR(IsDead)
3694  else
3695  STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3696  }
3697 
3698  /// See AbstractAttribute::getAsStr().
3699  const std::string getAsStr() const override {
3700  return isAssumedDead()
3701  ? "assumed-dead"
3702  : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3703  }
3704 
3705 private:
3706  bool IsAssumedSideEffectFree = true;
3707 };
3708 
3709 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3710  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3711  : AAIsDeadValueImpl(IRP, A) {}
3712 
3713  /// See AbstractAttribute::updateImpl(...).
3714  ChangeStatus updateImpl(Attributor &A) override {
3715 
3716  bool UsedAssumedInformation = false;
3717  A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3718  {Instruction::Ret}, UsedAssumedInformation);
3719 
3720  auto PredForCallSite = [&](AbstractCallSite ACS) {
3721  if (ACS.isCallbackCall() || !ACS.getInstruction())
3722  return false;
3723  return areAllUsesAssumedDead(A, *ACS.getInstruction());
3724  };
3725 
3726  if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3727  UsedAssumedInformation))
3728  return indicatePessimisticFixpoint();
3729 
3730  return ChangeStatus::UNCHANGED;
3731  }
3732 
3733  /// See AbstractAttribute::manifest(...).
3734  ChangeStatus manifest(Attributor &A) override {
3735  // TODO: Rewrite the signature to return void?
3736  bool AnyChange = false;
3737  UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3738  auto RetInstPred = [&](Instruction &I) {
3739  ReturnInst &RI = cast<ReturnInst>(I);
3740  if (!isa<UndefValue>(RI.getReturnValue()))
3741  AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3742  return true;
3743  };
3744  bool UsedAssumedInformation = false;
3745  A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3746  UsedAssumedInformation);
3747  return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3748  }
3749 
3750  /// See AbstractAttribute::trackStatistics()
3751  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3752 };
3753 
3754 struct AAIsDeadFunction : public AAIsDead {
3755  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3756 
3757  /// See AbstractAttribute::initialize(...).
3758  void initialize(Attributor &A) override {
3759  Function *F = getAnchorScope();
3760  if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3761  indicatePessimisticFixpoint();
3762  return;
3763  }
3764  ToBeExploredFrom.insert(&F->getEntryBlock().front());
3765  assumeLive(A, F->getEntryBlock());
3766  }
3767 
3768  /// See AbstractAttribute::getAsStr().
3769  const std::string getAsStr() const override {
3770  return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3771  std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3772  std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3773  std::to_string(KnownDeadEnds.size()) + "]";
3774  }
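// Editor's note: example output of getAsStr (values are illustrative):
//   "Live[#BB 4/7][#TBEP 2][#KDE 1]"
// i.e. 4 of the function's 7 basic blocks are assumed live, 2 instructions
// still need to be explored from, and 1 known dead end has been recorded.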
3775 
3776  /// See AbstractAttribute::manifest(...).
3777  ChangeStatus manifest(Attributor &A) override {
3778  assert(getState().isValidState() &&
3779  "Attempted to manifest an invalid state!");
3780 
3781  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3782  Function &F = *getAnchorScope();
3783 
3784  if (AssumedLiveBlocks.empty()) {
3785  A.deleteAfterManifest(F);
3786  return ChangeStatus::CHANGED;
3787  }
3788 
3789  // Flag to determine if we can change an invoke to a call assuming the
3790  // callee is nounwind. This is not possible if the personality of the
3791  // function allows catching asynchronous exceptions.
3792  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3793 
3794  KnownDeadEnds.set_union(ToBeExploredFrom);
3795  for (const Instruction *DeadEndI : KnownDeadEnds) {
3796  auto *CB = dyn_cast<CallBase>(DeadEndI);
3797  if (!CB)
3798  continue;
3799  const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3800  *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3801  bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3802  if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3803  continue;
3804 
3805  if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3806  A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3807  else
3808  A.changeToUnreachableAfterManifest(
3809  const_cast<Instruction *>(DeadEndI->getNextNode()));
3810  HasChanged = ChangeStatus::CHANGED;
3811  }
3812 
3813  STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3814  for (BasicBlock &BB : F)
3815  if (!AssumedLiveBlocks.count(&BB)) {
3816  A.deleteAfterManifest(BB);
3817  ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3818  HasChanged = ChangeStatus::CHANGED;
3819  }
3820 
3821  return HasChanged;
3822  }
3823 
3824  /// See AbstractAttribute::updateImpl(...).
3825  ChangeStatus updateImpl(Attributor &A) override;
3826 
3827  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3828  assert(From->getParent() == getAnchorScope() &&
3829  To->getParent() == getAnchorScope() &&
3830  "Used AAIsDead of the wrong function");
3831  return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3832  }
3833 
3834  /// See AbstractAttribute::trackStatistics()
3835  void trackStatistics() const override {}
3836 
3837  /// Returns true if the function is assumed dead.
3838  bool isAssumedDead() const override { return false; }
3839 
3840  /// See AAIsDead::isKnownDead().
3841  bool isKnownDead() const override { return false; }
3842 
3843  /// See AAIsDead::isAssumedDead(BasicBlock *).
3844  bool isAssumedDead(const BasicBlock *BB) const override {
3845  assert(BB->getParent() == getAnchorScope() &&
3846  "BB must be in the same anchor scope function.");
3847 
3848  if (!getAssumed())
3849  return false;
3850  return !AssumedLiveBlocks.count(BB);
3851  }
3852 
3853  /// See AAIsDead::isKnownDead(BasicBlock *).
3854  bool isKnownDead(const BasicBlock *BB) const override {
3855  return getKnown() && isAssumedDead(BB);
3856  }
3857 
3858  /// See AAIsDead::isAssumedDead(Instruction *I).
3859  bool isAssumedDead(const Instruction *I) const override {
3860  assert(I->getParent()->getParent() == getAnchorScope() &&
3861  "Instruction must be in the same anchor scope function.");
3862 
3863  if (!getAssumed())
3864  return false;
3865 
3866  // If it is not in AssumedLiveBlocks then it is for sure dead.
3867  // Otherwise, it can still be after a noreturn call in a live block.
3868  if (!AssumedLiveBlocks.count(I->getParent()))
3869  return true;
3870 
3871  // If it is not after a liveness barrier it is live.
3872  const Instruction *PrevI = I->getPrevNode();
3873  while (PrevI) {
3874  if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3875  return true;
3876  PrevI = PrevI->getPrevNode();
3877  }
3878  return false;
3879  }
3880 
3881  /// See AAIsDead::isKnownDead(Instruction *I).
3882  bool isKnownDead(const Instruction *I) const override {
3883  return getKnown() && isAssumedDead(I);
3884  }
3885 
3886  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3887  /// that internal functions called from \p BB should now be looked at.
3888  bool assumeLive(Attributor &A, const BasicBlock &BB) {
3889  if (!AssumedLiveBlocks.insert(&BB).second)
3890  return false;
3891 
3892  // We assume that all of BB is (probably) live now and if there are calls to
3893  // internal functions we will assume that those are now live as well. This
3894  // is a performance optimization for blocks with calls to a lot of internal
3895  // functions. It can however cause dead functions to be treated as live.
3896  for (const Instruction &I : BB)
3897  if (const auto *CB = dyn_cast<CallBase>(&I))
3898  if (const Function *F = CB->getCalledFunction())
3899  if (F->hasLocalLinkage())
3900  A.markLiveInternalFunction(*F);
3901  return true;
3902  }
3903 
3904  /// Collection of instructions that need to be explored again, e.g., we
3905  /// did assume they do not transfer control to (one of their) successors.
3906  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3907 
3908  /// Collection of instructions that are known to not transfer control.
3909  SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3910 
3911  /// Collection of all assumed live edges
3912  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3913 
3914  /// Collection of all assumed live BasicBlocks.
3915  DenseSet<const BasicBlock *> AssumedLiveBlocks;
3916 };
3917 
3918 static bool
3919 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3920  AbstractAttribute &AA,
3921  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3922  const IRPosition &IPos = IRPosition::callsite_function(CB);
3923 
3924  const auto &NoReturnAA =
3925  A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3926  if (NoReturnAA.isAssumedNoReturn())
3927  return !NoReturnAA.isKnownNoReturn();
3928  if (CB.isTerminator())
3929  AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3930  else
3931  AliveSuccessors.push_back(CB.getNextNode());
3932  return false;
3933 }
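// Editor's note: the boolean returned by these identifyAliveSuccessors
// helpers means "the result relies on assumed (not yet known) information".
// Above, a callee that is assumed but not known to be noreturn yields no
// alive successors and a true return value, so the caller re-queues the
// instruction and revisits it in a later update.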
3934 
3935 static bool
3936 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3937  AbstractAttribute &AA,
3938  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3939  bool UsedAssumedInformation =
3940  identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3941 
3942  // First, determine if we can change an invoke to a call assuming the
3943  // callee is nounwind. This is not possible if the personality of the
3944  // function allows catching asynchronous exceptions.
3945  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3946  AliveSuccessors.push_back(&II.getUnwindDest()->front());
3947  } else {
3948  const IRPosition &IPos = IRPosition::callsite_function(II);
3949  const auto &AANoUnw =
3950  A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3951  if (AANoUnw.isAssumedNoUnwind()) {
3952  UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3953  } else {
3954  AliveSuccessors.push_back(&II.getUnwindDest()->front());
3955  }
3956  }
3957  return UsedAssumedInformation;
3958 }
3959 
3960 static bool
3961 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3962  AbstractAttribute &AA,
3963  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3964  bool UsedAssumedInformation = false;
3965  if (BI.getNumSuccessors() == 1) {
3966  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3967  } else {
3968  std::optional<Constant *> C =
3969  A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3970  if (!C || isa_and_nonnull<UndefValue>(*C)) {
3971  // No value yet, assume both edges are dead.
3972  } else if (isa_and_nonnull<ConstantInt>(*C)) {
3973  const BasicBlock *SuccBB =
3974  BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3975  AliveSuccessors.push_back(&SuccBB->front());
3976  } else {
3977  AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3978  AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3979  UsedAssumedInformation = false;
3980  }
3981  }
3982  return UsedAssumedInformation;
3983 }
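// Editor's note (illustrative): if the Attributor can simplify the branch
// condition to the constant i1 true, only the first successor is enqueued
// (1 - 1 == 0); for i1 false, only the second. While no constant is known yet
// (or the condition is undef) no edge is marked alive and the branch is
// revisited later; in all other cases both successors are treated as alive.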
3984 
3985 static bool
3986 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3987  AbstractAttribute &AA,
3988  SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3989  bool UsedAssumedInformation = false;
3990  std::optional<Constant *> C =
3991  A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3992  if (!C || isa_and_nonnull<UndefValue>(C.value())) {
3993  // No value yet, assume all edges are dead.
3994  } else if (isa_and_nonnull<ConstantInt>(C.value())) {
3995  for (const auto &CaseIt : SI.cases()) {
3996  if (CaseIt.getCaseValue() == C.value()) {
3997  AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3998  return UsedAssumedInformation;
3999  }
4000  }
4001  AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4002  return UsedAssumedInformation;
4003  } else {
4004  for (const BasicBlock *SuccBB : successors(SI.getParent()))
4005  AliveSuccessors.push_back(&SuccBB->front());
4006  }
4007  return UsedAssumedInformation;
4008 }
4009 
4010 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4011  ChangeStatus Change = ChangeStatus::UNCHANGED;
4012 
4013  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4014  << getAnchorScope()->size() << "] BBs and "
4015  << ToBeExploredFrom.size() << " exploration points and "
4016  << KnownDeadEnds.size() << " known dead ends\n");
4017 
4018  // Copy and clear the list of instructions we need to explore from. It is
4019  // refilled with instructions the next update has to look at.
4020  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4021  ToBeExploredFrom.end());
4022  decltype(ToBeExploredFrom) NewToBeExploredFrom;
4023 
4024  SmallVector<const Instruction *, 8> AliveSuccessors;
4025  while (!Worklist.empty()) {
4026  const Instruction *I = Worklist.pop_back_val();
4027  LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4028 
4029  // Fast forward for uninteresting instructions. We could look for UB here
4030  // though.
4031  while (!I->isTerminator() && !isa<CallBase>(I))
4032  I = I->getNextNode();
4033 
4034  AliveSuccessors.clear();
4035 
4036  bool UsedAssumedInformation = false;
4037  switch (I->getOpcode()) {
4038  // TODO: look for (assumed) UB to backwards propagate "deadness".
4039  default:
4040  assert(I->isTerminator() &&
4041  "Expected non-terminators to be handled already!");
4042  for (const BasicBlock *SuccBB : successors(I->getParent()))
4043  AliveSuccessors.push_back(&SuccBB->front());
4044  break;
4045  case Instruction::Call:
4046  UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4047  *this, AliveSuccessors);
4048  break;
4049  case Instruction::Invoke:
4050  UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4051  *this, AliveSuccessors);
4052  break;
4053  case Instruction::Br:
4054  UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4055  *this, AliveSuccessors);
4056  break;
4057  case Instruction::Switch:
4058  UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4059  *this, AliveSuccessors);
4060  break;
4061  }
4062 
4063  if (UsedAssumedInformation) {
4064  NewToBeExploredFrom.insert(I);
4065  } else if (AliveSuccessors.empty() ||
4066  (I->isTerminator() &&
4067  AliveSuccessors.size() < I->getNumSuccessors())) {
4068  if (KnownDeadEnds.insert(I))
4069  Change = ChangeStatus::CHANGED;
4070  }
4071 
4072  LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4073  << AliveSuccessors.size() << " UsedAssumedInformation: "
4074  << UsedAssumedInformation << "\n");
4075 
4076  for (const Instruction *AliveSuccessor : AliveSuccessors) {
4077  if (!I->isTerminator()) {
4078  assert(AliveSuccessors.size() == 1 &&
4079  "Non-terminator expected to have a single successor!");
4080  Worklist.push_back(AliveSuccessor);
4081  } else {
4082  // record the assumed live edge
4083  auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4084  if (AssumedLiveEdges.insert(Edge).second)
4085  Change = ChangeStatus::CHANGED;
4086  if (assumeLive(A, *AliveSuccessor->getParent()))
4087  Worklist.push_back(AliveSuccessor);
4088  }
4089  }
4090  }
4091 
4092  // Check if the content of ToBeExploredFrom changed, ignore the order.
4093  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4094  llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4095  return !ToBeExploredFrom.count(I);
4096  })) {
4097  Change = ChangeStatus::CHANGED;
4098  ToBeExploredFrom = std::move(NewToBeExploredFrom);
4099  }
4100 
4101  // If we know everything is live there is no need to query for liveness.
4102  // Instead, indicating a pessimistic fixpoint will cause the state to be
4103  // "invalid" and all queries to be answered conservatively without lookups.
4104  // To be in this state we have to (1) have finished the exploration, (2) not
4105  // have discovered any non-trivial dead end, and (3) not have ruled any
4106  // unreachable code dead.
4107  if (ToBeExploredFrom.empty() &&
4108  getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4109  llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4110  return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4111  }))
4112  return indicatePessimisticFixpoint();
4113  return Change;
4114 }
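// Editor's note: the update above is a forward reachability sweep. Starting
// from the instructions in ToBeExploredFrom, it fast-forwards over plain
// (non-terminator, non-call) instructions, asks the per-opcode helpers which
// successors are alive, records assumed-live edges and blocks, and re-queues
// any instruction whose classification relied on assumed information so it
// gets re-examined in the next update round.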
4115 
4116 /// Liveness information for a call site.
4117 struct AAIsDeadCallSite final : AAIsDeadFunction {
4118  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4119  : AAIsDeadFunction(IRP, A) {}
4120 
4121  /// See AbstractAttribute::initialize(...).
4122  void initialize(Attributor &A) override {
4123  // TODO: Once we have call site specific value information we can provide
4124  // call site specific liveness information and then it makes
4125  // sense to specialize attributes for call sites instead of
4126  // redirecting requests to the callee.
4127  llvm_unreachable("Abstract attributes for liveness are not "
4128  "supported for call sites yet!");
4129  }
4130 
4131  /// See AbstractAttribute::updateImpl(...).
4132  ChangeStatus updateImpl(Attributor &A) override {
4133  return indicatePessimisticFixpoint();
4134  }
4135 
4136  /// See AbstractAttribute::trackStatistics()
4137  void trackStatistics() const override {}
4138 };
4139 } // namespace
4140 
4141 /// -------------------- Dereferenceable Argument Attribute --------------------
4142 
4143 namespace {
4144 struct AADereferenceableImpl : AADereferenceable {
4145  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4146  : AADereferenceable(IRP, A) {}
4147  using StateType = DerefState;
4148 
4149  /// See AbstractAttribute::initialize(...).
4150  void initialize(Attributor &A) override {
4151  Value &V = *getAssociatedValue().stripPointerCasts();
4152  SmallVector<Attribute, 4> Attrs;
4153  getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4154  Attrs, /* IgnoreSubsumingPositions */ false, &A);
4155  for (const Attribute &Attr : Attrs)
4156  takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4157 
4158  const IRPosition &IRP = this->getIRPosition();
4159  NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4160 
4161  bool CanBeNull, CanBeFreed;
4162  takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4163  A.getDataLayout(), CanBeNull, CanBeFreed));
4164 
4165  bool IsFnInterface = IRP.isFnInterfaceKind();
4166  Function *FnScope = IRP.getAnchorScope();
4167  if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4168  indicatePessimisticFixpoint();
4169  return;
4170  }
4171 
4172  if (Instruction *CtxI = getCtxI())
4173  followUsesInMBEC(*this, A, getState(), *CtxI);
4174  }
4175 
4176  /// See AbstractAttribute::getState()
4177  /// {
4178  StateType &getState() override { return *this; }
4179  const StateType &getState() const override { return *this; }
4180  /// }
4181 
4182  /// Helper function for collecting accessed bytes in must-be-executed-context
4183  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4184  DerefState &State) {
4185  const Value *UseV = U->get();
4186  if (!UseV->getType()->isPointerTy())
4187  return;
4188 
4189  std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4190  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4191  return;
4192 
4193  int64_t Offset;
4194  const Value *Base = GetPointerBaseWithConstantOffset(
4195  Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4196  if (Base && Base == &getAssociatedValue())
4197  State.addAccessedBytes(Offset, Loc->Size.getValue());
4198  }
4199 
4200  /// See followUsesInMBEC
4201  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4202  AADereferenceable::StateType &State) {
4203  bool IsNonNull = false;
4204  bool TrackUse = false;
4205  int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4206  A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4207  LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4208  << " for instruction " << *I << "\n");
4209 
4210  addAccessedBytesForUse(A, U, I, State);
4211  State.takeKnownDerefBytesMaximum(DerefBytes);
4212  return TrackUse;
4213  }
4214 
4215  /// See AbstractAttribute::manifest(...).
4216  ChangeStatus manifest(Attributor &A) override {
4217  ChangeStatus Change = AADereferenceable::manifest(A);
4218  if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4219  removeAttrs({Attribute::DereferenceableOrNull});
4220  return ChangeStatus::CHANGED;
4221  }
4222  return Change;
4223  }
4224 
4225  void getDeducedAttributes(LLVMContext &Ctx,
4226  SmallVectorImpl<Attribute> &Attrs) const override {
4227  // TODO: Add *_globally support
4228  if (isAssumedNonNull())
4229  Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4230  Ctx, getAssumedDereferenceableBytes()));
4231  else
4232  Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4233  Ctx, getAssumedDereferenceableBytes()));
4234  }
4235 
4236  /// See AbstractAttribute::getAsStr().
4237  const std::string getAsStr() const override {
4238  if (!getAssumedDereferenceableBytes())
4239  return "unknown-dereferenceable";
4240  return std::string("dereferenceable") +
4241  (isAssumedNonNull() ? "" : "_or_null") +
4242  (isAssumedGlobal() ? "_globally" : "") + "<" +
4243  std::to_string(getKnownDereferenceableBytes()) + "-" +
4244  std::to_string(getAssumedDereferenceableBytes()) + ">";
4245  }
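// Editor's note: example output of getAsStr (values are illustrative):
//   "dereferenceable<4-16>"        known 4 bytes, assumed 16, pointer nonnull
//   "dereferenceable_or_null<0-8>" the pointer may still be null
// The "_globally" suffix appears when the bytes are assumed dereferenceable
// globally rather than only at the current program point.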
4246 };
4247 
4248 /// Dereferenceable attribute for a floating value.
4249 struct AADereferenceableFloating : AADereferenceableImpl {
4250  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4251  : AADereferenceableImpl(IRP, A) {}
4252 
4253  /// See AbstractAttribute::updateImpl(...).
4254  ChangeStatus updateImpl(Attributor &A) override {
4255 
4256  bool Stripped;
4257  bool UsedAssumedInformation = false;
4258  SmallVector<AA::ValueAndContext> Values;
4259  if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4260  AA::AnyScope, UsedAssumedInformation)) {
4261  Values.push_back({getAssociatedValue(), getCtxI()});
4262  Stripped = false;
4263  } else {
4264  Stripped = Values.size() != 1 ||
4265  Values.front().getValue() != &getAssociatedValue();
4266  }
4267 
4268  const DataLayout &DL = A.getDataLayout();
4269  DerefState T;
4270 
4271  auto VisitValueCB = [&](const Value &V) -> bool {
4272  unsigned IdxWidth =
4273  DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4274  APInt Offset(IdxWidth, 0);
4275  const Value *Base = stripAndAccumulateOffsets(
4276  A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4277  /* AllowNonInbounds */ true);
4278 
4279  const auto &AA = A.getAAFor<AADereferenceable>(
4280  *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4281  int64_t DerefBytes = 0;
4282  if (!Stripped && this == &AA) {
4283  // Use IR information if we did not strip anything.
4284  // TODO: track globally.
4285  bool CanBeNull, CanBeFreed;
4286  DerefBytes =
4287  Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4288  T.GlobalState.indicatePessimisticFixpoint();
4289  } else {
4290  const DerefState &DS = AA.getState();
4291  DerefBytes = DS.DerefBytesState.getAssumed();
4292  T.GlobalState &= DS.GlobalState;
4293  }
4294 
4295  // For now we do not try to "increase" dereferenceability due to negative
4296  // indices as we first have to come up with code to deal with loops and
4297  // with overflows of the dereferenceable bytes.
4298  int64_t OffsetSExt = Offset.getSExtValue();
4299  if (OffsetSExt < 0)
4300  OffsetSExt = 0;
4301 
4302  T.takeAssumedDerefBytesMinimum(
4303  std::max(int64_t(0), DerefBytes - OffsetSExt));
4304 
4305  if (this == &AA) {
4306  if (!Stripped) {
4307  // If nothing was stripped IR information is all we got.
4308  T.takeKnownDerefBytesMaximum(
4309  std::max(int64_t(0), DerefBytes - OffsetSExt));
4310  T.indicatePessimisticFixpoint();
4311  } else if (OffsetSExt > 0) {
4312  // If something was stripped but there is circular reasoning we look
4313  // for the offset. If it is positive we basically decrease the
4314  // dereferenceable bytes in a circular loop now, which will simply
4315  // drive them down to the known value in a very slow way which we
4316  // can accelerate.
4317  T.indicatePessimisticFixpoint();
4318  }
4319  }
4320 
4321  return T.isValidState();
4322  };
4323 
4324  for (const auto &VAC : Values)
4325  if (!VisitValueCB(*VAC.getValue()))
4326  return indicatePessimisticFixpoint();
4327 
4328  return clampStateAndIndicateChange(getState(), T);
4329  }
4330 
4331  /// See AbstractAttribute::trackStatistics()
4332  void trackStatistics() const override {
4333  STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4334  }
4335 };
4336 
4337 /// Dereferenceable attribute for a return value.
4338 struct AADereferenceableReturned final
4339  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4340  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4341  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4342  IRP, A) {}
4343 
4344  /// See AbstractAttribute::trackStatistics()
4345  void trackStatistics() const override {
4346  STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4347  }
4348 };
4349 
4350 /// Dereferenceable attribute for an argument
4351 struct AADereferenceableArgument final
4352  : AAArgumentFromCallSiteArguments<AADereferenceable,
4353  AADereferenceableImpl> {
4354  using Base =
4355  AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4356  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4357  : Base(IRP, A) {}
4358 
4359  /// See AbstractAttribute::trackStatistics()
4360  void trackStatistics() const override {
4361  STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4362  }
4363 };
4364 
4365 /// Dereferenceable attribute for a call site argument.
4366 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4367  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4368  : AADereferenceableFloating(IRP, A) {}
4369 
4370  /// See AbstractAttribute::trackStatistics()
4371  void trackStatistics() const override {
4372  STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4373  }
4374 };
4375 
4376 /// Dereferenceable attribute deduction for a call site return value.
4377 struct AADereferenceableCallSiteReturned final
4378  : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4379  using Base =
4380  AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4381  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4382  : Base(IRP, A) {}
4383 
4384  /// See AbstractAttribute::trackStatistics()
4385  void trackStatistics() const override {
4386  STATS_DECLTRACK_CS_ATTR(dereferenceable);
4387  }
4388 };
4389 } // namespace
4390 
4391 // ------------------------ Align Argument Attribute ------------------------
4392 
4393 namespace {
4394 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4395  Value &AssociatedValue, const Use *U,
4396  const Instruction *I, bool &TrackUse) {
4397  // We need to follow common pointer manipulation uses to the accesses they
4398  // feed into.
4399  if (isa<CastInst>(I)) {
4400  // Follow all but ptr2int casts.
4401  TrackUse = !isa<PtrToIntInst>(I);
4402  return 0;
4403  }
4404  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4405  if (GEP->hasAllConstantIndices())
4406  TrackUse = true;
4407  return 0;
4408  }
4409 
4410  MaybeAlign MA;
4411  if (const auto *CB = dyn_cast<CallBase>(I)) {
4412  if (CB->isBundleOperand(U) || CB->isCallee(U))
4413  return 0;
4414 
4415  unsigned ArgNo = CB->getArgOperandNo(U);
4416  IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4417  // As long as we only use known information there is no need to track
4418  // dependences here.
4419  auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4420  MA = MaybeAlign(AlignAA.getKnownAlign());
4421  }
4422 
4423  const DataLayout &DL = A.getDataLayout();
4424  const Value *UseV = U->get();
4425  if (auto *SI = dyn_cast<StoreInst>(I)) {
4426  if (SI->getPointerOperand() == UseV)
4427  MA = SI->getAlign();
4428  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4429  if (LI->getPointerOperand() == UseV)
4430  MA = LI->getAlign();
4431  }
4432 
4433  if (!MA || *MA <= QueryingAA.getKnownAlign())
4434  return 0;
4435 
4436  unsigned Alignment = MA->value();
4437  int64_t Offset;
4438 
4439  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4440  if (Base == &AssociatedValue) {
4441  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4442  // So we can say that the maximum power of two which is a divisor of
4443  // gcd(Offset, Alignment) is an alignment.
4444 
4445  uint32_t gcd = std::gcd(uint32_t(abs((int32_t)Offset)), Alignment);
4446  Alignment = llvm::PowerOf2Floor(gcd);
4447  }
4448  }
4449 
4450  return Alignment;
4451 }
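// Editor's note: a minimal illustrative sketch of the gcd-based alignment rule
// used above; it is not part of the upstream file and the helper name is
// invented for illustration. If an access happens at Base + Offset and Base is
// aligned to BaseAlign, the largest power of two dividing gcd(|Offset|,
// BaseAlign) is a valid alignment for the access. Worked example:
// BaseAlign = 16, Offset = 20 gives gcd(20, 16) = 4, so the access is at
// least 4-byte aligned.
[[maybe_unused]] static unsigned
alignmentFromBaseAndOffset(uint32_t BaseAlign, int64_t Offset) {
  // Largest power-of-two divisor of gcd(|Offset|, BaseAlign), mirroring the
  // computation in getKnownAlignForUse above.
  uint32_t GCD = std::gcd(uint32_t(abs((int32_t)Offset)), BaseAlign);
  return llvm::PowerOf2Floor(GCD);
}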
4452 
4453 struct AAAlignImpl : AAAlign {
4454  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4455 
4456  /// See AbstractAttribute::initialize(...).
4457  void initialize(Attributor &A) override {
4458  SmallVector<Attribute, 4> Attrs;
4459  getAttrs({Attribute::Alignment}, Attrs);
4460  for (const Attribute &Attr : Attrs)
4461  takeKnownMaximum(Attr.getValueAsInt());
4462 
4463  Value &V = *getAssociatedValue().stripPointerCasts();
4464  takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4465 
4466  if (getIRPosition().isFnInterfaceKind() &&
4467  (!getAnchorScope() ||
4468  !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4469  indicatePessimisticFixpoint();
4470  return;
4471  }
4472 
4473  if (Instruction *CtxI = getCtxI())
4474  followUsesInMBEC(*this, A, getState(), *CtxI);
4475  }
4476 
4477  /// See AbstractAttribute::manifest(...).
4478  ChangeStatus manifest(Attributor &A) override {
4479  ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4480 
4481  // Check for users that allow alignment annotations.
4482  Value &AssociatedValue = getAssociatedValue();
4483  for (const Use &U : AssociatedValue.uses()) {
4484  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4485  if (SI->getPointerOperand() == &AssociatedValue)
4486  if (SI->getAlign() < getAssumedAlign()) {
4487  STATS_DECLTRACK(AAAlign, Store,
4488  "Number of times alignment added to a store");
4489  SI->setAlignment(getAssumedAlign());
4490  LoadStoreChanged = ChangeStatus::CHANGED;
4491  }
4492  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4493  if (LI->getPointerOperand() == &AssociatedValue)
4494  if (LI->getAlign() < getAssumedAlign()) {
4495  LI->setAlignment(getAssumedAlign());
4496  STATS_DECLTRACK(AAAlign, Load,
4497  "Number of times alignment added to a load");
4498  LoadStoreChanged = ChangeStatus::CHANGED;
4499  }
4500  }
4501  }
4502 
4503  ChangeStatus Changed = AAAlign::manifest(A);
4504 
4505  Align InheritAlign =
4506  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4507  if (InheritAlign >= getAssumedAlign())
4508  return LoadStoreChanged;
4509  return Changed | LoadStoreChanged;
4510  }
4511 
4512  // TODO: Provide a helper to determine the implied ABI alignment and check in
4513  // the existing manifest method and a new one for AAAlignImpl that value
4514  // to avoid making the alignment explicit if it did not improve.
4515 
4516  /// See AbstractAttribute::getDeducedAttributes
4517  void getDeducedAttributes(LLVMContext &Ctx,
4518  SmallVectorImpl<Attribute> &Attrs) const override {
4519  if (getAssumedAlign() > 1)
4520  Attrs.emplace_back(
4521  Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4522  }
4523 
4524  /// See followUsesInMBEC
4525  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4526  AAAlign::StateType &State) {
4527  bool TrackUse = false;
4528 
4529  unsigned int KnownAlign =
4530  getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4531  State.takeKnownMaximum(KnownAlign);
4532 
4533  return TrackUse;
4534  }
4535 
4536  /// See AbstractAttribute::getAsStr().
4537  const std::string getAsStr() const override {
4538  return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4539  std::to_string(getAssumedAlign().value()) + ">";
4540  }
4541 };
4542 
4543 /// Align attribute for a floating value.
4544 struct AAAlignFloating : AAAlignImpl {
4545  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4546 
4547  /// See AbstractAttribute::updateImpl(...).
4548  ChangeStatus updateImpl(Attributor &A) override {
4549  const DataLayout &DL = A.getDataLayout();
4550 
4551  bool Stripped;
4552  bool UsedAssumedInformation = false;
4553  SmallVector<AA::ValueAndContext> Values;
4554  if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4555  AA::AnyScope, UsedAssumedInformation)) {
4556  Values.push_back({getAssociatedValue(), getCtxI()});
4557  Stripped = false;
4558  } else {
4559  Stripped = Values.size() != 1 ||
4560  Values.front().getValue() != &getAssociatedValue();
4561  }
4562 
4563  StateType T;
4564  auto VisitValueCB = [&](Value &V) -> bool {
4565  if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4566  return true;
4567  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4568  DepClassTy::REQUIRED);
4569  if (!Stripped && this == &AA) {
4570  int64_t Offset;
4571  unsigned Alignment = 1;
4572  if (const Value *Base =
4573  GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4574  // TODO: Use AAAlign for the base too.
4575  Align PA = Base->getPointerAlignment(DL);
4576  // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4577  // So we can say that the maximum power of two which is a divisor of
4578  // gcd(Offset, Alignment) is an alignment.
4579 
4580  uint32_t gcd =
4581  std::gcd(uint32_t(abs((int32_t)Offset)), uint32_t(PA.value()));
4582  Alignment = llvm::PowerOf2Floor(gcd);
4583  } else {
4584  Alignment = V.getPointerAlignment(DL).value();
4585  }
4586  // Use only IR information if we did not strip anything.
4587  T.takeKnownMaximum(Alignment);
4588  T.indicatePessimisticFixpoint();
4589  } else {
4590  // Use abstract attribute information.
4591  const AAAlign::StateType &DS = AA.getState();
4592  T ^= DS;
4593  }
4594  return T.isValidState();
4595  };
4596 
4597  for (const auto &VAC : Values) {
4598  if (!VisitValueCB(*VAC.getValue()))
4599  return indicatePessimisticFixpoint();
4600  }
4601 
4602  // TODO: If we know we visited all incoming values, thus none are assumed
4603  // dead, we can take the known information from the state T.
4604  return clampStateAndIndicateChange(getState(), T);
4605  }
4606 
4607  /// See AbstractAttribute::trackStatistics()
4608  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4609 };
4610 
4611 /// Align attribute for function return value.
4612 struct AAAlignReturned final
4613  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4614  using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4615  AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4616 
4617  /// See AbstractAttribute::initialize(...).
4618  void initialize(Attributor &A) override {
4619  Base::initialize(A);
4620  Function *F = getAssociatedFunction();
4621  if (!F || F->isDeclaration())
4622  indicatePessimisticFixpoint();
4623  }
4624 
4625  /// See AbstractAttribute::trackStatistics()
4626  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4627 };
4628 
4629 /// Align attribute for function argument.
4630 struct AAAlignArgument final
4631  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4632  using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4633  AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4634 
4635  /// See AbstractAttribute::manifest(...).
4636  ChangeStatus manifest(Attributor &A) override {
4637  // If the associated argument is involved in a must-tail call we give up
4638  // because we would need to keep the argument alignments of caller and
4639  // callee in-sync. Just does not seem worth the trouble right now.
4640  if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4641  return ChangeStatus::UNCHANGED;
4642  return Base::manifest(A);
4643  }
4644 
4645  /// See AbstractAttribute::trackStatistics()
4646  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4647 };
4648 
4649 struct AAAlignCallSiteArgument final : AAAlignFloating {
4650  AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4651  : AAAlignFloating(IRP, A) {}
4652 
4653  /// See AbstractAttribute::manifest(...).
4654  ChangeStatus manifest(Attributor &A) override {
4655  // If the associated argument is involved in a must-tail call we give up
4656  // because we would need to keep the argument alignments of caller and
4657  // callee in-sync. Just does not seem worth the trouble right now.
4658  if (Argument *Arg = getAssociatedArgument())
4659  if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4660  return ChangeStatus::UNCHANGED;
4661  ChangeStatus Changed = AAAlignImpl::manifest(A);
4662  Align InheritAlign =
4663  getAssociatedValue().getPointerAlignment(A.getDataLayout());
4664  if (InheritAlign >= getAssumedAlign())
4665  Changed = ChangeStatus::UNCHANGED;
4666  return Changed;
4667  }
4668 
4669  /// See AbstractAttribute::updateImpl(Attributor &A).
4670  ChangeStatus updateImpl(Attributor &A) override {
4671  ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4672  if (Argument *Arg = getAssociatedArgument()) {
4673  // We only take known information from the argument
4674  // so we do not need to track a dependence.
4675  const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4676  *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4677  takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4678  }
4679  return Changed;
4680  }
4681 
4682  /// See AbstractAttribute::trackStatistics()
4683  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4684 };
4685 
4686 /// Align attribute deduction for a call site return value.
4687 struct AAAlignCallSiteReturned final
4688  : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4689  using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4690  AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4691  : Base(IRP, A) {}
4692 
4693  /// See AbstractAttribute::initialize(...).
4694  void initialize(Attributor &A) override {
4695  Base::initialize(A);
4696  Function *F = getAssociatedFunction();
4697  if (!F || F->isDeclaration())
4698  indicatePessimisticFixpoint();
4699  }
4700 
4701  /// See AbstractAttribute::trackStatistics()
4702  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4703 };
4704 } // namespace
4705 
4706 /// ------------------ Function No-Return Attribute ----------------------------
4707 namespace {
4708 struct AANoReturnImpl : public AANoReturn {
4709  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4710 
4711  /// See AbstractAttribute::initialize(...).
4712  void initialize(Attributor &A) override {
4713  AANoReturn::initialize(A);
4714  Function *F = getAssociatedFunction();
4715  if (!F || F->isDeclaration())
4716  indicatePessimisticFixpoint();
4717  }
4718 
4719  /// See AbstractAttribute::getAsStr().
4720  const std::string getAsStr() const override {
4721  return getAssumed() ? "noreturn" : "may-return";
4722  }
4723 
4724  /// See AbstractAttribute::updateImpl(Attributor &A).
4725  ChangeStatus updateImpl(Attributor &A) override {
4726  auto CheckForNoReturn = [](Instruction &) { return false; };
4727  bool UsedAssumedInformation = false;
4728  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4729  {(unsigned)Instruction::Ret},
4730  UsedAssumedInformation))
4731  return indicatePessimisticFixpoint();
4732  return ChangeStatus::UNCHANGED;
4733  }
4734 };
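// Illustrative example (assumed IR, not from this file): for
//
//   define void @f() {
//     call void @abort()
//     unreachable
//   }
//
// there is no reachable `ret` instruction, so checkForAllInstructions (which
// visits only Instruction::Ret and whose callback always returns false)
// succeeds vacuously and `noreturn` stays assumed; any live `ret` makes it
// fail and forces the pessimistic fixpoint.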
4735 
4736 struct AANoReturnFunction final : AANoReturnImpl {
4737  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4738  : AANoReturnImpl(IRP, A) {}
4739 
4740  /// See AbstractAttribute::trackStatistics()
4741  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4742 };
4743 
4744 /// NoReturn attribute deduction for a call site.
4745 struct AANoReturnCallSite final : AANoReturnImpl {
4746  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4747  : AANoReturnImpl(IRP, A) {}
4748 
4749  /// See AbstractAttribute::initialize(...).
4750  void initialize(Attributor &A) override {
4751  AANoReturnImpl::initialize(A);
4752  if (Function *F = getAssociatedFunction()) {
4753  const IRPosition &FnPos = IRPosition::function(*F);
4754  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4755  if (!FnAA.isAssumedNoReturn())
4756  indicatePessimisticFixpoint();
4757  }
4758  }
4759 
4760  /// See AbstractAttribute::updateImpl(...).
4761  ChangeStatus updateImpl(Attributor &A) override {
4762  // TODO: Once we have call site specific value information we can provide
4763  // call site specific liveness information and then it makes
4764  // sense to specialize attributes for call site arguments instead of
4765  // redirecting requests to the callee argument.
4766  Function *F = getAssociatedFunction();
4767  const IRPosition &FnPos = IRPosition::function(*F);
4768  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4769  return clampStateAndIndicateChange(getState(), FnAA.getState());
4770  }
4771 
4772  /// See AbstractAttribute::trackStatistics()
4773  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4774 };
4775 } // namespace
4776 
4777 /// ----------------------- Instance Info ---------------------------------
4778 
4779 namespace {
4780 /// A class to hold the state for instance-info attributes.
4781 struct AAInstanceInfoImpl : public AAInstanceInfo {
4782  AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4783  : AAInstanceInfo(IRP, A) {}
4784 
4785  /// See AbstractAttribute::initialize(...).
4786  void initialize(Attributor &A) override {
4787  Value &V = getAssociatedValue();
4788  if (auto *C = dyn_cast<Constant>(&V)) {
4789  if (C->isThreadDependent())
4790  indicatePessimisticFixpoint();
4791  else
4792  indicateOptimisticFixpoint();
4793  return;
4794  }
4795  if (auto *CB = dyn_cast<CallBase>(&V))
4796  if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4797  !CB->mayReadFromMemory()) {
4798  indicateOptimisticFixpoint();
4799  return;
4800  }
4801  }
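// Rough intuition for the two fast paths above (editorial sketch): a
// non-thread-dependent constant denotes the same object in every thread and
// every (recursive) invocation, so it can be fixed optimistically right away;
// an argument-free call that neither writes nor reads memory cannot depend on
// per-instance state either, so its result is treated as a single instance
// here as well.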
4802 
4803  /// See AbstractAttribute::updateImpl(...).
4804  ChangeStatus updateImpl(Attributor &A) override {
4805  ChangeStatus Changed = ChangeStatus::UNCHANGED;
4806 
4807  Value &V = getAssociatedValue();
4808  const Function *Scope = nullptr;
4809  if (auto *I = dyn_cast<Instruction>(&V))
4810  Scope = I->getFunction();
4811  if (auto *A = dyn_cast<Argument>(&V)) {
4812  Scope = A->getParent();
4813  if (!Scope->hasLocalLinkage())
4814  return Changed;
4815  }
4816  if (!Scope)
4817  return indicateOptimisticFixpoint();
4818 
4819  auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4820  *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4821  if (NoRecurseAA.isAssumedNoRecurse())
4822  return Changed;
4823 
4824  auto UsePred = [&](const Use &U, bool &Follow) {
4825  const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4826  if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4827  isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4828  Follow = true;
4829  return true;
4830  }
4831  if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4832  (isa<StoreInst>(UserI) &&
4833  cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4834  return true;
4835  if (auto *CB = dyn_cast<CallBase>(UserI)) {
4836  // This check does not guarantee uniqueness, but for now it ensures we
4837  // cannot end up with two versions of \p U while thinking there is one.
4838  if (!CB->getCalledFunction() ||
4839  !CB->getCalledFunction()->hasLocalLinkage())
4840  return true;
4841  if (!CB->isArgOperand(&U))
4842  return false;
4843  const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4844  *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4845  DepClassTy::OPTIONAL);
4846  if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4847  return false;
4848  // If this call base might reach the scope again we might forward the
4849  // argument back here. This is very conservative.
4850  if (AA::isPotentiallyReachable(
4851  A, *CB, *Scope, *this,
4852  [Scope](const Function &Fn) { return &Fn != Scope; }))
4853  return false;
4854  return true;
4855  }
4856  return false;
4857  };
4858 
4859  auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4860  if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4861  auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4862  if ((isa<AllocaInst>(Ptr) || isNoAliasCall(Ptr)) &&
4863  AA::isDynamicallyUnique(A, *this, *Ptr))
4864  return true;
4865  }
4866  return false;
4867  };
4868 
4869  if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4870  DepClassTy::OPTIONAL,
4871  /* IgnoreDroppableUses */ true, EquivalentUseCB))
4872  return indicatePessimisticFixpoint();
4873 
4874  return Changed;
4875  }
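// Worked example for the use predicate above (assumed IR, not from this
// file): for an argument %p of an internal function, uses such as
//   %g = getelementptr i8, ptr %p, i64 4      ; followed transitively
//   %v = load i32, ptr %g                     ; ok, does not duplicate %p
//   store i32 0, ptr %p                       ; ok, %p is only the address
// keep %p unique for the analysis, whereas
//   store ptr %p, ptr @global                 ; %p itself escapes
// or passing %p to a callee that may reach this scope again would pessimize
// it, since two runtime instances of %p could then be observed at the same
// program point.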
4876 
4877  /// See AbstractAttribute::getAsStr().
4878  const std::string getAsStr() const override {
4879  return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4880  }
4881 
4882  /// See AbstractAttribute::trackStatistics()
4883  void trackStatistics() const override {}
4884 };
4885 
4886 /// InstanceInfo attribute for floating values.
4887 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4888  AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4889  : AAInstanceInfoImpl(IRP, A) {}
4890 };
4891 
4892 /// InstanceInfo attribute for function arguments.
4893 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4894  AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4895  : AAInstanceInfoFloating(IRP, A) {}
4896 };
4897 
4898 /// InstanceInfo attribute for call site arguments.
4899 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4900  AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4901  : AAInstanceInfoImpl(IRP, A) {}
4902 
4903  /// See AbstractAttribute::updateImpl(...).
4904  ChangeStatus updateImpl(Attributor &A) override {
4905  // TODO: Once we have call site specific value information we can provide
4906  // call site specific liveness information and then it makes
4907  // sense to specialize attributes for call sites arguments instead of
4908  // sense to specialize attributes for call site arguments instead of
4909  Argument *Arg = getAssociatedArgument();
4910  if (!Arg)
4911  return indicatePessimisticFixpoint();
4912  const IRPosition &ArgPos = IRPosition::argument(*Arg);
4913  auto &ArgAA =
4914  A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4915  return clampStateAndIndicateChange(getState(), ArgAA.getState());
4916  }
4917 };
4918 
4919 /// InstanceInfo attribute for function return value.
4920 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4921  AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4922  : AAInstanceInfoImpl(IRP, A) {
4923  llvm_unreachable("InstanceInfo is not applicable to function returns!");
4924  }
4925 
4926  /// See AbstractAttribute::initialize(...).
4927  void initialize(Attributor &A) override {
4928  llvm_unreachable("InstanceInfo is not applicable to function returns!");
4929  }
4930 
4931  /// See AbstractAttribute::updateImpl(...).
4932  ChangeStatus updateImpl(Attributor &A) override {
4933  llvm_unreachable("InstanceInfo is not applicable to function returns!");
4934  }
4935 };
4936 
4937 /// InstanceInfo attribute deduction for a call site return value.
4938 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4939  AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4940  : AAInstanceInfoFloating(IRP, A) {}
4941 };
4942 } // namespace
4943 
4944 /// ----------------------- Variable Capturing ---------------------------------
4945 
4946 namespace {
4947 /// A class to hold the state for no-capture attributes.
4948 struct AANoCaptureImpl : public AANoCapture {
4949  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4950 
4951  /// See AbstractAttribute::initialize(...).
4952  void initialize(Attributor &A) override {
4953  if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4954  indicateOptimisticFixpoint();
4955  return;
4956  }
4957  Function *AnchorScope = getAnchorScope();
4958  if (isFnInterfaceKind() &&
4959  (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4960  indicatePessimisticFixpoint();
4961  return;
4962  }
4963 
4964  // You cannot "capture" null in the default address space.
4965  if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4966  getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4967  indicateOptimisticFixpoint();
4968  return;
4969  }
4970 
4971  const Function *F =
4972  isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4973 
4974  // Check what state the associated function can actually capture.
4975  if (F)
4976  determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4977  else
4978  indicatePessimisticFixpoint();
4979  }
4980 
4981  /// See AbstractAttribute::updateImpl(...).
4982  ChangeStatus updateImpl(Attributor &A) override;
4983 
4984  /// See AbstractAttribute::getDeducedAttributes(...).
4985  void getDeducedAttributes(LLVMContext &Ctx,
4986  SmallVectorImpl<Attribute> &Attrs) const override {
4987  if (!isAssumedNoCaptureMaybeReturned())
4988  return;
4989 
4990  if (isArgumentPosition()) {
4991  if (isAssumedNoCapture())
4992  Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4993  else if (ManifestInternal)
4994  Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4995  }
4996  }
4997 
4998  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
4999  /// depending on the ability of the function associated with \p IRP to capture
5000  /// state in memory and through "returning/throwing", respectively.
5001  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5002  const Function &F,
5003  BitIntegerState &State) {
5004  // TODO: Once we have memory behavior attributes we should use them here.
5005 
5006  // If we know we cannot communicate or write to memory, we do not care about
5007  // ptr2int anymore.
5008  if (F.onlyReadsMemory() && F.doesNotThrow() &&
5009  F.getReturnType()->isVoidTy()) {
5010  State.addKnownBits(NO_CAPTURE);
5011  return;
5012  }
5013 
5014  // A function cannot capture state in memory if it only reads memory; it can,
5015  // however, return/throw state, and that state might be influenced by the
5016  // pointer value, e.g., loading from a returned pointer might reveal a bit.
5017  if (F.onlyReadsMemory())
5018  State.addKnownBits(NOT_CAPTURED_IN_MEM);
5019 
5020  // A function cannot communicate state back if it does not throw
5021  // exceptions and does not return values.
5022  if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5023  State.addKnownBits(NOT_CAPTURED_IN_RET);
5024 
5025  // Check existing "returned" attributes.
5026  int ArgNo = IRP.getCalleeArgNo();
5027  if (F.doesNotThrow() && ArgNo >= 0) {
5028  for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5029  if (F.hasParamAttribute(u, Attribute::Returned)) {
5030  if (u == unsigned(ArgNo))
5031  State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5032  else if (F.onlyReadsMemory())
5033  State.addKnownBits(NO_CAPTURE);
5034  else
5035  State.addKnownBits(NOT_CAPTURED_IN_RET);
5036  break;
5037  }
5038  }
5039  }
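// Illustrative example (assumed IR, not from this file): for a function like
//
//   define void @observe(ptr %p) nounwind readonly {
//     ...
//     ret void
//   }
//
// the readonly + nounwind + void-return combination means %p can neither be
// stashed in memory nor handed back via a return value or exception, so
// NO_CAPTURE is known up front; a readonly function that returns a value only
// gets NOT_CAPTURED_IN_MEM, since the returned value may still encode bits of
// the pointer.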
5040 
5041  /// See AbstractAttribute::getAsStr().
5042  const std::string getAsStr() const override {
5043  if (isKnownNoCapture())
5044  return "known not-captured";
5045  if (isAssumedNoCapture())
5046  return "assumed not-captured";
5047  if (isKnownNoCaptureMaybeReturned())
5048  return "known not-captured-maybe-returned";
5049  if (isAssumedNoCaptureMaybeReturned())
5050  return "assumed not-captured-maybe-returned";
5051  return "assumed-captured";
5052  }
5053 
5054  /// Check the use \p U and update \p State accordingly. Return true if we
5055  /// should continue to update the state.
5056  bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5057  bool &Follow) {
5058  Instruction *UInst = cast<Instruction>(U.getUser());
5059  LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5060  << *UInst << "\n");
5061 
5062  // Deal with ptr2int by following uses.
5063  if (isa<PtrToIntInst>(UInst)) {
5064  LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5065  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5066  /* Return */ true);
5067  }
5068 
5069  // For stores we already checked if we can follow them; if they make it
5070  // here, we give up.
5071  if (isa<StoreInst>(UInst))
5072  return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5073  /* Return */ false);
5074 
5075  // Explicitly catch return instructions.
5076  if (isa<ReturnInst>(UInst)) {
5077  if (UInst->getFunction() == getAnchorScope())
5078  return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5079  /* Return */ true);
5080  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5081  /* Return */ true);
5082  }
5083 
5084  // For now we only use special logic for call sites. However, the tracker
5085  // itself knows about a lot of other non-capturing cases already.
5086  auto *CB = dyn_cast<CallBase>(UInst);
5087  if (!CB || !CB->isArgOperand(&U))
5088  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5089  /* Return */ true);
5090 
5091  unsigned ArgNo = CB->getArgOperandNo(&U);
5092  const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5093  // If we have an abstract no-capture attribute for the argument we can use
5094  // it to justify a non-capture attribute here. This allows recursion!
5095  auto &ArgNoCaptureAA =
5096  A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5097  if (ArgNoCaptureAA.isAssumedNoCapture())
5098  return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5099  /* Return */ false);
5100  if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5101  Follow = true;
5102  return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5103  /* Return */ false);
5104  }
5105 
5106  // Lastly, we could not find a reason to assume no-capture, so we do not.
5107  return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5108  /* Return */ true);
5109  }
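// Illustrative classification under checkUse (assumed IR, not from this
// file):
//   %i = ptrtoint ptr %p to i64    ; worst case: captured in mem/int/ret
//   store ptr %p, ptr %slot        ; a store that made it here: captured in mem
//   ret ptr %p                     ; captured via the return value
//   call void @g(ptr %p)           ; not captured if @g's matching argument
//                                  ; is assumed nocapture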
5110 
5111  /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5112  /// \p CapturedInRet, then return true if we should continue updating the
5113  /// state.
5114  static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5115  bool CapturedInInt, bool CapturedInRet) {
5116  LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5117  << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5118  if (CapturedInMem)
5119  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5120  if (CapturedInInt)
5121  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5122  if (CapturedInRet)
5123  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5124  return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5125  }
5126 };
5127 
5128 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5129  const IRPosition &IRP = getIRPosition();
5130  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5131  : &IRP.getAssociatedValue();
5132  if (!V)
5133  return indicatePessimisticFixpoint();
5134 
5135  const Function *F =
5136  isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5137  assert(F && "Expected a function!");
5138  const IRPosition &FnPos = IRPosition::function(*F);
5139 
5140  AANoCapture::StateType T;
5141 
5142  // Readonly means we cannot capture through memory.
5143  bool IsKnown;
5144  if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5145  T.addKnownBits(NOT_CAPTURED_IN_MEM);
5146  if (IsKnown)
5147  addKnownBits(NOT_CAPTURED_IN_MEM);
5148  }
5149 
5150  // Make sure all returned values are different from the underlying value.
5151  // TODO: we could do this in a more sophisticated way inside
5152  // AAReturnedValues, e.g., track all values that escape through returns
5153  // directly somehow.
5154  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5155  if (!RVAA.getState().isValidState())
5156  return false;
5157  bool SeenConstant = false;
5158  for (const auto &It : RVAA.returned_values()) {
5159  if (isa<Constant>(It.first)) {
5160  if (SeenConstant)
5161  return false;
5162  SeenConstant = true;
5163  } else if (!isa<Argument>(It.first) ||
5164  It.first == getAssociatedArgument())
5165  return false;
5166  }
5167  return true;
5168  };
5169 
5170  const auto &NoUnwindAA =
5171  A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5172  if (NoUnwindAA.isAssumedNoUnwind()) {
5173  bool IsVoidTy = F->getReturnType()->isVoidTy();
5174  const AAReturnedValues *RVAA =
5175  IsVoidTy ? nullptr
5176  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5177 
5178  DepClassTy::OPTIONAL);
5179  if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5180  T.addKnownBits(NOT_CAPTURED_IN_RET);
5181  if (T.isKnown(NOT_CAPTURED_IN_MEM))
5182  return ChangeStatus::UNCHANGED;
5183  if (NoUnwindAA.isKnownNoUnwind() &&
5184  (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5185  addKnownBits(NOT_CAPTURED_IN_RET);
5186  if (isKnown(NOT_CAPTURED_IN_MEM))
5187  return indicateOptimisticFixpoint();
5188  }
5189  }
5190  }
5191 
5192  auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5193  const auto &DerefAA = A.getAAFor<AADereferenceable>(
5194  *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5195  return DerefAA.getAssumedDereferenceableBytes();
5196  };
5197 
5198  auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5199  switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5200  case UseCaptureKind::NO_CAPTURE:
5201  return true;
5202  case UseCaptureKind::MAY_CAPTURE:
5203  return checkUse(A, T, U, Follow);
5204  case UseCaptureKind::PASSTHROUGH:
5205  Follow = true;
5206  return true;
5207  }
5208  llvm_unreachable("Unexpected use capture kind!");
5209  };
5210 
5211  if (!A.checkForAllUses(UseCheck, *this, *V))
5212  return indicatePessimisticFixpoint();
5213 
5214  AANoCapture::StateType &S = getState();
5215  auto Assumed = S.getAssumed();
5216  S.intersectAssumedBits(T.getAssumed());
5217  if (!isAssumedNoCaptureMaybeReturned())
5218  return indicatePessimisticFixpoint();
5219  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5220  : ChangeStatus::CHANGED;
5221 }
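// Note on CheckReturnedArgs above (editorial): an argument can keep
// NOT_CAPTURED_IN_RET as long as every returned value is a *different*
// argument or at most one constant, e.g. (assumed IR)
//
//   define internal ptr @pick(ptr %a, ptr %b) nounwind {
//     ret ptr %b
//   }
//
// leaves %a not captured through the return value, while `ret ptr %a` would
// prevent that bit from being added here.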
5222 
5223 /// NoCapture attribute for function arguments.
5224 struct AANoCaptureArgument final : AANoCaptureImpl {
5225  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5226  : AANoCaptureImpl(IRP, A) {}
5227 
5228  /// See AbstractAttribute::trackStatistics()
5229  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5230 };
5231 
5232 /// NoCapture attribute for call site arguments.
5233 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5234  AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5235  : AANoCaptureImpl(IRP, A) {}
5236 
5237  /// See AbstractAttribute::initialize(...).
5238  void initialize(Attributor &A) override {
5239  if (Argument *Arg = getAssociatedArgument())
5240  if (Arg->hasByValAttr())
5241  indicateOptimisticFixpoint();
5242  AANoCaptureImpl::initialize(A);
5243  }
5244 
5245