//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//   void trackStatistics() const override {
//     STATS_DECLTRACK_ARG_ATTR(returned)
//   }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
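
// For example, STATS_DECLTRACK_FN_ATTR(nounwind) expands (roughly) to:
//   STATISTIC(NumIRFunction_nounwind,
//             "Number of functions marked 'nounwind'");
//   ++NumIRFunction_nounwind;
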
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::UNCHANGED ? l : r;
}
///}
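
// For example, combining the status of two updates keeps CHANGED sticky:
//   (ChangeStatus::CHANGED | ChangeStatus::UNCHANGED) == ChangeStatus::CHANGED
//   (ChangeStatus::CHANGED & ChangeStatus::UNCHANGED) == ChangeStatus::UNCHANGED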

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA);

  // All values have been visited.
  return true;
}
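
// A typical (hypothetical) use of genericValueTraversal collects all leaf
// values that may reach a position, e.g., with a state of type
// SmallVectorImpl<Value *>:
//   auto VisitValueCB = [](Value &V, SmallVectorImpl<Value *> &Leaves,
//                          bool Stripped) {
//     Leaves.push_back(&V);
//     return true;
//   };
// For `%sel = select i1 %c, i32* %a, i32* %b` this would visit %a and %b.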

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
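
// For example, a new dereferenceable(8) is "equal or worse" than an existing
// dereferenceable(16) (16 >= 8), so it would not replace the existing
// attribute; a new dereferenceable(32) would.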

/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
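
// For example, adding align(16) to a position that already carries align(8)
// replaces the old attribute and returns true, while adding align(4) is
// subsumed by the existing align(8) and returns false.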

static const Value *getPointerOperand(const Instruction *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (!LI->isVolatile())
      return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    if (!SI->isVolatile())
      return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    if (!CXI->isVolatile())
      return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    if (!RMWI->isVolatile())
      return RMWI->getPointerOperand();

  return nullptr;
}
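
// For example, for `store i32 0, i32* %p` this returns %p, while for a
// volatile load or a non-memory instruction it returns nullptr.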

static const Value *getBasePointerOfAccessPointerOperand(const Instruction *I,
                                                         int64_t &BytesOffset,
                                                         const DataLayout &DL) {
  const Value *Ptr = getPointerOperand(I);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          /*AllowNonInbounds*/ false);
}
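
// For example (hypothetical IR), for
//   %gep = getelementptr inbounds i32, i32* %base, i64 2
//   %val = load i32, i32* %gep
// the load's base pointer is %base with BytesOffset == 8.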

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following some generic code that will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
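
// For example, for the argument of a direct call without operand bundles the
// iterator visits, in order: the call site argument position itself, the
// callee argument, the callee function, and the value position of the
// associated value.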

bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
        return true;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  return false;
}

void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs) {
      const Attribute &Attr = EquivIRP.getAttr(AK);
      if (Attr.getKindAsEnum() == AK)
        Attrs.push_back(Attr);
    }
}

void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}

namespace {
/// Helper functions to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as-in update is
/// required to be run again).
///
///{
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);

template <>
ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
                                                       const IntegerState &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

template <>
ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
                                                       const BooleanState &R) {
  return clampStateAndIndicateChange<IntegerState>(S, R);
}
///}
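
// For example, clamping a state that assumes "nonnull" with one that does not
// drops the assumption and returns ChangeStatus::CHANGED; if both states
// already agree, ChangeStatus::UNCHANGED is returned.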

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class to compose two generic deductions.
template <typename AAType, typename Base, typename StateType,
          template <typename...> class F, template <typename...> class G>
struct AAComposeTwoGenericDeduction
    : public F<AAType, G<AAType, Base, StateType>, StateType> {
  AAComposeTwoGenericDeduction(const IRPosition &IRP)
      : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus ChangedF =
        F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
    ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
    return ChangedF | ChangedG;
  }
};

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want to
  // join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const typename AAType::StateType &>(AA.getState()));
  }
};

/// Helper class for generic deduction using must-be-executed-context.
/// The base class is required to have a `followUse` method:
///
///   bool followUse(Attributor &A, const Use *U, const Instruction *I)
///
/// where \p U is the underlying use and \p I the user of \p U. `followUse`
/// returns true if the value should be tracked transitively.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  void initialize(Attributor &A) override {
    Base::initialize(A);
    IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    if (!CtxI)
      return;

    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    SetVector<const Use *> NextUses;

    for (const Use *U : Uses) {
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
        bool Found = EIt.count(UserI);
        while (!Found && ++EIt != EEnd)
          Found = EIt.getCurrentInst() == UserI;
        if (Found && Base::followUse(A, U, UserI))
          for (const Use &Us : UserI->uses())
            NextUses.insert(&Us);
      }
    }
    for (const Use *U : NextUses)
      Uses.insert(U);

    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
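
// For example (hypothetical IR), nounwind can be deduced for
//   define i32 @add(i32 %a, i32 %b) {
//     %sum = add i32 %a, %b
//     ret i32 %sum
//   }
// since no contained instruction mayThrow() and there are no call-like
// instructions that could transitively unwind.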

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  DenseMap<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0 || CB.isMustTailCall())
      return ChangeStatus::UNCHANGED;
    CB.replaceAllUsesWith(&C);
    return ChangeStatus::CHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    getIRPosition() = IRPosition::argument(*UniqueRVArg);
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}
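
// For example, a function that returns either `undef` or an argument %a has
// the assumed unique return value %a, while a function returning %a or %b
// (neither undef) has none, so nullptr is returned.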

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred) const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << static_cast<const AbstractAttribute &>(RetValAA)
                      << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // thus if all return values can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1278 /// ------------------------ NoSync Function Attribute -------------------------
1279 
1280 struct AANoSyncImpl : AANoSync {
1281  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1282 
1283  const std::string getAsStr() const override {
1284  return getAssumed() ? "nosync" : "may-sync";
1285  }
1286 
1287  /// See AbstractAttribute::updateImpl(...).
1288  ChangeStatus updateImpl(Attributor &A) override;
1289 
1290  /// Helper function used to determine whether an instruction is non-relaxed
1291  /// atomic. In other words, if an atomic instruction does not have unordered
1292  /// or monotonic ordering
1293  static bool isNonRelaxedAtomic(Instruction *I);
1294 
1295  /// Helper function used to determine whether an instruction is volatile.
1296  static bool isVolatile(Instruction *I);
1297 
1298  /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1299  /// memset).
1300  static bool isNoSyncIntrinsic(Instruction *I);
1301 };

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}
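
// For example (hypothetical IR), `load atomic i32, i32* %p acquire, align 4`
// is non-relaxed, while a `monotonic` load is relaxed and does not prevent
// nosync on its own.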

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
         "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      if (ICS.hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !ImmutableCallSite(&I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      if (ICS.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// ------------------------ NonNull Argument Attribute ------------------------
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    if (ICS.isBundleOperand(U))
      return 0;

    if (ICS.isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  int64_t Offset;
  if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
    if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
      int64_t DerefBytes =
          Offset + (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());

      IsNonNull |= !NullPointerIsDefined;
      return DerefBytes;
    }
  }
  if (const Value *Base =
          GetPointerBaseWithConstantOffset(UseV, Offset, DL,
                                           /*AllowNonInbounds*/ false)) {
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRPosition::value(*Base));
    IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
    IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
    int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
    return std::max(int64_t(0), DerefBytes - Offset);
  }

  return 0;
}
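
// For example (hypothetical IR), if %p is accessed via `store i32 0, i32* %p`
// in a function where null is not a valid pointer, the access implies that %p
// is nonnull and dereferenceable(4) at that program point.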
1589 
1590 struct AANonNullImpl : AANonNull {
1591  AANonNullImpl(const IRPosition &IRP)
1592  : AANonNull(IRP),
1593  NullIsDefined(NullPointerIsDefined(
1594  getAnchorScope(),
1595  getAssociatedValue().getType()->getPointerAddressSpace())) {}
1596 
1597  /// See AbstractAttribute::initialize(...).
1598  void initialize(Attributor &A) override {
1599  if (!NullIsDefined &&
1600  hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1601  indicateOptimisticFixpoint();
1602  else
1603  AANonNull::initialize(A);
1604  }
1605 
1606  /// See AAFromMustBeExecutedContext
1607  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1608  bool IsNonNull = false;
1609  bool TrackUse = false;
1610  getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1611  IsNonNull, TrackUse);
1612  takeKnownMaximum(IsNonNull);
1613  return TrackUse;
1614  }
1615 
1616  /// See AbstractAttribute::getAsStr().
1617  const std::string getAsStr() const override {
1618  return getAssumed() ? "nonnull" : "may-null";
1619  }
1620 
1621  /// Flag to determine if the underlying value can be null and still allow
1622  /// valid accesses.
1623  const bool NullIsDefined;
1624 };
1625 
1626 /// NonNull attribute for a floating value.
1627 struct AANonNullFloating
1628  : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1629  using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1630  AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1631 
1632  /// See AbstractAttribute::initialize(...).
1633  void initialize(Attributor &A) override {
1634  Base::initialize(A);
1635 
1636  if (isAtFixpoint())
1637  return;
1638 
1639  const IRPosition &IRP = getIRPosition();
1640  const Value &V = IRP.getAssociatedValue();
1641  const DataLayout &DL = A.getDataLayout();
1642 
1643  // TODO: This context sensitive query should be removed once we can do
1644  // context sensitive queries in the genericValueTraversal below.
1645  if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
1646  /* TODO: DT */ nullptr))
1647  indicateOptimisticFixpoint();
1648  }
1649 
1650  /// See AbstractAttribute::updateImpl(...).
1651  ChangeStatus updateImpl(Attributor &A) override {
1652  ChangeStatus Change = Base::updateImpl(A);
1653  if (isKnownNonNull())
1654  return Change;
1655 
1656  if (!NullIsDefined) {
1657  const auto &DerefAA = A.getAAFor<AADereferenceable>(*this, getIRPosition());
1658  if (DerefAA.getAssumedDereferenceableBytes())
1659  return Change;
1660  }
1661 
1662  const DataLayout &DL = A.getDataLayout();
1663 
1664  auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1665  bool Stripped) -> bool {
1666  const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1667  if (!Stripped && this == &AA) {
1668  if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
1669  /* CtxI */ getCtxI(),
1670  /* TODO: DT */ nullptr))
1671  T.indicatePessimisticFixpoint();
1672  } else {
1673  // Use abstract attribute information.
1674  const AANonNull::StateType &NS =
1675  static_cast<const AANonNull::StateType &>(AA.getState());
1676  T ^= NS;
1677  }
1678  return T.isValidState();
1679  };
1680 
1681  StateType T;
1682  if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1683  T, VisitValueCB))
1684  return indicatePessimisticFixpoint();
1685 
1686  return clampStateAndIndicateChange(getState(), T);
1687  }
1688 
1689  /// See AbstractAttribute::trackStatistics()
1690  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nonnull) }
1691 };
1692 
1693 /// NonNull attribute for function return value.
1694 struct AANonNullReturned final
1695  : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1696  AANonNullReturned(const IRPosition &IRP)
1697  : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1698 
1699  /// See AbstractAttribute::trackStatistics()
1700  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1701 };
1702 
1703 /// NonNull attribute for function argument.
1704 struct AANonNullArgument final
1705  : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1706  AANonNullImpl> {
1707  AANonNullArgument(const IRPosition &IRP)
1708  : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1709  AANonNullImpl>(
1710  IRP) {}
1711 
1712  /// See AbstractAttribute::trackStatistics()
1713  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1714 };
1715 
1716 struct AANonNullCallSiteArgument final : AANonNullFloating {
1717  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1718 
1719  /// See AbstractAttribute::trackStatistics()
1720  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1721 };
1722 
1723 /// NonNull attribute for a call site return position.
1724 struct AANonNullCallSiteReturned final
1725  : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1726  AANonNullImpl> {
1727  AANonNullCallSiteReturned(const IRPosition &IRP)
1728  : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1729  AANonNullImpl>(
1730  IRP) {}
1731 
1732  /// See AbstractAttribute::trackStatistics()
1733  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1734 };
1735 
1736 /// ------------------------ No-Recurse Attributes ----------------------------
1737 
1738 struct AANoRecurseImpl : public AANoRecurse {
1739  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1740 
1741  /// See AbstractAttribute::getAsStr()
1742  const std::string getAsStr() const override {
1743  return getAssumed() ? "norecurse" : "may-recurse";
1744  }
1745 };
1746 
1747 struct AANoRecurseFunction final : AANoRecurseImpl {
1748  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1749 
1750  /// See AbstractAttribute::initialize(...).
1751  void initialize(Attributor &A) override {
1752  AANoRecurseImpl::initialize(A);
1753  if (const Function *F = getAnchorScope())
1754  if (A.getInfoCache().getSccSize(*F) == 1)
1755  return;
1756  indicatePessimisticFixpoint();
1757  }
1758 
1759  /// See AbstractAttribute::updateImpl(...).
1760  ChangeStatus updateImpl(Attributor &A) override {
1761 
1762  auto CheckForNoRecurse = [&](Instruction &I) {
1763  ImmutableCallSite ICS(&I);
1764  if (ICS.hasFnAttr(Attribute::NoRecurse))
1765  return true;
1766 
1767  const auto &NoRecurseAA =
1768  A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1769  if (!NoRecurseAA.isAssumedNoRecurse())
1770  return false;
1771 
1772  // Recursion to the same function
1773  if (ICS.getCalledFunction() == getAnchorScope())
1774  return false;
1775 
1776  return true;
1777  };
1778 
1779  if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1780  return indicatePessimisticFixpoint();
1781  return ChangeStatus::UNCHANGED;
1782  }
1783 
1784  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1785 };
1786 
1787  /// NoRecurse attribute deduction for a call site.
1788 struct AANoRecurseCallSite final : AANoRecurseImpl {
1789  AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1790 
1791  /// See AbstractAttribute::initialize(...).
1792  void initialize(Attributor &A) override {
1793  AANoRecurseImpl::initialize(A);
1794  Function *F = getAssociatedFunction();
1795  if (!F)
1796  indicatePessimisticFixpoint();
1797  }
1798 
1799  /// See AbstractAttribute::updateImpl(...).
1800  ChangeStatus updateImpl(Attributor &A) override {
1801  // TODO: Once we have call site specific value information we can provide
1802  // call site specific liveness information and then it makes
1803  // sense to specialize attributes for call site arguments instead of
1804  // redirecting requests to the callee argument.
1805  Function *F = getAssociatedFunction();
1806  const IRPosition &FnPos = IRPosition::function(*F);
1807  auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1808  return clampStateAndIndicateChange(
1809  getState(),
1810  static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1811  }
1812 
1813  /// See AbstractAttribute::trackStatistics()
1814  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1815 };
1816 
1817 /// ------------------------ Will-Return Attributes ----------------------------
1818 
1819 // Helper function that checks whether a function has any cycle.
1820  // TODO: Replace with more efficient code
1821 static bool containsCycle(Function &F) {
1822  SmallPtrSet<BasicBlock *, 32> Visited;
1823 
1824  // Traverse BB by dfs and check whether successor is already visited.
1825  for (BasicBlock *BB : depth_first(&F)) {
1826  Visited.insert(BB);
1827  for (auto *SuccBB : successors(BB)) {
1828  if (Visited.count(SuccBB))
1829  return true;
1830  }
1831  }
1832  return false;
1833 }
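// An illustrative (made-up) example: the DFS above flags a self-loop such as
//
//   loop:                                 ; preds = %loop, %entry
//     br label %loop
//
// because %loop shows up as its own, already visited, successor. Note that an
// already visited successor does not have to close a real cycle: the join
// block of an if-then-else is flagged as well, which is conservative but safe
// for the endless-loop check below.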
1834 
1835  // Helper function that checks whether the function has a loop which might
1836  // become an endless loop.
1837  // FIXME: Any cycle is regarded as an endless loop for now.
1838  // We have to allow some patterns.
1839 static bool containsPossiblyEndlessLoop(Function *F) {
1840  return !F || !F->hasExactDefinition() || containsCycle(*F);
1841 }
1842 
1843 struct AAWillReturnImpl : public AAWillReturn {
1844  AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
1845 
1846  /// See AbstractAttribute::initialize(...).
1847  void initialize(Attributor &A) override {
1848  AAWillReturn::initialize(A);
1849 
1850  Function *F = getAssociatedFunction();
1851  if (containsPossiblyEndlessLoop(F))
1852  indicatePessimisticFixpoint();
1853  }
1854 
1855  /// See AbstractAttribute::updateImpl(...).
1856  ChangeStatus updateImpl(Attributor &A) override {
1857  auto CheckForWillReturn = [&](Instruction &I) {
1858  IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
1859  const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
1860  if (WillReturnAA.isKnownWillReturn())
1861  return true;
1862  if (!WillReturnAA.isAssumedWillReturn())
1863  return false;
1864  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
1865  return NoRecurseAA.isAssumedNoRecurse();
1866  };
1867 
1868  if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
1869  return indicatePessimisticFixpoint();
1870 
1871  return ChangeStatus::UNCHANGED;
1872  }
1873 
1874  /// See AbstractAttribute::getAsStr()
1875  const std::string getAsStr() const override {
1876  return getAssumed() ? "willreturn" : "may-noreturn";
1877  }
1878 };
1879 
1880 struct AAWillReturnFunction final : AAWillReturnImpl {
1881  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
1882 
1883  /// See AbstractAttribute::trackStatistics()
1884  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
1885 };
1886 
1887  /// WillReturn attribute deduction for a call site.
1888 struct AAWillReturnCallSite final : AAWillReturnImpl {
1889  AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
1890 
1891  /// See AbstractAttribute::initialize(...).
1892  void initialize(Attributor &A) override {
1893  AAWillReturnImpl::initialize(A);
1894  Function *F = getAssociatedFunction();
1895  if (!F)
1896  indicatePessimisticFixpoint();
1897  }
1898 
1899  /// See AbstractAttribute::updateImpl(...).
1900  ChangeStatus updateImpl(Attributor &A) override {
1901  // TODO: Once we have call site specific value information we can provide
1902  // call site specific liveness information and then it makes
1903  // sense to specialize attributes for call site arguments instead of
1904  // redirecting requests to the callee argument.
1905  Function *F = getAssociatedFunction();
1906  const IRPosition &FnPos = IRPosition::function(*F);
1907  auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
1908  return clampStateAndIndicateChange(
1909  getState(),
1910  static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
1911  }
1912 
1913  /// See AbstractAttribute::trackStatistics()
1914  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
1915 };
1916 
1917 /// ------------------------ NoAlias Argument Attribute ------------------------
1918 
1919 struct AANoAliasImpl : AANoAlias {
1920  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
1921 
1922  const std::string getAsStr() const override {
1923  return getAssumed() ? "noalias" : "may-alias";
1924  }
1925 };
1926 
1927 /// NoAlias attribute for a floating value.
1928 struct AANoAliasFloating final : AANoAliasImpl {
1929  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
1930 
1931  /// See AbstractAttribute::initialize(...).
1932  void initialize(Attributor &A) override {
1933  AANoAliasImpl::initialize(A);
1934  Value &Val = getAssociatedValue();
1935  if (isa<AllocaInst>(Val))
1936  indicateOptimisticFixpoint();
1937  if (isa<ConstantPointerNull>(Val) &&
1938  Val.getType()->getPointerAddressSpace() == 0)
1939  indicateOptimisticFixpoint();
1940  }
1941 
1942  /// See AbstractAttribute::updateImpl(...).
1943  ChangeStatus updateImpl(Attributor &A) override {
1944  // TODO: Implement this.
1945  return indicatePessimisticFixpoint();
1946  }
1947 
1948  /// See AbstractAttribute::trackStatistics()
1949  void trackStatistics() const override {
1950  STATS_DECLTRACK_FLOATING_ATTR(noalias)
1951  }
1952 };
1953 
1954 /// NoAlias attribute for an argument.
1955 struct AANoAliasArgument final
1956  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
1957  AANoAliasArgument(const IRPosition &IRP)
1958  : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
1959 
1960  /// See AbstractAttribute::trackStatistics()
1961  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
1962 };
1963 
1964 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
1965  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
1966 
1967  /// See AbstractAttribute::initialize(...).
1968  void initialize(Attributor &A) override {
1969  // See callsite argument attribute and callee argument attribute.
1970  ImmutableCallSite ICS(&getAnchorValue());
1971  if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
1972  indicateOptimisticFixpoint();
1973  }
1974 
1975  /// See AbstractAttribute::updateImpl(...).
1976  ChangeStatus updateImpl(Attributor &A) override {
1977  // We can deduce "noalias" if the following conditions hold.
1978  // (i) Associated value is assumed to be noalias in the definition.
1979  // (ii) Associated value is assumed to be no-capture in all the uses
1980  // possibly executed before this callsite.
1981  // (iii) There is no other pointer argument which could alias with the
1982  // value.
1983 
1984  const Value &V = getAssociatedValue();
1985  const IRPosition IRP = IRPosition::value(V);
1986 
1987  // (i) Check whether noalias holds in the definition.
1988 
1989  auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
1990 
1991  if (!NoAliasAA.isAssumedNoAlias())
1992  return indicatePessimisticFixpoint();
1993 
1994  LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
1995  << " is assumed NoAlias in the definition\n");
1996 
1997  // (ii) Check whether the value is captured in the scope using AANoCapture.
1998  // FIXME: This is conservative though, it is better to look at CFG and
1999  // check only uses possibly executed before this callsite.
2000 
2001  auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2002  if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2003  LLVM_DEBUG(
2004  dbgs() << "[Attributor][AANoAliasCSArg] " << V
2005  << " cannot be noalias as it is potentially captured\n");
2006  return indicatePessimisticFixpoint();
2007  }
2008 
2009  // (iii) Check there is no other pointer argument which could alias with the
2010  // value.
2011  ImmutableCallSite ICS(&getAnchorValue());
2012  for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2013  if (getArgNo() == (int)i)
2014  continue;
2015  const Value *ArgOp = ICS.getArgOperand(i);
2016  if (!ArgOp->getType()->isPointerTy())
2017  continue;
2018 
2019  if (const Function *F = getAnchorScope()) {
2020  if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2021  bool IsNoAlias = AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2022  LLVM_DEBUG(dbgs()
2023  << "[Attributor][NoAliasCSArg] Check alias between "
2024  "callsite arguments "
2025  << IsNoAlias << " "
2026  << getAssociatedValue() << " " << *ArgOp << " => "
2027  << (IsNoAlias ? "no-" : "") << "alias\n");
2028 
2029  if (IsNoAlias)
2030  continue;
2031  }
2032  }
2033  return indicatePessimisticFixpoint();
2034  }
2035 
2036  return ChangeStatus::UNCHANGED;
2037  }
2038 
2039  /// See AbstractAttribute::trackStatistics()
2040  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2041 };
2042 
2043 /// NoAlias attribute for function return value.
2044 struct AANoAliasReturned final : AANoAliasImpl {
2045  AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2046 
2047  /// See AbstractAttribute::updateImpl(...).
2048  virtual ChangeStatus updateImpl(Attributor &A) override {
2049 
2050  auto CheckReturnValue = [&](Value &RV) -> bool {
2051  if (Constant *C = dyn_cast<Constant>(&RV))
2052  if (C->isNullValue() || isa<UndefValue>(C))
2053  return true;
2054 
2055  /// For now, we can only deduce noalias if we have call sites.
2056  /// FIXME: add more support.
2057  ImmutableCallSite ICS(&RV);
2058  if (!ICS)
2059  return false;
2060 
2061  const IRPosition &RVPos = IRPosition::value(RV);
2062  const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2063  if (!NoAliasAA.isAssumedNoAlias())
2064  return false;
2065 
2066  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2067  return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2068  };
2069 
2070  if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2071  return indicatePessimisticFixpoint();
2072 
2073  return ChangeStatus::UNCHANGED;
2074  }
2075 
2076  /// See AbstractAttribute::trackStatistics()
2077  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2078 };
2079 
2080 /// NoAlias attribute deduction for a call site return value.
2081 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2082  AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2083 
2084  /// See AbstractAttribute::initialize(...).
2085  void initialize(Attributor &A) override {
2086  AANoAliasImpl::initialize(A);
2087  Function *F = getAssociatedFunction();
2088  if (!F)
2089  indicatePessimisticFixpoint();
2090  }
2091 
2092  /// See AbstractAttribute::updateImpl(...).
2093  ChangeStatus updateImpl(Attributor &A) override {
2094  // TODO: Once we have call site specific value information we can provide
2095  // call site specific liveness information and then it makes
2096  // sense to specialize attributes for call site arguments instead of
2097  // redirecting requests to the callee argument.
2098  Function *F = getAssociatedFunction();
2099  const IRPosition &FnPos = IRPosition::returned(*F);
2100  auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2101  return clampStateAndIndicateChange(
2102  getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2103  }
2104 
2105  /// See AbstractAttribute::trackStatistics()
2106  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2107 };
2108 
2109 /// -------------------AAIsDead Function Attribute-----------------------
2110 
2111 struct AAIsDeadImpl : public AAIsDead {
2112  AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2113 
2114  void initialize(Attributor &A) override {
2115  const Function *F = getAssociatedFunction();
2116  if (F && !F->isDeclaration())
2117  exploreFromEntry(A, F);
2118  }
2119 
2120  void exploreFromEntry(Attributor &A, const Function *F) {
2121  ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
2122 
2123  for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
2124  if (const Instruction *NextNoReturnI =
2125  findNextNoReturn(A, ToBeExploredPaths[i]))
2126  NoReturnCalls.insert(NextNoReturnI);
2127 
2128  // Mark the block live after we looked for no-return instructions.
2129  assumeLive(A, F->getEntryBlock());
2130  }
2131 
2132  /// Find the next assumed noreturn instruction in the block of \p I starting
2133  /// from, thus including, \p I.
2134  ///
2135  /// The caller is responsible for monitoring the ToBeExploredPaths set as new
2136  /// instructions discovered in other basic blocks will be placed there.
2137  ///
2138  /// \returns The next assumed noreturn instruction in the block of \p I
2139  /// starting from, thus including, \p I.
2140  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);
2141 
2142  /// See AbstractAttribute::getAsStr().
2143  const std::string getAsStr() const override {
2144  return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2145  std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
2146  std::to_string(NoReturnCalls.size()) + "]";
2147  }
2148 
2149  /// See AbstractAttribute::manifest(...).
2150  ChangeStatus manifest(Attributor &A) override {
2151  assert(getState().isValidState() &&
2152  "Attempted to manifest an invalid state!");
2153 
2154  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2155  Function &F = *getAssociatedFunction();
2156 
2157  if (AssumedLiveBlocks.empty()) {
2158  A.deleteAfterManifest(F);
2159  return ChangeStatus::CHANGED;
2160  }
2161 
2162  // Flag to determine if we can change an invoke to a call assuming the
2163  // callee is nounwind. This is not possible if the personality of the
2164  // function allows catching asynchronous exceptions.
2165  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2166 
2167  for (const Instruction *NRC : NoReturnCalls) {
2168  Instruction *I = const_cast<Instruction *>(NRC);
2169  BasicBlock *BB = I->getParent();
2170  Instruction *SplitPos = I->getNextNode();
2171  // TODO: mark stuff before unreachable instructions as dead.
2172 
2173  if (auto *II = dyn_cast<InvokeInst>(I)) {
2174  // If we keep the invoke the split position is at the beginning of the
2175  // normal destination block (it invokes a noreturn function after all).
2176  BasicBlock *NormalDestBB = II->getNormalDest();
2177  SplitPos = &NormalDestBB->front();
2178 
2179  /// Invoke is replaced with a call and unreachable is placed after it if
2180  /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
2181  /// and only place an unreachable in the normal successor.
2182  if (Invoke2CallAllowed) {
2183  if (II->getCalledFunction()) {
2184  const IRPosition &IPos = IRPosition::callsite_function(*II);
2185  const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2186  if (AANoUnw.isAssumedNoUnwind()) {
2187  LLVM_DEBUG(dbgs()
2188  << "[AAIsDead] Replace invoke with call inst\n");
2189  // We do not need an invoke (II) but instead want a call followed
2190  // by an unreachable. However, we do not remove II as other
2191  // abstract attributes might have it cached as part of their
2192  // results. Given that we modify the CFG anyway, we simply keep II
2193  // around but in a new dead block. To avoid II being live through
2194  // a different edge we have to ensure the block we place it in is
2195  // only reached from the current block of II and then not reached
2196  // at all when we insert the unreachable.
2197  SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2198  CallInst *CI = createCallMatchingInvoke(II);
2199  CI->insertBefore(II);
2200  CI->takeName(II);
2201  II->replaceAllUsesWith(CI);
2202  SplitPos = CI->getNextNode();
2203  }
2204  }
2205  }
2206 
2207  if (SplitPos == &NormalDestBB->front()) {
2208  // If this is an invoke of a noreturn function the edge to the normal
2209  // destination block is dead but not necessarily the block itself.
2210  // TODO: We need to move to an edge based system during deduction and
2211  // also manifest.
2212  assert(!NormalDestBB->isLandingPad() &&
2213  "Expected the normal destination not to be a landingpad!");
2214  if (NormalDestBB->getUniquePredecessor() == BB) {
2215  assumeLive(A, *NormalDestBB);
2216  } else {
2217  BasicBlock *SplitBB =
2218  SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2219  // The split block is live even if it contains only an unreachable
2220  // instruction at the end.
2221  assumeLive(A, *SplitBB);
2222  SplitPos = SplitBB->getTerminator();
2223  HasChanged = ChangeStatus::CHANGED;
2224  }
2225  }
2226  }
2227 
2228  if (isa_and_nonnull<UnreachableInst>(SplitPos))
2229  continue;
2230 
2231  BB = SplitPos->getParent();
2232  SplitBlock(BB, SplitPos);
2233  changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
2234  HasChanged = ChangeStatus::CHANGED;
2235  }
2236 
2237  for (BasicBlock &BB : F)
2238  if (!AssumedLiveBlocks.count(&BB))
2239  A.deleteAfterManifest(BB);
2240 
2241  return HasChanged;
2242  }
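// A sketch of the invoke rewrite performed by manifest above, assuming the
// (hypothetical) callee @noret is deduced noreturn and nounwind:
//
//   invoke void @noret() to label %normal unwind label %lpad
//
// conceptually becomes
//
//   call void @noret()
//   unreachable
//
// while the original invoke is kept in a split-off dead block so that other
// abstract attributes holding cached references to it remain valid.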
2243 
2244  /// See AbstractAttribute::updateImpl(...).
2245  ChangeStatus updateImpl(Attributor &A) override;
2246 
2247  /// See AAIsDead::isAssumedDead(BasicBlock *).
2248  bool isAssumedDead(const BasicBlock *BB) const override {
2249  assert(BB->getParent() == getAssociatedFunction() &&
2250  "BB must be in the same anchor scope function.");
2251 
2252  if (!getAssumed())
2253  return false;
2254  return !AssumedLiveBlocks.count(BB);
2255  }
2256 
2257  /// See AAIsDead::isKnownDead(BasicBlock *).
2258  bool isKnownDead(const BasicBlock *BB) const override {
2259  return getKnown() && isAssumedDead(BB);
2260  }
2261 
2262  /// See AAIsDead::isAssumedDead(Instruction *I).
2263  bool isAssumedDead(const Instruction *I) const override {
2264  assert(I->getParent()->getParent() == getAssociatedFunction() &&
2265  "Instruction must be in the same anchor scope function.");
2266 
2267  if (!getAssumed())
2268  return false;
2269 
2270  // If it is not in AssumedLiveBlocks then it is for sure dead.
2271  // Otherwise, it can still be after a noreturn call in a live block.
2272  if (!AssumedLiveBlocks.count(I->getParent()))
2273  return true;
2274 
2275  // If it is not after a noreturn call, then it is live.
2276  return isAfterNoReturn(I);
2277  }
2278 
2279  /// See AAIsDead::isKnownDead(Instruction *I).
2280  bool isKnownDead(const Instruction *I) const override {
2281  return getKnown() && isAssumedDead(I);
2282  }
2283 
2284  /// Check if the instruction is after a noreturn call, in other words, assumed dead.
2285  bool isAfterNoReturn(const Instruction *I) const;
2286 
2287  /// Determine if \p F might catch asynchronous exceptions.
2288  static bool mayCatchAsynchronousExceptions(const Function &F) {
2289  return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2290  }
2291 
2292  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2293  /// that internal functions called from \p BB should now be looked at.
2294  void assumeLive(Attributor &A, const BasicBlock &BB) {
2295  if (!AssumedLiveBlocks.insert(&BB).second)
2296  return;
2297 
2298  // We assume that all of BB is (probably) live now and if there are calls to
2299  // internal functions we will assume that those are now live as well. This
2300  // is a performance optimization for blocks with calls to a lot of internal
2301  // functions. It can however cause dead functions to be treated as live.
2302  for (const Instruction &I : BB)
2303  if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2304  if (const Function *F = ICS.getCalledFunction())
2305  if (F->hasLocalLinkage())
2306  A.markLiveInternalFunction(*F);
2307  }
2308 
2309  /// Collection of to be explored paths.
2310  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;
2311 
2312  /// Collection of all assumed live BasicBlocks.
2313  DenseSet<const BasicBlock *> AssumedLiveBlocks;
2314 
2315  /// Collection of calls with noreturn attribute, assumed or known.
2316  SmallSetVector<const Instruction *, 4> NoReturnCalls;
2317 };
2318 
2319 struct AAIsDeadFunction final : public AAIsDeadImpl {
2320  AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
2321 
2322  /// See AbstractAttribute::trackStatistics()
2323  void trackStatistics() const override {
2324  STATS_DECL(PartiallyDeadBlocks, Function,
2325  "Number of basic blocks classified as partially dead");
2326  BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
2327  }
2328 };
2329 
2330 bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
2331  const Instruction *PrevI = I->getPrevNode();
2332  while (PrevI) {
2333  if (NoReturnCalls.count(PrevI))
2334  return true;
2335  PrevI = PrevI->getPrevNode();
2336  }
2337  return false;
2338 }
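// An illustrative (made-up) example: in a block like
//
//   call void @noret()          ; contained in NoReturnCalls
//   store i32 0, i32* @g
//
// the backwards walk from the store finds the noreturn call among its
// predecessors, so isAfterNoReturn (and thus isAssumedDead) is true for the
// store.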
2339 
2340 const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
2341  const Instruction *I) {
2342  const BasicBlock *BB = I->getParent();
2343  const Function &F = *BB->getParent();
2344 
2345  // Flag to determine if we can change an invoke to a call assuming the callee
2346  // is nounwind. This is not possible if the personality of the function allows
2347  // catching asynchronous exceptions.
2348  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2349 
2350  // TODO: We should have a function that determines if an "edge" is dead.
2351  // Edges could be from an instruction to the next or from a terminator
2352  // to the successor. For now, we need to special case the unwind block
2353  // of InvokeInst below.
2354 
2355  while (I) {
2356  ImmutableCallSite ICS(I);
2357 
2358  if (ICS) {
2359  const IRPosition &IPos = IRPosition::callsite_function(ICS);
2360  // Regardless of the no-return property of an invoke instruction we only
2361  // learn that the regular successor is not reachable through this
2362  // instruction but the unwind block might still be.
2363  if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
2364  // Use nounwind to justify the unwind block is dead as well.
2365  const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2366  if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
2367  assumeLive(A, *Invoke->getUnwindDest());
2368  ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
2369  }
2370  }
2371 
2372  const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
2373  if (NoReturnAA.isAssumedNoReturn())
2374  return I;
2375  }
2376 
2377  I = I->getNextNode();
2378  }
2379 
2380  // get new paths (reachable blocks).
2381  for (const BasicBlock *SuccBB : successors(BB)) {
2382  assumeLive(A, *SuccBB);
2383  ToBeExploredPaths.insert(&SuccBB->front());
2384  }
2385 
2386  // No noreturn instruction found.
2387  return nullptr;
2388 }
2389 
2390 ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
2391  ChangeStatus Status = ChangeStatus::UNCHANGED;
2392 
2393  // Temporary collection to iterate over existing noreturn instructions. This
2394  // will allow easier modification of the NoReturnCalls collection.
2395  SmallVector<const Instruction *, 8> NoReturnChanged;
2396 
2397  for (const Instruction *I : NoReturnCalls)
2398  NoReturnChanged.push_back(I);
2399 
2400  for (const Instruction *I : NoReturnChanged) {
2401  size_t Size = ToBeExploredPaths.size();
2402 
2403  const Instruction *NextNoReturnI = findNextNoReturn(A, I);
2404  if (NextNoReturnI != I) {
2405  Status = ChangeStatus::CHANGED;
2406  NoReturnCalls.remove(I);
2407  if (NextNoReturnI)
2408  NoReturnCalls.insert(NextNoReturnI);
2409  }
2410 
2411  // Explore new paths.
2412  while (Size != ToBeExploredPaths.size()) {
2413  Status = ChangeStatus::CHANGED;
2414  if (const Instruction *NextNoReturnI =
2415  findNextNoReturn(A, ToBeExploredPaths[Size++]))
2416  NoReturnCalls.insert(NextNoReturnI);
2417  }
2418  }
2419 
2420  LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
2421  << AssumedLiveBlocks.size() << " Total number of blocks: "
2422  << getAssociatedFunction()->size() << "\n");
2423 
2424  // If we know everything is live there is no need to query for liveness.
2425  if (NoReturnCalls.empty() &&
2426  getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
2427  // Indicating a pessimistic fixpoint will cause the state to be "invalid"
2428  // which will cause the Attributor to not return the AAIsDead on request,
2429  // which will prevent us from querying isAssumedDead().
2430  indicatePessimisticFixpoint();
2431  assert(!isValidState() && "Expected an invalid state!");
2432  Status = ChangeStatus::CHANGED;
2433  }
2434 
2435  return Status;
2436 }
2437 
2438  /// Liveness information for a call site.
2439 struct AAIsDeadCallSite final : AAIsDeadImpl {
2440  AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
2441 
2442  /// See AbstractAttribute::initialize(...).
2443  void initialize(Attributor &A) override {
2444  // TODO: Once we have call site specific value information we can provide
2445  // call site specific liveness information and then it makes
2446  // sense to specialize attributes for call sites instead of
2447  // redirecting requests to the callee.
2448  llvm_unreachable("Abstract attributes for liveness are not "
2449  "supported for call sites yet!");
2450  }
2451 
2452  /// See AbstractAttribute::updateImpl(...).
2453  ChangeStatus updateImpl(Attributor &A) override {
2454  return indicatePessimisticFixpoint();
2455  }
2456 
2457  /// See AbstractAttribute::trackStatistics()
2458  void trackStatistics() const override {}
2459 };
2460 
2461 /// -------------------- Dereferenceable Argument Attribute --------------------
2462 
2463 template <>
2464 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2465  const DerefState &R) {
2466  ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
2467  S.DerefBytesState, R.DerefBytesState);
2468  ChangeStatus CS1 =
2469  clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
2470  return CS0 | CS1;
2471 }
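// An illustrative example of the clamping semantics above (the numbers are
// made up): if the current state S assumes 8 dereferenceable bytes but the
// incoming state R only justifies 4, the clamp weakens S's assumed bytes to
// at most 4 and reports CHANGED; clamping with information at least as strong
// leaves S alone and reports UNCHANGED. The bytes and globality components
// are clamped independently and their change statuses merged with operator|.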
2472 
2473 struct AADereferenceableImpl : AADereferenceable {
2474  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2475  using StateType = DerefState;
2476 
2477  void initialize(Attributor &A) override {
2478  SmallVector<Attribute, 4> Attrs;
2479  getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2480  Attrs);
2481  for (const Attribute &Attr : Attrs)
2482  takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2483 
2484  NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2485 
2486  const IRPosition &IRP = this->getIRPosition();
2487  bool IsFnInterface = IRP.isFnInterfaceKind();
2488  const Function *FnScope = IRP.getAnchorScope();
2489  if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
2490  indicatePessimisticFixpoint();
2491  }
2492 
2493  /// See AbstractAttribute::getState()
2494  /// {
2495  StateType &getState() override { return *this; }
2496  const StateType &getState() const override { return *this; }
2497  /// }
2498 
2499  /// See AAFromMustBeExecutedContext
2500  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
2501  bool IsNonNull = false;
2502  bool TrackUse = false;
2503  int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
2504  A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
2505  takeKnownDerefBytesMaximum(DerefBytes);
2506  return TrackUse;
2507  }
2508 
2509  void getDeducedAttributes(LLVMContext &Ctx,
2510  SmallVectorImpl<Attribute> &Attrs) const override {
2511  // TODO: Add *_globally support
2512  if (isAssumedNonNull())
2513  Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2514  Ctx, getAssumedDereferenceableBytes()));
2515  else
2516  Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2517  Ctx, getAssumedDereferenceableBytes()));
2518  }
2519 
2520  /// See AbstractAttribute::getAsStr().
2521  const std::string getAsStr() const override {
2522  if (!getAssumedDereferenceableBytes())
2523  return "unknown-dereferenceable";
2524  return std::string("dereferenceable") +
2525  (isAssumedNonNull() ? "" : "_or_null") +
2526  (isAssumedGlobal() ? "_globally" : "") + "<" +
2527  std::to_string(getKnownDereferenceableBytes()) + "-" +
2528  std::to_string(getAssumedDereferenceableBytes()) + ">";
2529  }
2530 };
2531 
2532 /// Dereferenceable attribute for a floating value.
2533 struct AADereferenceableFloating
2534  : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
2535  using Base =
2536  AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
2537  AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
2538 
2539  /// See AbstractAttribute::updateImpl(...).
2540  ChangeStatus updateImpl(Attributor &A) override {
2541  ChangeStatus Change = Base::updateImpl(A);
2542 
2543  const DataLayout &DL = A.getDataLayout();
2544 
2545  auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
2546  unsigned IdxWidth =
2547  DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
2548  APInt Offset(IdxWidth, 0);
2549  const Value *Base =
2550  V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
2551 
2552  const auto &AA =
2553  A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
2554  int64_t DerefBytes = 0;
2555  if (!Stripped && this == &AA) {
2556  // Use IR information if we did not strip anything.
2557  // TODO: track globally.
2558  bool CanBeNull;
2559  DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
2560  T.GlobalState.indicatePessimisticFixpoint();
2561  } else {
2562  const DerefState &DS = static_cast<const DerefState &>(AA.getState());
2563  DerefBytes = DS.DerefBytesState.getAssumed();
2564  T.GlobalState &= DS.GlobalState;
2565  }
2566 
2567  // For now we do not try to "increase" dereferenceability due to negative
2568  // indices as we first have to come up with code to deal with loops and
2569  // with overflows of the dereferenceable bytes.
2570  int64_t OffsetSExt = Offset.getSExtValue();
2571  if (OffsetSExt < 0)
2572  OffsetSExt = 0;
2573 
2574  T.takeAssumedDerefBytesMinimum(
2575  std::max(int64_t(0), DerefBytes - OffsetSExt));
2576 
2577  if (this == &AA) {
2578  if (!Stripped) {
2579  // If nothing was stripped IR information is all we got.
2580  T.takeKnownDerefBytesMaximum(
2581  std::max(int64_t(0), DerefBytes - OffsetSExt));
2582  T.indicatePessimisticFixpoint();
2583  } else if (OffsetSExt > 0) {
2584  // If something was stripped but there is circular reasoning we look
2585  // at the offset. If it is positive we basically decrease the
2586  // dereferenceable bytes in a circular loop now, which would slowly
2587  // drive them down to the known value; we accelerate this by taking
2588  // the pessimistic fixpoint directly.
2589  T.indicatePessimisticFixpoint();
2590  }
2591  }
2592 
2593  return T.isValidState();
2594  };
2595 
2596  DerefState T;
2597  if (!genericValueTraversal<AADereferenceable, DerefState>(
2598  A, getIRPosition(), *this, T, VisitValueCB))
2599  return indicatePessimisticFixpoint();
2600 
2601  return Change | clampStateAndIndicateChange(getState(), T);
2602  }
2603 
2604  /// See AbstractAttribute::trackStatistics()
2605  void trackStatistics() const override {
2606  STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
2607  }
2608 };
2609 
2610 /// Dereferenceable attribute for a return value.
2611 struct AADereferenceableReturned final
2612  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
2613  DerefState> {
2614  AADereferenceableReturned(const IRPosition &IRP)
2615  : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
2616  DerefState>(IRP) {}
2617 
2618  /// See AbstractAttribute::trackStatistics()
2619  void trackStatistics() const override {
2620  STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
2621  }
2622 };
2623 
2624 /// Dereferenceable attribute for an argument
2625 struct AADereferenceableArgument final
2626  : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
2627  AADereferenceable, AADereferenceableImpl, DerefState> {
2628  using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
2629  AADereferenceable, AADereferenceableImpl, DerefState>;
2630  AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
2631 
2632  /// See AbstractAttribute::trackStatistics()
2633  void trackStatistics() const override {
2634  STATS_DECLTRACK_ARG_ATTR(dereferenceable)
2635  }
2636 };
2637 
2638 /// Dereferenceable attribute for a call site argument.
2639 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
2640  AADereferenceableCallSiteArgument(const IRPosition &IRP)
2641  : AADereferenceableFloating(IRP) {}
2642 
2643  /// See AbstractAttribute::trackStatistics()
2644  void trackStatistics() const override {
2645  STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
2646  }
2647 };
2648 
2649 /// Dereferenceable attribute deduction for a call site return value.
2650 struct AADereferenceableCallSiteReturned final
2651  : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
2652  AADereferenceable, AADereferenceableImpl> {
2653  using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
2654  AADereferenceable, AADereferenceableImpl>;
2655  AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
2656 
2657  /// See AbstractAttribute::initialize(...).
2658  void initialize(Attributor &A) override {
2659  Base::initialize(A);
2660  Function *F = getAssociatedFunction();
2661  if (!F)
2662  indicatePessimisticFixpoint();
2663  }
2664 
2665  /// See AbstractAttribute::updateImpl(...).
2666  ChangeStatus updateImpl(Attributor &A) override {
2667  // TODO: Once we have call site specific value information we can provide
2668  // call site specific liveness information and then it makes
2669  // sense to specialize attributes for call site arguments instead of
2670  // redirecting requests to the callee argument.
2671 
2672  ChangeStatus Change = Base::updateImpl(A);
2673  Function *F = getAssociatedFunction();
2674  const IRPosition &FnPos = IRPosition::returned(*F);
2675  auto &FnAA = A.getAAFor<AADereferenceable>(*this, FnPos);
2676  return Change |
2677  clampStateAndIndicateChange(
2678  getState(), static_cast<const DerefState &>(FnAA.getState()));
2679  }
2680 
2681  /// See AbstractAttribute::trackStatistics()
2682  void trackStatistics() const override {
2683  STATS_DECLTRACK_CS_ATTR(dereferenceable);
2684  }
2685 };
2686 
2687 // ------------------------ Align Argument Attribute ------------------------
2688 
2689 struct AAAlignImpl : AAAlign {
2690  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
2691 
2692  // Max alignment value allowed in IR
2693  static const unsigned MAX_ALIGN = 1U << 29;
2694 
2695  /// See AbstractAttribute::initialize(...).
2696  void initialize(Attributor &A) override {
2697  takeAssumedMinimum(MAX_ALIGN);
2698 
2699  SmallVector<Attribute, 4> Attrs;
2700  getAttrs({Attribute::Alignment}, Attrs);
2701  for (const Attribute &Attr : Attrs)
2702  takeKnownMaximum(Attr.getValueAsInt());
2703 
2704  if (getIRPosition().isFnInterfaceKind() &&
2705  (!getAssociatedFunction() ||
2706  !getAssociatedFunction()->hasExactDefinition()))
2707  indicatePessimisticFixpoint();
2708  }
2709 
2710  /// See AbstractAttribute::manifest(...).
2711  ChangeStatus manifest(Attributor &A) override {
2712  ChangeStatus Changed = ChangeStatus::UNCHANGED;
2713 
2714  // Check for users that allow alignment annotations.
2715  Value &AnchorVal = getIRPosition().getAnchorValue();
2716  for (const Use &U : AnchorVal.uses()) {
2717  if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
2718  if (SI->getPointerOperand() == &AnchorVal)
2719  if (SI->getAlignment() < getAssumedAlign()) {
2720  STATS_DECLTRACK(AAAlign, Store,
2721  "Number of times alignment added to a store");
2722  SI->setAlignment(Align(getAssumedAlign()));
2723  Changed = ChangeStatus::CHANGED;
2724  }
2725  } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
2726  if (LI->getPointerOperand() == &AnchorVal)
2727  if (LI->getAlignment() < getAssumedAlign()) {
2728  LI->setAlignment(Align(getAssumedAlign()));
2729  STATS_DECLTRACK(AAAlign, Load,
2730  "Number of times alignment added to a load");
2731  Changed = ChangeStatus::CHANGED;
2732  }
2733  }
2734  }
2735 
2736  return AAAlign::manifest(A) | Changed;
2737  }
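// An illustrative (made-up) example for the manifest loop above: if align 16
// was deduced for %p, a user such as
//
//   store i32 0, i32* %p, align 4
//
// is rewritten to use "align 16", while loads and stores that already carry
// an equal or larger alignment are left untouched.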
2738 
2739  // TODO: Provide a helper to determine the implied ABI alignment and check in
2740  // the existing manifest method and a new one for AAAlignImpl that value
2741  // to avoid making the alignment explicit if it did not improve.
2742 
2743  /// See AbstractAttribute::getDeducedAttributes
2744  virtual void
2745  getDeducedAttributes(LLVMContext &Ctx,
2746  SmallVectorImpl<Attribute> &Attrs) const override {
2747  if (getAssumedAlign() > 1)
2748  Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
2749  }
2750 
2751  /// See AbstractAttribute::getAsStr().
2752  const std::string getAsStr() const override {
2753  return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
2754  "-" + std::to_string(getAssumedAlign()) + ">")
2755  : "unknown-align";
2756  }
2757 };
2758 
2759 /// Align attribute for a floating value.
2760 struct AAAlignFloating : AAAlignImpl {
2761  AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2762 
2763  /// See AbstractAttribute::updateImpl(...).
2764  ChangeStatus updateImpl(Attributor &A) override {
2765  const DataLayout &DL = A.getDataLayout();
2766 
2767  auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
2768  bool Stripped) -> bool {
2769  const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
2770  if (!Stripped && this == &AA) {
2771  // Use only IR information if we did not strip anything.
2772  T.takeKnownMaximum(V.getPointerAlignment(DL));
2773  T.indicatePessimisticFixpoint();
2774  } else {
2775  // Use abstract attribute information.
2776  const AAAlign::StateType &DS =
2777  static_cast<const AAAlign::StateType &>(AA.getState());
2778  T ^= DS;
2779  }
2780  return T.isValidState();
2781  };
2782 
2783  StateType T;
2784  if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
2785  VisitValueCB))
2786  return indicatePessimisticFixpoint();
2787 
2788  // TODO: If we know we visited all incoming values, thus none are assumed
2789  // dead, we can take the known information from the state T.
2790  return clampStateAndIndicateChange(getState(), T);
2791  }
2792 
2793  /// See AbstractAttribute::trackStatistics()
2794  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
2795 };
2796 
2797 /// Align attribute for function return value.
2798 struct AAAlignReturned final
2799  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
2800  AAAlignReturned(const IRPosition &IRP)
2801  : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
2802 
2803  /// See AbstractAttribute::trackStatistics()
2804  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
2805 };
2806 
2807 /// Align attribute for function argument.
2808 struct AAAlignArgument final
2809  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
2810  AAAlignArgument(const IRPosition &IRP)
2811  : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}
2812 
2813  /// See AbstractAttribute::trackStatistics()
2814  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
2815 };
2816 
2817 struct AAAlignCallSiteArgument final : AAAlignFloating {
2818  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
2819 
2820  /// See AbstractAttribute::manifest(...).
2821  ChangeStatus manifest(Attributor &A) override {
2822  return AAAlignImpl::manifest(A);
2823  }
2824 
2825  /// See AbstractAttribute::trackStatistics()
2826  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
2827 };
2828 
2829 /// Align attribute deduction for a call site return value.
2830 struct AAAlignCallSiteReturned final : AAAlignImpl {
2831  AAAlignCallSiteReturned(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2832 
2833  /// See AbstractAttribute::initialize(...).
2834  void initialize(Attributor &A) override {
2835  AAAlignImpl::initialize(A);
2836  Function *F = getAssociatedFunction();
2837  if (!F)
2838  indicatePessimisticFixpoint();
2839  }
2840 
2841  /// See AbstractAttribute::updateImpl(...).
2842  ChangeStatus updateImpl(Attributor &A) override {
2843  // TODO: Once we have call site specific value information we can provide
2844  // call site specific liveness information and then it makes
2845  // sense to specialize attributes for call site arguments instead of
2846  // redirecting requests to the callee argument.
2847  Function *F = getAssociatedFunction();
2848  const IRPosition &FnPos = IRPosition::returned(*F);
2849  auto &FnAA = A.getAAFor<AAAlign>(*this, FnPos);
2850  return clampStateAndIndicateChange(
2851  getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
2852  }
2853 
2854  /// See AbstractAttribute::trackStatistics()
2855  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
2856 };
2857 
2858 /// ------------------ Function No-Return Attribute ----------------------------
2859 struct AANoReturnImpl : public AANoReturn {
2860  AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
2861 
2862  /// See AbstractAttribute::initialize(...).
2863  void initialize(Attributor &A) override {
2864  AANoReturn::initialize(A);
2865  Function *F = getAssociatedFunction();
2866  if (!F || F->hasFnAttribute(Attribute::WillReturn))
2867  indicatePessimisticFixpoint();
2868  }
2869 
2870  /// See AbstractAttribute::getAsStr().
2871  const std::string getAsStr() const override {
2872  return getAssumed() ? "noreturn" : "may-return";
2873  }
2874 
2875  /// See AbstractAttribute::updateImpl(Attributor &A).
2876  virtual ChangeStatus updateImpl(Attributor &A) override {
2877  const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, getIRPosition());
2878  if (WillReturnAA.isKnownWillReturn())
2879  return indicatePessimisticFixpoint();
2880  auto CheckForNoReturn = [](Instruction &) { return false; };
2881  if (!A.checkForAllInstructions(CheckForNoReturn, *this,
2882  {(unsigned)Instruction::Ret}))
2883  return indicatePessimisticFixpoint();
2884  return ChangeStatus::UNCHANGED;
2885  }
2886 };
2887 
2888 struct AANoReturnFunction final : AANoReturnImpl {
2889  AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
2890 
2891  /// See AbstractAttribute::trackStatistics()
2892  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
2893 };
2894 
2895  /// NoReturn attribute deduction for a call site.
2896 struct AANoReturnCallSite final : AANoReturnImpl {
2897  AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
2898 
2899  /// See AbstractAttribute::updateImpl(...).
2900  ChangeStatus updateImpl(Attributor &A) override {
2901  // TODO: Once we have call site specific value information we can provide
2902  // call site specific liveness information and then it makes
2903  // sense to specialize attributes for call site arguments instead of
2904  // redirecting requests to the callee argument.
2905  Function *F = getAssociatedFunction();
2906  const IRPosition &FnPos = IRPosition::function(*F);
2907  auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
2908  return clampStateAndIndicateChange(
2909  getState(),
2910  static_cast<const AANoReturn::StateType &>(FnAA.getState()));
2911  }
2912 
2913  /// See AbstractAttribute::trackStatistics()
2914  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
2915 };
2916 
2917 /// ----------------------- Variable Capturing ---------------------------------
2918 
2919  /// A class to hold the state for no-capture attributes.
2920 struct AANoCaptureImpl : public AANoCapture {
2921  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
2922 
2923  /// See AbstractAttribute::initialize(...).
2924  void initialize(Attributor &A) override {
2925  AANoCapture::initialize(A);
2926 
2927  // You cannot "capture" null in the default address space.
2928  if (isa<ConstantPointerNull>(getAssociatedValue()) &&
2929  getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
2930  indicateOptimisticFixpoint();
2931  return;
2932  }
2933 
2934  const IRPosition &IRP = getIRPosition();
2935  const Function *F =
2936  getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
2937 
2938  // Check what state the associated function can actually capture.
2939  if (F)
2940  determineFunctionCaptureCapabilities(*F, *this);
2941  else
2942  indicatePessimisticFixpoint();
2943  }
2944 
2945  /// See AbstractAttribute::updateImpl(...).
2946  ChangeStatus updateImpl(Attributor &A) override;
2947 
2948  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
2949  virtual void
2950  getDeducedAttributes(LLVMContext &Ctx,
2951  SmallVectorImpl<Attribute> &Attrs) const override {
2952  if (!isAssumedNoCaptureMaybeReturned())
2953  return;
2954 
2955  if (getArgNo() >= 0) {
2956  if (isAssumedNoCapture())
2957  Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
2958  else if (ManifestInternal)
2959  Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
2960  }
2961  }
2962 
2963  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
2964  /// depending on the ability of the function \p F to capture state in memory
2965  /// and through "returning/throwing", respectively.
2966  static void determineFunctionCaptureCapabilities(const Function &F,
2967  IntegerState &State) {
2968  // TODO: Once we have memory behavior attributes we should use them here.
2969 
2970  // If we know we cannot communicate or write to memory, we do not care about
2971  // ptr2int anymore.
2972  if (F.onlyReadsMemory() && F.doesNotThrow() &&
2973  F.getReturnType()->isVoidTy()) {
2974  State.addKnownBits(NO_CAPTURE);
2975  return;
2976  }
2977 
2978  // A function cannot capture state in memory if it only reads memory, it can
2979  // however return/throw state and the state might be influenced by the
2980  // pointer value, e.g., loading from a returned pointer might reveal a bit.
2981  if (F.onlyReadsMemory())
2982  State.addKnownBits(NOT_CAPTURED_IN_MEM);
2983 
2984  // A function cannot communicate state back if it does not throw
2985  // exceptions and does not return values.
2986  if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
2987  State.addKnownBits(NOT_CAPTURED_IN_RET);
2988  }
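// An illustrative (made-up) example: a callee such as
//
//   define void @g(i8* %p) readonly nounwind { ... }
//
// only reads memory, cannot throw, and returns void, so NO_CAPTURE is known
// for its pointer arguments up front. A readonly function returning i8*
// would only get NOT_CAPTURED_IN_MEM, since the pointer could still escape
// through the returned value.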
2989 
2990  /// See AbstractState::getAsStr().
2991  const std::string getAsStr() const override {
2992  if (isKnownNoCapture())
2993  return "known not-captured";
2994  if (isAssumedNoCapture())
2995  return "assumed not-captured";
2996  if (isKnownNoCaptureMaybeReturned())
2997  return "known not-captured-maybe-returned";
2998  if (isAssumedNoCaptureMaybeReturned())
2999  return "assumed not-captured-maybe-returned";
3000  return "assumed-captured";
3001  }
3002 };
3003 
3004 /// Attributor-aware capture tracker.
3005 struct AACaptureUseTracker final : public CaptureTracker {
3006 
3007  /// Create a capture tracker that can lookup in-flight abstract attributes
3008  /// through the Attributor \p A.
3009  ///
3010  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3011  /// search is stopped. If a use leads to a return instruction,
3012  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3013  /// If a use leads to a ptr2int which may capture the value,
3014  /// \p CapturedInInteger is set. If a use is found that is currently assumed
3015  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3016  /// set. All values in \p PotentialCopies are later tracked as well. For every
3017  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3018  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3019  /// conservatively set to true.
3020  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3021  const AAIsDead &IsDeadAA, IntegerState &State,
3022  SmallVectorImpl<const Value *> &PotentialCopies,
3023  unsigned &RemainingUsesToExplore)
3024  : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3025  PotentialCopies(PotentialCopies),
3026  RemainingUsesToExplore(RemainingUsesToExplore) {}
3027 
3028  /// Determine if \p V may be captured. *Also updates the state!*
3029  bool valueMayBeCaptured(const Value *V) {
3030  if (V->getType()->isPointerTy()) {
3031  PointerMayBeCaptured(V, this);
3032  } else {
3033  State.indicatePessimisticFixpoint();
3034  }
3035  return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3036  }
3037 
3038  /// See CaptureTracker::tooManyUses().
3039  void tooManyUses() override {
3040  State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3041  }
3042 
3043  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3044  if (CaptureTracker::isDereferenceableOrNull(O, DL))
3045  return true;
3046  const auto &DerefAA =
3047  A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3048  return DerefAA.getAssumedDereferenceableBytes();
3049  }
3050 
3051  /// See CaptureTracker::captured(...).
3052  bool captured(const Use *U) override {
3053  Instruction *UInst = cast<Instruction>(U->getUser());
3054  LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3055  << "\n");
3056 
3057  // Because we may reuse the tracker multiple times we keep track of the
3058  // number of explored uses ourselves as well.
3059  if (RemainingUsesToExplore-- == 0) {
3060  LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3061  return isCapturedIn(/* Memory */ true, /* Integer */ true,
3062  /* Return */ true);
3063  }
3064 
3065  // Deal with ptr2int by following uses.
3066  if (isa<PtrToIntInst>(UInst)) {
3067  LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3068  return valueMayBeCaptured(UInst);
3069  }
3070 
3071  // Explicitly catch return instructions.
3072  if (isa<ReturnInst>(UInst))
3073  return isCapturedIn(/* Memory */ false, /* Integer */ false,
3074  /* Return */ true);
3075 
3076  // For now we only use special logic for call sites. However, the tracker
3077  // itself knows about a lot of other non-capturing cases already.
3078  CallSite CS(UInst);
3079  if (!CS || !CS.isArgOperand(U))
3080  return isCapturedIn(/* Memory */ true, /* Integer */ true,
3081  /* Return */ true);
3082 
3083  unsigned ArgNo = CS.getArgumentNo(U);
3084  const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3085  // If we have an abstract no-capture attribute for the argument we can use
3086  // it to justify a non-capture attribute here. This allows recursion!
3087  auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3088  if (ArgNoCaptureAA.isAssumedNoCapture())
3089  return isCapturedIn(/* Memory */ false, /* Integer */ false,
3090  /* Return */ false);
3091  if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3092  addPotentialCopy(CS);
3093  return isCapturedIn(/* Memory */ false, /* Integer */ false,
3094  /* Return */ false);
3095  }
3096 
3097  // Lastly, we could not find a reason to assume no-capture, so we do not.
3098  return isCapturedIn(/* Memory */ true, /* Integer */ true,
3099  /* Return */ true);
3100  }
3101 
3102  /// Register \p CS as potential copy of the value we are checking.
3103  void addPotentialCopy(CallSite CS) {
3104  PotentialCopies.push_back(CS.getInstruction());
3105  }
3106 
3107  /// See CaptureTracker::shouldExplore(...).
3108  bool shouldExplore(const Use *U) override {
3109  // Check liveness.
3110  return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3111  }
3112 
3113  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3114  /// \p CapturedInRet, then return the appropriate value for use in the
3115  /// CaptureTracker::captured() interface.
3116  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3117  bool CapturedInRet) {
3118  LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3119  << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3120  if (CapturedInMem)
3121  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3122  if (CapturedInInt)
3123  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3124  if (CapturedInRet)
3125  State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3126  return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3127  }
3128 
3129 private:
3130  /// The attributor providing in-flight abstract attributes.
3131  Attributor &A;
3132 
3133  /// The abstract attribute currently updated.
3134  AANoCapture &NoCaptureAA;
3135 
3136  /// The abstract liveness state.
3137  const AAIsDead &IsDeadAA;
3138 
3139  /// The state currently updated.
3140  IntegerState &State;
3141 
3142  /// Set of potential copies of the tracked value.
3143  SmallVectorImpl<const Value *> &PotentialCopies;
3144 
3145  /// Global counter to limit the number of explored uses.
3146  unsigned &RemainingUsesToExplore;
3147 };
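// For illustration, a minimal standalone CaptureTracker (a hypothetical
// sketch, not part of this file; only the CaptureTracker interface and
// PointerMayBeCaptured are real):
//
// \code
// struct SimpleCaptureFlag final : public CaptureTracker {
//   bool Captured = false;
//
//   // Assume the worst once there are too many uses to scan.
//   void tooManyUses() override { Captured = true; }
//
//   // Record the capture; returning true aborts the traversal.
//   bool captured(const Use *U) override { return Captured = true; }
// };
//
// // Given some pointer-typed `Value &Ptr`:
// SimpleCaptureFlag SCF;
// PointerMayBeCaptured(&Ptr, &SCF); // walks uses, calling back into SCF
// bool MayBeCaptured = SCF.Captured;
// \endcode
//
// AACaptureUseTracker refines this scheme by folding each callback into the
// AANoCapture bit state instead of a single boolean.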
3148 
3149 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3150  const IRPosition &IRP = getIRPosition();
3151  const Value *V =
3152  getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3153  if (!V)
3154  return indicatePessimisticFixpoint();
3155 
3156  const Function *F =
3157  getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3158  assert(F && "Expected a function!");
3159  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));
3160 
3161  AANoCapture::StateType T;
3162  // TODO: Once we have memory behavior attributes we should use them here
3163  // similar to the reasoning in
3164  // AANoCaptureImpl::determineFunctionCaptureCapabilities(...).
3165 
3166  // TODO: Use the AAReturnedValues to learn if the argument can return or
3167  // not.
3168 
3169  // Use the CaptureTracker interface and logic with the specialized tracker,
3170  // defined in AACaptureUseTracker, that can look at in-flight abstract
3171  // attributes and directly updates the assumed state.
3172  SmallVector<const Value *, 4> PotentialCopies;
3173  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
3174  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
3175  RemainingUsesToExplore);
3176 
3177  // Check all potential copies of the associated value until we can assume
3178  // none will be captured or we have to assume at least one might be.
3179  unsigned Idx = 0;
3180  PotentialCopies.push_back(V);
3181  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
3182  Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
3183 
3184  AANoCapture::StateType &S = getState();
3185  auto Assumed = S.getAssumed();
3186  S.intersectAssumedBits(T.getAssumed());
3187  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3188  : ChangeStatus::CHANGED;
3189 }
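// The PotentialCopies worklist above lets the deduction look through calls
// that merely forward their argument. In IR such as the following
// (hypothetical example, not from this file)
//
// \code
// define i8* @passthrough(i8* returned %p) {
//   ret i8* %p
// }
//
// define void @caller(i8* %p) {
//   %q = call i8* @passthrough(i8* %p)
//   ret void
// }
// \endcode
//
// the call site argument is assumed "no-capture-maybe-returned", so %q is
// registered as a potential copy and scanned as well; since %q has no
// capturing uses either, %p can be marked nocapture in @caller.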
3190 
3191 /// NoCapture attribute for function arguments.
3192 struct AANoCaptureArgument final : AANoCaptureImpl {
3193  AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3194 
3195  /// See AbstractAttribute::trackStatistics()
3196  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
3197 };
3198 
3199 /// NoCapture attribute for call site arguments.
3200 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3201  AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3202 
3203  /// See AbstractAttribute::updateImpl(...).
3204  ChangeStatus updateImpl(Attributor &A) override {
3205  // TODO: Once we have call site specific value information we can provide
3206  // call site specific liveness information and then it makes
3207  // sense to specialize attributes for call sites arguments instead of
3208  // redirecting requests to the callee argument.
3209  Argument *Arg = getAssociatedArgument();
3210  if (!Arg)
3211  return indicatePessimisticFixpoint();
3212  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3213  auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3214  return clampStateAndIndicateChange(
3215  getState(),
3216  static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3217  }
3218 
3219  /// See AbstractAttribute::trackStatistics()
3220  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
3221 };
3222 
3223 /// NoCapture attribute for floating values.
3224 struct AANoCaptureFloating final : AANoCaptureImpl {
3225  AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3226 
3227  /// See AbstractAttribute::trackStatistics()
3228  void trackStatistics() const override {
3229  STATS_DECLTRACK_FLOATING_ATTR(nocapture)
3230  }
3231 };
3232 
3233 /// NoCapture attribute for function return value.
3234 struct AANoCaptureReturned final : AANoCaptureImpl {
3235  AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
3236  llvm_unreachable("NoCapture is not applicable to function returns!");
3237  }
3238 
3239  /// See AbstractAttribute::initialize(...).
3240  void initialize(Attributor &A) override {
3241  llvm_unreachable("NoCapture is not applicable to function returns!");
3242  }
3243 
3244  /// See AbstractAttribute::updateImpl(...).
3245  ChangeStatus updateImpl(Attributor &A) override {
3246  llvm_unreachable("NoCapture is not applicable to function returns!");
3247  }
3248 
3249  /// See AbstractAttribute::trackStatistics()
3250  void trackStatistics() const override {}
3251 };
3252 
3253 /// NoCapture attribute deduction for a call site return value.
3254 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
3255  AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3256 
3257  /// See AbstractAttribute::trackStatistics()
3258  void trackStatistics() const override {
3259  STATS_DECLTRACK_CSRET_ATTR(nocapture)
3260  }
3261 };
3262 
3263 /// ------------------ Value Simplify Attribute ----------------------------
3264 struct AAValueSimplifyImpl : AAValueSimplify {
3265  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3266 
3267  /// See AbstractAttribute::getAsStr().
3268  const std::string getAsStr() const override {
3269  return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3270  : "not-simple";
3271  }
3272 
3273  /// See AbstractAttribute::trackStatistics()
3274  void trackStatistics() const override {}
3275 
3276  /// See AAValueSimplify::getAssumedSimplifiedValue()
3277  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3278  if (!getAssumed())
3279  return const_cast<Value *>(&getAssociatedValue());
3280  return SimplifiedAssociatedValue;
3281  }
3282  void initialize(Attributor &A) override {}
3283 
3284  /// Helper function for querying AAValueSimplify and updating the candidate.
3285  /// \param QueryingValue Value trying to unify with SimplifiedValue
3286  /// \param AccumulatedSimplifiedValue Current simplification result.
3287  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3288  Value &QueryingValue,
3289  Optional<Value *> &AccumulatedSimplifiedValue) {
3290  // FIXME: Add typecast support.
3291 
3292  auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
3293  QueryingAA, IRPosition::value(QueryingValue));
3294 
3295  Optional<Value *> QueryingValueSimplified =
3296  ValueSimpifyAA.getAssumedSimplifiedValue(A);
3297 
3298  if (!QueryingValueSimplified.hasValue())
3299  return true;
3300 
3301  if (!QueryingValueSimplified.getValue())
3302  return false;
3303 
3304  Value &QueryingValueSimplifiedUnwrapped =
3305  *QueryingValueSimplified.getValue();
3306 
3307  if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3308  return true;
3309 
3310  if (AccumulatedSimplifiedValue.hasValue())
3311  return AccumulatedSimplifiedValue == QueryingValueSimplified;
3312 
3313  LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3314  << " is assumed to be "
3315  << QueryingValueSimplifiedUnwrapped << "\n");
3316 
3317  AccumulatedSimplifiedValue = QueryingValueSimplified;
3318  return true;
3319  }
3320 
3321  /// See AbstractAttribute::manifest(...).
3322  ChangeStatus manifest(Attributor &A) override {
3323  ChangeStatus Changed = ChangeStatus::UNCHANGED;
3324 
3325  if (!SimplifiedAssociatedValue.hasValue() ||
3326  !SimplifiedAssociatedValue.getValue())
3327  return Changed;
3328 
3329  if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3330  // We can replace the AssociatedValue with the constant.
3331  Value &V = getAssociatedValue();
3332  if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3333  LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3334  << "\n");
3335  V.replaceAllUsesWith(C);
3336  Changed = ChangeStatus::CHANGED;
3337  }
3338  }
3339 
3340  return Changed | AAValueSimplify::manifest(A);
3341  }
3342 
3343 protected:
3344  // An assumed simplified value. Initially, it is set to Optional::None, which
3345  // means that the value is not clear under current assumptions. If in the
3346  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
3347  // the original associated value.
3348  Optional<Value *> SimplifiedAssociatedValue;
3349 };
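// The unification in checkAndUpdate behaves like a join on a flat lattice,
// with Optional::None meaning "no candidate yet" and disagreeing candidates
// invalidating the simplification. Stripped of the attribute plumbing and
// the undef special case, the core is roughly (a sketch, names hypothetical):
//
// \code
// static bool joinCandidate(Optional<Value *> &Acc, Value *New) {
//   if (!Acc.hasValue()) { // the first candidate is adopted
//     Acc = New;
//     return true;
//   }
//   return Acc.getValue() == New; // later candidates must agree
// }
// \endcode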
3350 
3351 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3352  AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3353 
3354  /// See AbstractAttribute::updateImpl(...).
3355  ChangeStatus updateImpl(Attributor &A) override {
3356  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3357 
3358  auto PredForCallSite = [&](AbstractCallSite ACS) {
3359  // Check if we have an associated argument or not (which can happen for
3360  // callback calls).
3361  if (Value *ArgOp = ACS.getCallArgOperand(getArgNo()))
3362  return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3363  return false;
3364  };
3365 
3366  if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3367  return indicatePessimisticFixpoint();
3368 
3369  // If a candidate was found in this update, return CHANGED.
3370  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3371  ? ChangeStatus::UNCHANGED
3372  : ChangeStatus::CHANGED;
3373  }
3374 
3375  /// See AbstractAttribute::trackStatistics()
3376  void trackStatistics() const override {
3377  STATS_DECLTRACK_ARG_ATTR(value_simplify)
3378  }
3379 };
3380 
3381 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3382  AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3383 
3384  /// See AbstractAttribute::updateImpl(...).
3385  ChangeStatus updateImpl(Attributor &A) override {
3386  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3387 
3388  auto PredForReturned = [&](Value &V) {
3389  return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3390  };
3391 
3392  if (!A.checkForAllReturnedValues(PredForReturned, *this))
3393  return indicatePessimisticFixpoint();
3394 
3395  // If a candidate was found in this update, return CHANGED.
3396  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3397  ? ChangeStatus::UNCHANGED
3398  : ChangeStatus::CHANGED;
3399  }
3400  /// See AbstractAttribute::trackStatistics()
3401  void trackStatistics() const override {
3402  STATS_DECLTRACK_FNRET_ATTR(value_simplify)
3403  }
3404 };
3405 
3406 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
3407  AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3408 
3409  /// See AbstractAttribute::initialize(...).
3410  void initialize(Attributor &A) override {
3411  Value &V = getAnchorValue();
3412 
3413  // TODO: add other cases
3414  if (isa<Constant>(V) || isa<UndefValue>(V))
3415  indicatePessimisticFixpoint();
3416  }
3417 
3418  /// See AbstractAttribute::updateImpl(...).
3419  ChangeStatus updateImpl(Attributor &A) override {
3420  bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3421 
3422  auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
3423  auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
3424  if (!Stripped && this == &AA) {
3425  // TODO: Look at the instruction and check recursively.
3426  LLVM_DEBUG(
3427  dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
3428  << V << "\n");
3429  indicatePessimisticFixpoint();
3430  return false;
3431  }
3432  return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3433  };
3434 
3435  if (!genericValueTraversal<AAValueSimplify, BooleanState>(
3436  A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
3437  VisitValueCB))
3438  return indicatePessimisticFixpoint();
3439 
3440  // If a candidate was found in this update, return CHANGED.
3441 
3442  return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3443  ? ChangeStatus::UNCHANGED
3444  : ChangeStatus::CHANGED;
3445  }
3446 
3447  /// See AbstractAttribute::trackStatistics()
3448  void trackStatistics() const override {
3449  STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
3450  }
3451 };
3452 
3453 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
3454  AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3455 
3456  /// See AbstractAttribute::initialize(...).
3457  void initialize(Attributor &A) override {
3458  SimplifiedAssociatedValue = &getAnchorValue();
3459  indicateOptimisticFixpoint();
3460  }
3461  /// See AbstractAttribute::updateImpl(...).
3462  ChangeStatus updateImpl(Attributor &A) override {
3463  llvm_unreachable(
3464  "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
3465  }
3466  /// See AbstractAttribute::trackStatistics()
3467  void trackStatistics() const override {
3468  STATS_DECLTRACK_FN_ATTR(value_simplify)
3469  }
3470 };
3471 
3472 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
3473  AAValueSimplifyCallSite(const IRPosition &IRP)
3474  : AAValueSimplifyFunction(IRP) {}
3475  /// See AbstractAttribute::trackStatistics()
3476  void trackStatistics() const override {
3477  STATS_DECLTRACK_CS_ATTR(value_simplify)
3478  }
3479 };
3480 
3481 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
3482  AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
3483  : AAValueSimplifyReturned(IRP) {}
3484 
3485  void trackStatistics() const override {
3486  STATS_DECLTRACK_CSRET_ATTR(value_simplify)
3487  }
3488 };
3489 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
3490  AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
3491  : AAValueSimplifyFloating(IRP) {}
3492 
3493  void trackStatistics() const override {
3494  STATS_DECLTRACK_CSARG_ATTR(value_simplify)
3495  }
3496 };
3497 
3498 /// ----------------------- Heap-To-Stack Conversion ---------------------------
3499 struct AAHeapToStackImpl : public AAHeapToStack {
3500  AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
3501 
3502  const std::string getAsStr() const override {
3503  return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
3504  }
3505 
3506  ChangeStatus manifest(Attributor &A) override {
3507  assert(getState().isValidState() &&
3508  "Attempted to manifest an invalid state!");
3509 
3510  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3511  Function *F = getAssociatedFunction();
3512  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3513 
3514  for (Instruction *MallocCall : MallocCalls) {
3515  // This malloc cannot be replaced.
3516  if (BadMallocCalls.count(MallocCall))
3517  continue;
3518 
3519  for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
3520  LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
3521  A.deleteAfterManifest(*FreeCall);
3522  HasChanged = ChangeStatus::CHANGED;
3523  }
3524 
3525  LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
3526  << "\n");
3527 
3528  Constant *Size;
3529  if (isCallocLikeFn(MallocCall, TLI)) {
3530  auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
3531  auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
3532  APInt TotalSize = SizeT->getValue() * Num->getValue();
3533  Size =
3534  ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
3535  } else {
3536  Size = cast<ConstantInt>(MallocCall->getOperand(0));
3537  }
3538 
3539  unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
3540  Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
3541  Size, "", MallocCall->getNextNode());
3542 
3543  if (AI->getType() != MallocCall->getType())
3544  AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
3545  AI->getNextNode());
3546 
3547  MallocCall->replaceAllUsesWith(AI);
3548 
3549  if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
3550  auto *NBB = II->getNormalDest();
3551  BranchInst::Create(NBB, MallocCall->getParent());
3552  A.deleteAfterManifest(*MallocCall);
3553  } else {
3554  A.deleteAfterManifest(*MallocCall);
3555  }
3556 
3557  if (isCallocLikeFn(MallocCall, TLI)) {
3558  auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
3559  AI->getNextNode());
3560  Value *Ops[] = {
3561  BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
3562  ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
3563 
3564  Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
3565  Module *M = F->getParent();
3566  Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
3567  CallInst::Create(Fn, Ops, "", BI->getNextNode());
3568  }
3569  HasChanged = ChangeStatus::CHANGED;
3570  }
3571 
3572  return HasChanged;
3573  }
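// The manifest logic above rewrites, e.g. (hypothetical IR, not from this
// file),
//
// \code
// %p = call i8* @malloc(i64 16)
// ...
// call void @free(i8* %p)
// \endcode
//
// into
//
// \code
// %p = alloca i8, i64 16
// ...
// \endcode
//
// with the paired free erased; for calloc an llvm.memset call is emitted
// after the alloca to preserve the zero-initialization.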
3574 
3575  /// Collection of all malloc calls in a function.
3576  SmallSetVector<Instruction *, 4> MallocCalls;
3577 
3578  /// Collection of malloc calls that cannot be converted.
3579  DenseSet<const Instruction *> BadMallocCalls;
3580 
3581  /// A map for each malloc call to the set of associated free calls.
3582  DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
3583 
3584  ChangeStatus updateImpl(Attributor &A) override;
3585 };
3586 
3587 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
3588  const Function *F = getAssociatedFunction();
3589  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
3590 
3591  auto UsesCheck = [&](Instruction &I) {
3592  SmallPtrSet<const Use *, 8> Visited;
3593  SmallVector<const Use *, 8> Worklist;
3594 
3595  for (Use &U : I.uses())
3596  Worklist.push_back(&U);
3597 
3598  while (!Worklist.empty()) {
3599  const Use *U = Worklist.pop_back_val();
3600  if (!Visited.insert(U).second)
3601  continue;
3602 
3603  auto *UserI = U->getUser();
3604 
3605  if (isa<LoadInst>(UserI))
3606  continue;
3607  if (auto *SI = dyn_cast<StoreInst>(UserI)) {
3608  if (SI->getValueOperand() == U->get()) {
3609  LLVM_DEBUG(dbgs() << "[H2S] escaping store to memory: " << *UserI << "\n");
3610  return false;
3611  }
3612  // A store into the malloc'ed memory is fine.
3613  continue;
3614  }
3615 
3616  // NOTE: Right now, if a function that takes the malloc'ed pointer as an
3617  // argument frees memory, we assume that the malloc'ed pointer is freed.
3618 
3619  // TODO: Add nofree callsite argument attribute to indicate that pointer
3620  // argument is not freed.
3621  if (auto *CB = dyn_cast<CallBase>(UserI)) {
3622  if (!CB->isArgOperand(U))
3623  continue;
3624 
3625  if (CB->isLifetimeStartOrEnd())
3626  continue;
3627 
3628  // Record the free call associated with this malloc.
3629  if (isFreeCall(UserI, TLI)) {
3630  FreesForMalloc[&I].insert(
3631  cast<Instruction>(const_cast<User *>(UserI)));
3632  continue;
3633  }
3634 
3635  // If the callee does not free memory we are fine.
3636  const auto &NoFreeAA =
3637  A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(*CB));
3638 
3639  unsigned ArgNo = U - CB->arg_begin();
3640  const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3641  *this, IRPosition::callsite_argument(*CB, ArgNo));
3642 
3643  if (!NoCaptureAA.isAssumedNoCapture() || !NoFreeAA.isAssumedNoFree()) {
3644  LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
3645  return false;
3646  }
3647  continue;
3648  }
3649 
3650  if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI)) {
3651  for (Use &U : UserI->uses())
3652  Worklist.push_back(&U);
3653  continue;
3654  }
3655 
3656  // Unknown user.
3657  LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
3658  return false;
3659  }
3660  return true;
3661  };
3662 
3663  auto MallocCallocCheck = [&](Instruction &I) {
3664  if (BadMallocCalls.count(&I))
3665  return true;
3666 
3667  bool IsMalloc = isMallocLikeFn(&I, TLI);
3668  bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
3669  if (!IsMalloc && !IsCalloc) {
3670  BadMallocCalls.insert(&I);
3671  return true;
3672  }
3673 
3674  if (IsMalloc) {
3675  if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
3676  if (Size->getValue().sle(MaxHeapToStackSize))
3677  if (UsesCheck(I)) {
3678  MallocCalls.insert(&I);
3679  return true;
3680  }
3681  } else if (IsCalloc) {
3682  bool Overflow = false;
3683  if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
3684  if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
3685  if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
3686  .sle(MaxHeapToStackSize))
3687  if (!Overflow && UsesCheck(I)) {
3688  MallocCalls.insert(&I);
3689  return true;
3690  }
3691  }
3692 
3693  BadMallocCalls.insert(&I);
3694  return true;
3695  };
3696 
3697  size_t NumBadMallocs = BadMallocCalls.size();
3698 
3699  A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
3700 
3701  if (NumBadMallocs != BadMallocCalls.size())
3702  return ChangeStatus::CHANGED;
3703 
3704  return ChangeStatus::UNCHANGED;
3705 }
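// UsesCheck accepts stores *into* the allocation but rejects any use that
// lets the pointer itself escape, e.g. (hypothetical IR, @g being an
// arbitrary global):
//
// \code
// %p = call i8* @malloc(i64 8)
// store i8 0, i8* %p     ; fine, a store into the malloc'ed memory
// store i8* %p, i8** @g  ; the pointer escapes, the call becomes "bad"
// \endcode
//
// Only calls with constant sizes no larger than MaxHeapToStackSize make it
// into MallocCalls at all.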
3706 
3707 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
3708  AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
3709 
3710  /// See AbstractAttribute::trackStatistics()
3711  void trackStatistics() const override {
3712  STATS_DECL(MallocCalls, Function,
3713  "Number of MallocCalls converted to allocas");
3714  BUILD_STAT_NAME(MallocCalls, Function) += MallocCalls.size();
3715  }
3716 };
3717 
3718 /// -------------------- Memory Behavior Attributes ----------------------------
3719 /// Includes read-none, read-only, and write-only.
3720 /// ----------------------------------------------------------------------------
3721 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
3722  AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
3723 
3724  /// See AbstractAttribute::initialize(...).
3725  void initialize(Attributor &A) override {
3726  intersectAssumedBits(BEST_STATE);
3727  getKnownStateFromValue(getIRPosition(), getState());
3728  IRAttribute::initialize(A);
3729  }
3730 
3731  /// Return the memory behavior information encoded in the IR for \p IRP.
3732  static void getKnownStateFromValue(const IRPosition &IRP,
3733  IntegerState &State) {
3734  SmallVector<Attribute, 2> Attrs;
3735  IRP.getAttrs(AttrKinds, Attrs);
3736  for (const Attribute &Attr : Attrs) {
3737  switch (Attr.getKindAsEnum()) {
3738  case Attribute::ReadNone:
3739  State.addKnownBits(NO_ACCESSES);
3740  break;
3741  case Attribute::ReadOnly:
3742  State.addKnownBits(NO_WRITES);
3743  break;
3744  case Attribute::WriteOnly:
3745  State.addKnownBits(NO_READS);
3746  break;
3747  default:
3748  llvm_unreachable("Unexpected attribute!");
3749  }
3750  }
3751 
3752  if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
3753  if (!I->mayReadFromMemory())
3754  State.addKnownBits(NO_READS);
3755  if (!I->mayWriteToMemory())
3756  State.addKnownBits(NO_WRITES);
3757  }
3758  }
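// The three properties compose bit-wise; assuming the encoding declared for
// AAMemoryBehavior in Attributor.h, read-none is exactly the conjunction of
// the other two:
//
// \code
// static_assert(AAMemoryBehavior::NO_ACCESSES ==
//                   (AAMemoryBehavior::NO_READS | AAMemoryBehavior::NO_WRITES),
//               "no-accesses is no-reads plus no-writes");
// \endcode
//
// Hence removing NO_READS from a read-none state degrades it to write-only,
// mirroring the switch above.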
3759 
3760  /// See AbstractAttribute::getDeducedAttributes(...).
3761  void getDeducedAttributes(LLVMContext &Ctx,
3762  SmallVectorImpl<Attribute> &Attrs) const override {
3763  assert(Attrs.size() == 0);
3764  if (isAssumedReadNone())
3765  Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
3766  else if (isAssumedReadOnly())
3767  Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
3768  else if (isAssumedWriteOnly())
3769  Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
3770  assert(Attrs.size() <= 1);
3771  }
3772 
3773  /// See AbstractAttribute::manifest(...).
3774  ChangeStatus manifest(Attributor &A) override {
3775  IRPosition &IRP = getIRPosition();
3776 
3777  // Check if we would improve the existing attributes first.
3778  SmallVector<Attribute, 4> DeducedAttrs;
3779  getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
3780  if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
3781  return IRP.hasAttr(Attr.getKindAsEnum(),
3782  /* IgnoreSubsumingPositions */ true);
3783  }))
3784  return ChangeStatus::UNCHANGED;
3785 
3786  // Clear existing attributes.
3787  IRP.removeAttrs(AttrKinds);
3788 
3789  // Use the generic manifest method.
3790  return IRAttribute::manifest(A);
3791  }
3792 
3793  /// See AbstractState::getAsStr().
3794  const std::string getAsStr() const override {
3795  if (isAssumedReadNone())
3796  return "readnone";
3797  if (isAssumedReadOnly())
3798  return "readonly";
3799  if (isAssumedWriteOnly())
3800  return "writeonly";
3801  return "may-read/write";
3802  }
3803 
3804  /// The set of IR attributes AAMemoryBehavior deals with.
3805  static const Attribute::AttrKind AttrKinds[3];
3806 };
3807 
3808 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
3809  Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
3810 
3811 /// Memory behavior attribute for a floating value.
3812 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
3813  AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3814 
3815  /// See AbstractAttribute::initialize(...).
3816  void initialize(Attributor &A) override {
3817  AAMemoryBehaviorImpl::initialize(A);
3818  // Initialize the use vector with all direct uses of the associated value.
3819  for (const Use &U : getAssociatedValue().uses())
3820  Uses.insert(&U);
3821  }
3822 
3823  /// See AbstractAttribute::updateImpl(...).
3824  ChangeStatus updateImpl(Attributor &A) override;
3825 
3826  /// See AbstractAttribute::trackStatistics()
3827  void trackStatistics() const override {
3828  if (isAssumedReadNone())
3830  else if (isAssumedReadOnly())
3832  else if (isAssumedWriteOnly())
3834  }
3835 
3836 private:
3837  /// Return true if users of \p UserI might access the underlying
3838  /// variable/location described by \p U and should therefore be analyzed.
3839  bool followUsersOfUseIn(Attributor &A, const Use *U,
3840  const Instruction *UserI);
3841 
3842  /// Update the state according to the effect of use \p U in \p UserI.
3843  void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
3844 
3845 protected:
3846  /// Container for (transitive) uses of the associated argument.
3847  SetVector<const Use *> Uses;
3848  };
3849 
3850 /// Memory behavior attribute for function argument.
3851 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
3852  AAMemoryBehaviorArgument(const IRPosition &IRP)
3853  : AAMemoryBehaviorFloating(IRP) {}
3854 
3855  /// See AbstractAttribute::initialize(...).
3856  void initialize(Attributor &A) override {
3857  AAMemoryBehaviorFloating::initialize(A);
3858 
3859  // Initialize the use vector with all direct uses of the associated value.
3860  Argument *Arg = getAssociatedArgument();
3861  if (!Arg || !Arg->getParent()->hasExactDefinition())
3862  indicatePessimisticFixpoint();
3863  }
3864 
3865  ChangeStatus manifest(Attributor &A) override {
3866  // TODO: From readattrs.ll: "inalloca parameters are always
3867  // considered written"
3868  if (hasAttr({Attribute::InAlloca})) {
3869  removeKnownBits(NO_WRITES);
3870  removeAssumedBits(NO_WRITES);
3871  }
3872  return AAMemoryBehaviorFloating::manifest(A);
3873  }
3874 
3875 
3876  /// See AbstractAttribute::trackStatistics()
3877  void trackStatistics() const override {
3878  if (isAssumedReadNone())
3879  STATS_DECLTRACK_ARG_ATTR(readnone)
3880  else if (isAssumedReadOnly())
3881  STATS_DECLTRACK_ARG_ATTR(readonly)
3882  else if (isAssumedWriteOnly())
3883  STATS_DECLTRACK_ARG_ATTR(writeonly)
3884  }
3885 };
3886 
3887 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
3888  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
3889  : AAMemoryBehaviorArgument(IRP) {}
3890 
3891  /// See AbstractAttribute::updateImpl(...).
3892  ChangeStatus updateImpl(Attributor &A) override {
3893  // TODO: Once we have call site specific value information we can provide
3894  // call site specific liveness information and then it makes
3895  // sense to specialize attributes for call site arguments instead of
3896  // redirecting requests to the callee argument.
3897  Argument *Arg = getAssociatedArgument();
3898  const IRPosition &ArgPos = IRPosition::argument(*Arg);
3899  auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
3900  return clampStateAndIndicateChange(
3901  getState(),
3902  static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
3903  }
3904 
3905  /// See AbstractAttribute::trackStatistics()
3906  void trackStatistics() const override {
3907  if (isAssumedReadNone())
3908  STATS_DECLTRACK_CSARG_ATTR(readnone)
3909  else if (isAssumedReadOnly())
3910  STATS_DECLTRACK_CSARG_ATTR(readonly)
3911  else if (isAssumedWriteOnly())
3912  STATS_DECLTRACK_CSARG_ATTR(writeonly)
3913  }
3914 };
3915 
3916 /// Memory behavior attribute for a call site return position.
3917 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
3918  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
3919  : AAMemoryBehaviorFloating(IRP) {}
3920 
3921  /// See AbstractAttribute::manifest(...).
3922  ChangeStatus manifest(Attributor &A) override {
3923  // We do not annotate returned values.
3924  return ChangeStatus::UNCHANGED;
3925  }
3926 
3927  /// See AbstractAttribute::trackStatistics()
3928  void trackStatistics() const override {}
3929 };
3930 
3931 /// An AA to represent the memory behavior function attributes.
3932 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
3933  AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3934 
3935  /// See AbstractAttribute::updateImpl(Attributor &A).
3936  virtual ChangeStatus updateImpl(Attributor &A) override;
3937 
3938  /// See AbstractAttribute::manifest(...).
3939  ChangeStatus manifest(Attributor &A) override {
3940  Function &F = cast<Function>(getAnchorValue());
3941  if (isAssumedReadNone()) {
3942  F.removeFnAttr(Attribute::ArgMemOnly);
3943  F.removeFnAttr(Attribute::InaccessibleMemOnly);
3944  F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
3945  }
3946  return AAMemoryBehaviorImpl::manifest(A);
3947  }
3948 
3949  /// See AbstractAttribute::trackStatistics()
3950  void trackStatistics() const override {
3951  if (isAssumedReadNone())
3952  STATS_DECLTRACK_FN_ATTR(readnone)
3953  else if (isAssumedReadOnly())
3954  STATS_DECLTRACK_FN_ATTR(readonly)
3955  else if (isAssumedWriteOnly())
3956  STATS_DECLTRACK_FN_ATTR(writeonly)
3957  }
3958 };
3959 
3960 /// AAMemoryBehavior attribute for call sites.
3961 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
3962  AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
3963 
3964  /// See AbstractAttribute::initialize(...).
3965  void initialize(Attributor &A) override {
3966  AAMemoryBehaviorImpl::initialize(A);
3967  Function *F = getAssociatedFunction();
3968  if (!F || !F->hasExactDefinition())
3969  indicatePessimisticFixpoint();
3970  }
3971 
3972  /// See AbstractAttribute::updateImpl(...).
3973  ChangeStatus updateImpl(Attributor &A) override {
3974  // TODO: Once we have call site specific value information we can provide
3975  // call site specific liveness information and then it makes
3976  // sense to specialize attributes for call site arguments instead of
3977  // redirecting requests to the callee argument.
3978  Function *F = getAssociatedFunction();
3979  const IRPosition &FnPos = IRPosition::function(*F);
3980  auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3981  return clampStateAndIndicateChange(
3982  getState(), static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
3983  }
3984 
3985  /// See AbstractAttribute::trackStatistics()
3986  void trackStatistics() const override {
3987  if (isAssumedReadNone())
3988  STATS_DECLTRACK_CS_ATTR(readnone)
3989  else if (isAssumedReadOnly())
3990  STATS_DECLTRACK_CS_ATTR(readonly)
3991  else if (isAssumedWriteOnly())
3992  STATS_DECLTRACK_CS_ATTR(writeonly)
3993  }
3994 };
3995 } // namespace
3996 
3997 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
3998 
3999  // The current assumed state used to determine a change.
4000  auto AssumedState = getAssumed();
4001 
4002  auto CheckRWInst = [&](Instruction &I) {
4003  // If the instruction has an own memory behavior state, use it to restrict
4004  // the local state. No further analysis is required as the other memory
4005  // state is as optimistic as it gets.
4006  if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4007  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4008  *this, IRPosition::callsite_function(ICS));
4009  intersectAssumedBits(MemBehaviorAA.getAssumed());
4010  return !isAtFixpoint();
4011  }
4012 
4013  // Remove access kind modifiers if necessary.
4014  if (I.mayReadFromMemory())
4015  removeAssumedBits(NO_READS);
4016  if (I.mayWriteToMemory())
4017  removeAssumedBits(NO_WRITES);
4018  return !isAtFixpoint();
4019  };
4020 
4021  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4022  return indicatePessimisticFixpoint();
4023 
4024  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4025  : ChangeStatus::UNCHANGED;
4026 }
4027 
4028 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
4029 
4030  const IRPosition &IRP = getIRPosition();
4031  const IRPosition &FnPos = IRPosition::function_scope(IRP);
4032  AAMemoryBehavior::StateType &S = getState();
4033 
4034  // First, check the function scope. We take the known information and we avoid
4035  // work if the assumed information implies the current assumed information for
4036  // this attribute.
4037  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4038  S.addKnownBits(FnMemAA.getKnown());
4039  if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4040  return ChangeStatus::UNCHANGED;
4041 
4042  // Make sure the value is not captured (except through "return"). If it is,
4043  // any information derived would be irrelevant anyway as we cannot check the
4044  // potential aliases introduced by the capture. However, there is no need to
4045  // fall back to anything less optimistic than the function state.
4046  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4047  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4048  S.intersectAssumedBits(FnMemAA.getAssumed());
4049  return ChangeStatus::CHANGED;
4050  }
4051 
4052  // The current assumed state used to determine a change.
4053  auto AssumedState = S.getAssumed();
4054 
4055  // Liveness information to exclude dead users.
4056  // TODO: Take the FnPos once we have call site specific liveness information.
4057  const auto &LivenessAA = A.getAAFor<AAIsDead>(
4058  *this, IRPosition::function(*IRP.getAssociatedFunction()));
4059 
4060  // Visit and expand uses until all are analyzed or a fixpoint is reached.
4061  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4062  const Use *U = Uses[i];
4063  Instruction *UserI = cast<Instruction>(U->getUser());
4064  LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4065  << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4066  << "]\n");
4067  if (LivenessAA.isAssumedDead(UserI))
4068  continue;
4069 
4070  // Check if the users of UserI should also be visited.
4071  if (followUsersOfUseIn(A, U, UserI))
4072  for (const Use &UserIUse : UserI->uses())
4073  Uses.insert(&UserIUse);
4074 
4075  // If UserI might touch memory we analyze the use in detail.
4076  if (UserI->mayReadOrWriteMemory())
4077  analyzeUseIn(A, U, UserI);
4078  }
4079 
4080  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4081  : ChangeStatus::UNCHANGED;
4082 }
4083 
4084 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4085  const Instruction *UserI) {
4086  // The loaded value is unrelated to the pointer argument, no need to
4087  // follow the users of the load.
4088  if (isa<LoadInst>(UserI))
4089  return false;
4090 
4091  // By default we follow all uses assuming UserI might leak information on U,
4092  // we have special handling for call sites operands though.
4093  ImmutableCallSite ICS(UserI);
4094  if (!ICS || !ICS.isArgOperand(U))
4095  return true;
4096 
4097  // If the use is a call argument known not to be captured, the users of
4098  // the call do not need to be visited because they have to be unrelated to
4099  // the input. Note that this check is not trivial even though we disallow
4100  // general capturing of the underlying argument. The reason is that the
4101  // call might pass the argument "through return", which we allow and for which we
4102  // need to check call users.
4103  unsigned ArgNo = ICS.getArgumentNo(U);
4104  const auto &ArgNoCaptureAA =
4105  A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4106  return !ArgNoCaptureAA.isAssumedNoCapture();
4107 }
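// For example (hypothetical IR, not from this file), given
//
// \code
// %v = load i8, i8* %p            ; users of %v are unrelated to %p
// call void @f(i8* nocapture %p)  ; users of the call need no visit
// %r = call i8* @g(i8* %p)        ; may return %p, so %r's users are visited
// \endcode
//
// only the last case forces the traversal to keep following uses, because
// the argument may be communicated back through the return value.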
4108 
4109 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4110  const Instruction *UserI) {
4111  assert(UserI->mayReadOrWriteMemory());
4112 
4113  switch (UserI->getOpcode()) {
4114  default:
4115  // TODO: Handle all atomics and other side-effect operations we know of.
4116  break;
4117  case Instruction::Load:
4118  // Loads cause the NO_READS property to disappear.
4119  removeAssumedBits(NO_READS);
4120  return;
4121 
4122  case Instruction::Store:
4123  // Stores cause the NO_WRITES property to disappear if the use is the
4124  // pointer operand. Note that we do assume that capturing was taken care of
4125  // somewhere else.
4126  if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4127  removeAssumedBits(NO_WRITES);
4128  return;
4129 
4130  case Instruction::Call:
4131  case Instruction::CallBr:
4132  case Instruction::Invoke: {
4133  // For call sites we look at the argument memory behavior attribute (this
4134  // could be recursive!) in order to restrict our own state.
4135  ImmutableCallSite ICS(UserI);
4136 
4137  // Give up on operand bundles.
4138  if (ICS.isBundleOperand(U)) {
4139  indicatePessimisticFixpoint();
4140  return;
4141  }
4142 
4143  // Calling a function does read the function pointer, and may write it if the
4144  // function is self-modifying.
4145  if (ICS.isCallee(U)) {
4146  removeAssumedBits(NO_READS);
4147  break;
4148  }
4149 
4150  // Adjust the possible access behavior based on the information on the
4151  // argument.
4152  unsigned ArgNo = ICS.getArgumentNo(U);
4153  const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4154  const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4155  // "assumed" has at most the same bits as the MemBehaviorAA assumed
4156  // and at least "known".
4157  intersectAssumedBits(MemBehaviorAA.getAssumed());
4158  return;
4159  }
4160  };
4161 
4162  // Generally, look at the "may-properties" and adjust the assumed state if we
4163  // did not trigger special handling before.
4164  if (UserI->mayReadFromMemory())
4165  removeAssumedBits(NO_READS);
4166  if (UserI->mayWriteToMemory())
4167  removeAssumedBits(NO_WRITES);
4168 }
4169 
4170 /// ----------------------------------------------------------------------------
4171 /// Attributor
4172 /// ----------------------------------------------------------------------------
4173 
4174 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
4175  const AAIsDead *LivenessAA) {
4176  const Instruction *CtxI = AA.getIRPosition().getCtxI();
4177  if (!CtxI)
4178  return false;
4179 
4180  if (!LivenessAA)
4181  LivenessAA =
4182  &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4183  /* TrackDependence */ false);
4184 
4185  // Don't check liveness for AAIsDead.
4186  if (&AA == LivenessAA)
4187  return false;
4188 
4189  if (!LivenessAA->isAssumedDead(CtxI))
4190  return false;
4191 
4192  // We actually used liveness information so we have to record a dependence.
4193  recordDependence(*LivenessAA, AA);
4194 
4195  return true;
4196 }
4197 
4198 bool Attributor::checkForAllCallSites(
4199  const function_ref<bool(AbstractCallSite)> &Pred,
4200  const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4201  // We can try to determine information from
4202  // the call sites. However, this is only possible if all call sites are
4203  // known, hence the function must have internal linkage.
4204  const IRPosition &IRP = QueryingAA.getIRPosition();
4205  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4206  if (!AssociatedFunction) {
4207  LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4208  << "\n");
4209  return false;
4210  }
4211 
4212  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4213  &QueryingAA);
4214 }
4215 
4217  const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4218  bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4219  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4220  LLVM_DEBUG(
4221  dbgs()
4222  << "[Attributor] Function " << Fn.getName()
4223  << " has no internal linkage, hence not all call sites are known\n");
4224  return false;
4225  }
4226 
4227  for (const Use &U : Fn.uses()) {
4228  AbstractCallSite ACS(&U);
4229  if (!ACS) {
4230  LLVM_DEBUG(dbgs() << "[Attributor] Function "
4231  << Fn.getName()
4232  << " has non call site use " << *U.get() << " in "
4233  << *U.getUser() << "\n");
4234  return false;
4235  }
4236 
4237  Instruction *I = ACS.getInstruction();
4238  Function *Caller = I->getFunction();
4239 
4240  const auto *LivenessAA =
4241  lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4242  /* TrackDependence */ false);
4243 
4244  // Skip dead calls.
4245  if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4246  // We actually used liveness information so we have to record a
4247  // dependence.
4248  if (QueryingAA)
4249  recordDependence(*LivenessAA, *QueryingAA);
4250  continue;
4251  }
4252 
4253  const Use *EffectiveUse =
4254  ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4255  if (!ACS.isCallee(EffectiveUse)) {
4256  if (!RequireAllCallSites)
4257  continue;
4258  LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4259  << " is an invalid use of "
4260  << Fn.getName() << "\n");
4261  return false;
4262  }
4263 
4264  if (Pred(ACS))
4265  continue;
4266 
4267  LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4268  << *ACS.getInstruction() << "\n");
4269  return false;
4270  }
4271 
4272  return true;
4273 }
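// A usage sketch (hypothetical, not from this file): from inside an argument
// attribute's updateImpl one can ask whether every known call site passes a
// constant for the argument:
//
// \code
// auto ConstArgPred = [&](AbstractCallSite ACS) {
//   Value *Op = ACS.getCallArgOperand(getArgNo());
//   return Op && isa<Constant>(Op); // Op can be null for callback calls
// };
// if (!A.checkForAllCallSites(ConstArgPred, *this,
//                             /* RequireAllCallSites */ true))
//   return indicatePessimisticFixpoint();
// \endcode
//
// Requiring all call sites makes the walk fail fast for functions without
// internal linkage, exactly as checked above.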
4274 
4275 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
4276  const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4277  &Pred,
4278  const AbstractAttribute &QueryingAA) {
4279 
4280  const IRPosition &IRP = QueryingAA.getIRPosition();
4281  // Since we need to provide return instructions we have to have an exact
4282  // definition.
4283  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4284  if (!AssociatedFunction)
4285  return false;
4286 
4287  // If this is a call site query we use the call site specific return values
4288  // and liveness information.
4289  // TODO: use the function scope once we have call site AAReturnedValues.
4290  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4291  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4292  if (!AARetVal.getState().isValidState())
4293  return false;
4294 
4295  return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4296 }
4297 
4298 bool Attributor::checkForAllReturnedValues(
4299  const function_ref<bool(Value &)> &Pred,
4300  const AbstractAttribute &QueryingAA) {
4301 
4302  const IRPosition &IRP = QueryingAA.getIRPosition();
4303  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4304  if (!AssociatedFunction)
4305  return false;
4306 
4307  // TODO: use the function scope once we have call site AAReturnedValues.
4308  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4309  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4310  if (!AARetVal.getState().isValidState())
4311  return false;
4312 
4313  return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4314  [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4315  return Pred(RV);
4316  });
4317 }
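// A usage sketch (hypothetical, not from this file):
//
// \code
// auto OnlyConstantsReturned = [](Value &RV) { return isa<Constant>(RV); };
// bool AllConst = A.checkForAllReturnedValues(OnlyConstantsReturned, *this);
// \endcode
//
// The adapter above simply discards the return-instruction sets that the
// underlying AAReturnedValues interface provides.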
4318 
4319 static bool
4320 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
4321  const function_ref<bool(Instruction &)> &Pred,
4322  const AAIsDead *LivenessAA, bool &AnyDead,
4323  const ArrayRef<unsigned> &Opcodes) {
4324  for (unsigned Opcode : Opcodes) {
4325  for (Instruction *I : OpcodeInstMap[Opcode]) {
4326  // Skip dead instructions.
4327  if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4328  AnyDead = true;
4329  continue;
4330  }
4331 
4332  if (!Pred(*I))
4333  return false;
4334  }
4335  }
4336  return true;
4337 }
4338 
4339 bool Attributor::checkForAllInstructions(
4340  const llvm::function_ref<bool(Instruction &)> &Pred,
4341  const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
4342 
4343  const IRPosition &IRP = QueryingAA.getIRPosition();
4344  // Since we need to provide instructions we have to have an exact definition.
4345  const Function *AssociatedFunction = IRP.getAssociatedFunction();
4346  if (!AssociatedFunction)
4347  return false;
4348 
4349  // TODO: use the function scope once we have call site AAReturnedValues.
4350  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4351  const auto &LivenessAA =
4352  getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4353  bool AnyDead = false;
4354 
4355  auto &OpcodeInstMap =
4356  InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
4357  if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
4358  Opcodes))
4359  return false;
4360 
4361  // If we actually used liveness information, we have to record a dependence.
4362  if (AnyDead)
4363  recordDependence(LivenessAA, QueryingAA);
4364 
4365  return true;
4366 }
4367 
4368 bool Attributor::checkForAllReadWriteInstructions(
4369  const llvm::function_ref<bool(Instruction &)> &Pred,
4370  AbstractAttribute &QueryingAA) {
4371 
4372  const Function *AssociatedFunction =
4373  QueryingAA.getIRPosition().getAssociatedFunction();
4374  if (!AssociatedFunction)
4375  return false;
4376 
4377  // TODO: use the function scope once we have call site AAReturnedValues.
4378  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4379  const auto &LivenessAA =
4380  getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4381  bool AnyDead = false;
4382 
4383  for (Instruction *I :
4384  InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
4385  // Skip dead instructions.
4386  if (LivenessAA.isAssumedDead(I)) {
4387  AnyDead = true;
4388  continue;
4389  }
4390 
4391  if (!Pred(*I))
4392  return false;
4393  }
4394 
4395  // If we actually used liveness information, we have to record a dependence.
4396  if (AnyDead)
4397  recordDependence(LivenessAA, QueryingAA);
4398 
4399  return true;
4400 }
4401 
4402 ChangeStatus Attributor::run(Module &M) {
4403  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
4404  << AllAbstractAttributes.size()
4405  << " abstract attributes.\n");
4406 
4407  // Now that all abstract attributes are collected and initialized we start
4408  // the abstract analysis.
4409 
4410  unsigned IterationCounter = 1;
4411 
4412  SmallVector<AbstractAttribute *, 64> ChangedAAs;
4413  SetVector<AbstractAttribute *> Worklist;
4414  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
4415 
4416  bool RecomputeDependences = false;
4417 
4418  do {
4419  // Remember the size to determine new attributes.
4420  size_t NumAAs = AllAbstractAttributes.size();
4421  LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
4422  << ", Worklist size: " << Worklist.size() << "\n");
4423 
4424  // If dependences (=QueryMap) are recomputed we have to look at all abstract
4425  // attributes again, regardless of what changed in the last iteration.
4426  if (RecomputeDependences) {
4427  LLVM_DEBUG(
4428  dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
4429  QueryMap.clear();
4430  ChangedAAs.clear();
4431  Worklist.insert(AllAbstractAttributes.begin(),
4432  AllAbstractAttributes.end());
4433  }
4434 
4435  // Add all abstract attributes that are potentially dependent on one that
4436  // changed to the work list.
4437  for (AbstractAttribute *ChangedAA : ChangedAAs) {
4438  auto &QuerriedAAs = QueryMap[ChangedAA];
4439  Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
4440  }
4441 
4442  LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
4443  << ", Worklist+Dependent size: " << Worklist.size()
4444  << "\n");
4445 
4446  // Reset the changed set.
4447  ChangedAAs.clear();
4448 
4449  // Update all abstract attributes in the work list and record the ones that
4450  // changed.
4451  for (AbstractAttribute *AA : Worklist)
4452  if (!isAssumedDead(*AA, nullptr))
4453  if (AA->update(*this) == ChangeStatus::CHANGED)
4454  ChangedAAs.push_back(AA);
4455 
4456  // Check if we recompute the dependences in the next iteration.
4457  RecomputeDependences = (DepRecomputeInterval > 0 &&
4458  IterationCounter % DepRecomputeInterval == 0);
4459 
4460  // Add attributes to the changed set if they have been created in the last
4461  // iteration.
4462  ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
4463  AllAbstractAttributes.end());
4464 
4465  // Reset the work list and repopulate with the changed abstract attributes.
4466  // Note that dependent ones are added above.
4467  Worklist.clear();
4468  Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
4469 
4470  } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
4471  VerifyMaxFixpointIterations));
4472 
4473  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
4474  << IterationCounter << "/" << MaxFixpointIterations
4475  << " iterations\n");
4476 
4477  size_t NumFinalAAs = AllAbstractAttributes.size();
4478 
4479  // Reset abstract attributes not settled in a sound fixpoint by now. This
4480  // happens when we stopped the fixpoint iteration early. Note that only the
4481  // ones marked as "changed" *and* the ones transitively depending on them
4482  // need to be reverted to a pessimistic state. Others might not be in a
4483  // fixpoint state but we can use the optimistic results for them anyway.
4484  SmallPtrSet<AbstractAttribute *, 32> Visited;
4485  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
4486  AbstractAttribute *ChangedAA = ChangedAAs[u];
4487  if (!Visited.insert(ChangedAA).second)
4488  continue;
4489 
4490  AbstractState &State = ChangedAA->getState();
4491  if (!State.isAtFixpoint()) {
4492  State.indicatePessimisticFixpoint();
4493 
4494  NumAttributesTimedOut++;
4495  }
4496 
4497  auto &QuerriedAAs = QueryMap[ChangedAA];
4498  ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end());
4499  }
4500 
4501  LLVM_DEBUG({
4502  if (!Visited.empty())
4503  dbgs() << "\n[Attributor] Finalized " << Visited.size()
4504  << " abstract attributes.\n";
4505  });
4506 
4507  unsigned NumManifested = 0;
4508  unsigned NumAtFixpoint = 0;
4509  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
4510  for (AbstractAttribute *AA : AllAbstractAttributes) {
4511  AbstractState &State = AA->getState();
4512 
4513  // If there is not already a fixpoint reached, we can now take the
4514  // optimistic state. This is correct because we enforced a pessimistic one
4515  // on abstract attributes that were transitively dependent on a changed one
4516  // already above.
4517  if (!State.isAtFixpoint())
4518  State.indicateOptimisticFixpoint();
4519 
4520  // If the state is invalid, we do not try to manifest it.
4521  if (!State.isValidState())
4522  continue;
4523 
4524  // Skip dead code.
4525  if (isAssumedDead(*AA, nullptr))
4526  continue;
4527  // Manifest the state and record if we changed the IR.
4528  ChangeStatus LocalChange = AA->manifest(*this);
4529  if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
4530  AA->trackStatistics();
4531 
4532  ManifestChange = ManifestChange | LocalChange;
4533 
4534  NumAtFixpoint++;
4535  NumManifested += (LocalChange == ChangeStatus::CHANGED);
4536  }
4537 
4538  (void)NumManifested;
4539  (void)NumAtFixpoint;
4540  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
4541  << " arguments while " << NumAtFixpoint
4542  << " were in a valid fixpoint state\n");
4543 
4544  NumAttributesManifested += NumManifested;
4545  NumAttributesValidFixpoint += NumAtFixpoint;
4546 
4547  (void)NumFinalAAs;
4548  assert(
4549  NumFinalAAs == AllAbstractAttributes.size() &&
4550  "Expected the final number of abstract attributes to remain unchanged!");
4551 
4552  // Delete stuff at the end to avoid invalid references and to get a nice order.
4553  {
4554  LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
4555  << ToBeDeletedFunctions.size() << " functions and "
4556  << ToBeDeletedBlocks.size() << " blocks and "
4557  << ToBeDeletedInsts.size() << " instructions\n");
4558  for (Instruction *I : ToBeDeletedInsts) {
4559  if (!I->use_empty())
4560  I->replaceAllUsesWith(UndefValue::get(I->getType()));
4561  I->eraseFromParent();
4562  }
4563 
4564  if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
4565  SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
4566  ToBeDeletedBBs.reserve(NumDeadBlocks);
4567  ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
4568  DeleteDeadBlocks(ToBeDeletedBBs);
4569  STATS_DECL(AAIsDead, BasicBlock,
4570  "Number of dead basic blocks deleted.");
4571  }
4572 
4573  STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
4574  for (Function *Fn : ToBeDeletedFunctions) {
4575  Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
4576  Fn->eraseFromParent();
4577  STATS_TRACK(AAIsDead, Function)
4578  }
4579 
4580  // Identify dead internal functions and delete them. This happens outside
4581  // the other fixpoint analysis as we might treat potentially dead functions
4582  // as live to lower the number of iterations. If they happen to be dead, the
4583  // below fixpoint loop will identify and eliminate them.
4584  SmallVector<Function *, 8> InternalFns;
4585  for (Function &F : M)
4586  if (F.hasLocalLinkage())
4587  InternalFns.push_back(&F);
4588 
4589  bool FoundDeadFn = true;
4590  while (FoundDeadFn) {
4591  FoundDeadFn = false;
4592  for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
4593  Function *F = InternalFns[u];
4594  if (!F)
4595  continue;
4596 
4597  const auto *LivenessAA =
4598  lookupAAFor<AAIsDead>(IRPosition::function(*F));
4599  if (LivenessAA &&
4600  !checkForAllCallSites([](AbstractCallSite ACS) { return false; },
4601  *LivenessAA, true))
4602  continue;
4603 
4604  STATS_TRACK(AAIsDead, Function)
4605  F->replaceAllUsesWith(UndefValue::get(F->getType()));
4606  F->eraseFromParent();
4607  InternalFns[u] = nullptr;
4608  FoundDeadFn = true;
4609  }
4610  }
4611  }
4612 
4613  if (VerifyMaxFixpointIterations &&
4614  IterationCounter != MaxFixpointIterations) {
4615  errs() << "\n[Attributor] Fixpoint iteration done after: "
4616  << IterationCounter << "/" << MaxFixpointIterations
4617  << " iterations\n";
4618  llvm_unreachable("The fixpoint was not reached with exactly the number of "
4619  "specified iterations!");
4620  }
4621 
4622  return ManifestChange;
4623 }
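// A driver sketch (hypothetical; the Attributor constructor arguments are
// elided as they are not shown in this excerpt): a pass wires the pieces
// together roughly as follows:
//
// \code
// Attributor A(/* InfoCache, DepRecomputeInterval, ... */);
// for (Function &F : M) {
//   A.initializeInformationCache(F);
//   A.identifyDefaultAbstractAttributes(F);
// }
// bool Changed = A.run(M) == ChangeStatus::CHANGED;
// \endcode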
4624 
4625 void Attributor::initializeInformationCache(Function &F) {
4626 
4627  // Walk all instructions to find interesting instructions that might be
4628  // queried by abstract attributes during their initialization or update.
4629  // This has to happen before we create attributes.
4630  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
4631  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
4632 
4633  for (Instruction &I : instructions(&F)) {
4634  bool IsInterestingOpcode = false;
4635 
4636  // To allow easy access to all instructions in a function with a given
4637  // opcode we store them in the InfoCache. As not all opcodes are interesting
4638  // to concrete attributes we only cache the ones that are as identified in
4639  // the following switch.
4640  // Note: There are no concrete attributes now so this is initially empty.
4641  switch (I.getOpcode()) {
4642  default:
4643  assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
4644  "New call site/base instruction type needs to be known in the "
4645  "Attributor.");
4646  break;
4647  case Instruction::Load:
4648  // The alignment of a pointer is interesting for loads.
4649  case Instruction::Store:
4650  // The alignment of a pointer is interesting for stores.
4651  case Instruction::Call:
4652  case Instruction::CallBr:
4653  case Instruction::Invoke:
4654  case Instruction::CleanupRet:
4655  case Instruction::CatchSwitch:
4656  case Instruction::Resume:
4657  case Instruction::Ret:
4658  IsInterestingOpcode = true;
4659  }
4660  if (IsInterestingOpcode)
4661  InstOpcodeMap[I.getOpcode()].push_back(&I);
4662  if (I.mayReadOrWriteMemory())
4663  ReadOrWriteInsts.push_back(&I);
4664  }
4665 }
4666 
4667 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
4668  if (!VisitedFunctions.insert(&F).second)
4669  return;
4670 
4671  IRPosition FPos = IRPosition::function(F);
4672 
4673  // Check for dead BasicBlocks in every function.
4674  // We need dead instruction detection because we do not want to deal with
4675  // broken IR in which SSA rules do not apply.
4676  getOrCreateAAFor<AAIsDead>(FPos);
4677 
4678  // Every function might be "will-return".
4679  getOrCreateAAFor<AAWillReturn>(FPos);
4680 
4681  // Every function can be nounwind.
4682  getOrCreateAAFor<AANoUnwind>(FPos);
4683 
4684  // Every function might be marked "nosync"
4685  getOrCreateAAFor<AANoSync>(FPos);
4686 
4687  // Every function might be "no-free".
4688  getOrCreateAAFor<AANoFree>(FPos);
4689 
4690  // Every function might be "no-return".
4691  getOrCreateAAFor<AANoReturn>(FPos);
4692 
4693  // Every function might be "no-recurse".
4694  getOrCreateAAFor<AANoRecurse>(FPos);
4695 
4696  // Every function might be "readnone/readonly/writeonly/...".
4697  getOrCreateAAFor<AAMemoryBehavior>(FPos);
4698 
4699  // Every function might be applicable for Heap-To-Stack conversion.
4700  if (EnableHeapToStack)
4701  getOrCreateAAFor<AAHeapToStack>(FPos);
4702 
4703  // Return attributes are only appropriate if the return type is non-void.
4704  Type *ReturnType = F.getReturnType();
4705  if (!ReturnType->isVoidTy()) {
4706  // Argument attribute "returned" --- Create only one per function even
4707  // though it is an argument attribute.
4708  getOrCreateAAFor<AAReturnedValues>(FPos);
4709 
4710  IRPosition RetPos = IRPosition::returned(F);
4711 
4712  // Every function might be simplified.
4713  getOrCreateAAFor<AAValueSimplify>(RetPos);
4714 
4715  if (ReturnType->isPointerTy()) {
4716 
4717  // Every function with pointer return type might be marked align.
4718  getOrCreateAAFor<AAAlign>(RetPos);
4719 
4720  // Every function with pointer return type might be marked nonnull.
4721  getOrCreateAAFor<AANonNull>(RetPos);
4722 
4723  // Every function with pointer return type might be marked noalias.
4724  getOrCreateAAFor<AANoAlias>(RetPos);
4725 
4726  // Every function with pointer return type might be marked
4727  // dereferenceable.
4728  getOrCreateAAFor<AADereferenceable>(RetPos);
4729  }
4730  }
4731 
4732  for (Argument &Arg : F.args()) {
4733  IRPosition ArgPos = IRPosition::argument(Arg);
4734 
4735  // Every argument might be simplified.
4736  getOrCreateAAFor<AAValueSimplify>(ArgPos);
4737 
4738  if (Arg.getType()->isPointerTy()) {
4739  // Every argument with pointer type might be marked nonnull.
4740  getOrCreateAAFor<AANonNull>(ArgPos);
4741 
4742  // Every argument with pointer type might be marked noalias.
4743  getOrCreateAAFor<AANoAlias>(ArgPos);
4744 
4745  // Every argument with pointer type might be marked dereferenceable.
4746  getOrCreateAAFor<AADereferenceable>(ArgPos);
4747 
4748  // Every argument with pointer type might be marked align.
4749  getOrCreateAAFor<AAAlign>(ArgPos);
4750 
4751  // Every argument with pointer type might be marked nocapture.
4752  getOrCreateAAFor<AANoCapture>(ArgPos);
4753 
4754  // Every argument with pointer type might be marked
4755  // "readnone/readonly/writeonly/..."
4756  getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
4757  }
4758  }
4759 
4760  auto CallSitePred = [&](Instruction &I) -> bool {
4761  CallSite CS(&I);
4762  if (CS.getCalledFunction()) {
4763  for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
4764 
4765  IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
4766 
4767  // Call site argument might be simplified.
4768  getOrCreateAAFor<AAValueSimplify>(CSArgPos);
4769 
4770  if (!CS.getArgument(i)->getType()->isPointerTy())
4771  continue;
4772 
4773  // Call site argument attribute "non-null".
4774  getOrCreateAAFor<AANonNull>(CSArgPos);
4775 
4776  // Call site argument attribute "no-alias".
4777  getOrCreateAAFor<AANoAlias>(CSArgPos);
4778 
4779  // Call site argument attribute "dereferenceable".
4780  getOrCreateAAFor<AADereferenceable>(CSArgPos);
4781 
4782  // Call site argument attribute "align".
4783  getOrCreateAAFor<AAAlign>(CSArgPos);
4784  }
4785  }
4786  return true;
4787  };
4788 
4789  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
4790  bool Success, AnyDead = false;
4791  Success = checkForAllInstructionsImpl(
4792  OpcodeInstMap, CallSitePred, nullptr, AnyDead,
4793  {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
4794  (unsigned)Instruction::Call});
4795  (void)Success;
4796  assert(Success && !AnyDead && "Expected the check call to be successful!");
4797 
4798  auto LoadStorePred = [&](Instruction &I) -> bool {
4799  if (isa<LoadInst>(I))
4800  getOrCreateAAFor<AAAlign>(
4801  IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
4802  else
4803  getOrCreateAAFor<AAAlign>(
4804  IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
4805  return true;
4806  };
4807  Success = checkForAllInstructionsImpl(
4808  OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
4809  {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
4810  (void)Success;
4811  assert(Success && !AnyDead && "Expected the check call to be successful!");
4812 }
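// A minimal sketch of the seeding pattern used throughout this function:
// getOrCreateAAFor<AAType>(Pos) registers an abstract attribute at an IR
// position so that Attributor::run can drive it to a fixpoint. For example,
// inside this function one could additionally seed noalias deduction for the
// first argument (hypothetical extension, not part of the defaults above):
//
//   if (F.arg_size() > 0)
//     getOrCreateAAFor<AANoAlias>(IRPosition::argument(*F.arg_begin()));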
4813 
4814 /// Helpers to ease debugging through output streams and print calls.
4815 ///
4816 ///{
4817 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
4818  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
4819 }
4820 
4821 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
4822  switch (AP) {
4823  case IRPosition::IRP_INVALID:
4824  return OS << "inv";
4825  case IRPosition::IRP_FLOAT:
4826  return OS << "flt";
4827  case IRPosition::IRP_RETURNED:
4828  return OS << "fn_ret";
4829  case IRPosition::IRP_CALL_SITE_RETURNED:
4830  return OS << "cs_ret";
4831  case IRPosition::IRP_FUNCTION:
4832  return OS << "fn";
4833  case IRPosition::IRP_CALL_SITE:
4834  return OS << "cs";
4835  case IRPosition::IRP_ARGUMENT:
4836  return OS << "arg";
4837  case IRPosition::IRP_CALL_SITE_ARGUMENT:
4838  return OS << "cs_arg";
4839  }
4840  llvm_unreachable("Unknown attribute position!");
4841 }
4842 
4843 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
4844  const Value &AV = Pos.getAssociatedValue();
4845  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
4846  << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
4847 }
4848 
4850  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
4851  << static_cast<const AbstractState &>(S);
4852 }
4853 
4854 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
4855  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
4856 }
4857 
4858 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
4859  AA.print(OS);
4860  return OS;
4861 }
4862 
4864  OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
4865  << "]";
4866 }
4867 ///}
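// A minimal usage sketch for the printers above, e.g., when tracing an
// abstract attribute AA from inside an update step:
//
//   LLVM_DEBUG(dbgs() << "[Attributor] Updated: " << AA << "\n");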
4868 
4869 /// ----------------------------------------------------------------------------
4870 /// Pass (Manager) Boilerplate
4871 /// ----------------------------------------------------------------------------
4872 
4873 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
4874  if (DisableAttributor)
4875  return false;
4876 
4877  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
4878  << " functions.\n");
4879 
4880  // Create an Attributor and initially empty information cache that is filled
4881  // while we identify default attribute opportunities.
4882  InformationCache InfoCache(M, AG);
4883  Attributor A(InfoCache, DepRecInterval);
4884 
4885  for (Function &F : M)
4886  A.initializeInformationCache(F);
4887 
4888  for (Function &F : M) {
4889  if (F.hasExactDefinition())
4890  NumFnWithExactDefinition++;
4891  else
4892  NumFnWithoutExactDefinition++;
4893 
4894  // We look at internal functions only on-demand but if any use is not a
4895  // direct call, we have to do it eagerly.
4896  if (F.hasLocalLinkage()) {
4897  if (llvm::all_of(F.uses(), [](const Use &U) {
4898  return ImmutableCallSite(U.getUser()) &&
4899  ImmutableCallSite(U.getUser()).isCallee(&U);
4900  }))
4901  continue;
4902  }
4903 
4904  // Populate the Attributor with abstract attribute opportunities in the
4905  // function and the information cache with IR information.
4906  A.identifyDefaultAbstractAttributes(F);
4907  }
4908 
4909  return A.run(M) == ChangeStatus::CHANGED;
4910 }
4911 
4912 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
4913  AnalysisGetter AG(AM);
4914  if (runAttributorOnModule(M, AG)) {
4915  // FIXME: Think about passes we will preserve and add them here.
4916  return PreservedAnalyses::none();
4917  }
4918  return PreservedAnalyses::all();
4919 }
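// A minimal usage sketch, assuming the pass is registered as "attributor" in
// the new pass manager's pipeline registry:
//
//   opt -passes=attributor -S input.ll
//
// or, when building a pipeline programmatically:
//
//   ModulePassManager MPM;
//   MPM.addPass(AttributorPass());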
4920 
4921 namespace {
4922 
4923 struct AttributorLegacyPass : public ModulePass {
4924  static char ID;
4925 
4926  AttributorLegacyPass() : ModulePass(ID) {
4927  initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
4928  }
4929 
4930  bool runOnModule(Module &M) override {
4931  if (skipModule(M))
4932  return false;
4933 
4934  AnalysisGetter AG;
4935  return runAttributorOnModule(M, AG);
4936  }
4937 
4938  void getAnalysisUsage(AnalysisUsage &AU) const override {
4939  // FIXME: Think about passes we will preserve and add them here.
4940  AU.addRequired<TargetLibraryInfoWrapperPass>();
4941  }
4942 };
4943 
4944 } // end anonymous namespace
4945 
4946 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
4947 
4948 char AttributorLegacyPass::ID = 0;
4949 
4950 const char AAReturnedValues::ID = 0;
4951 const char AANoUnwind::ID = 0;
4952 const char AANoSync::ID = 0;
4953 const char AANoFree::ID = 0;
4954 const char AANonNull::ID = 0;
4955 const char AANoRecurse::ID = 0;
4956 const char AAWillReturn::ID = 0;
4957 const char AANoAlias::ID = 0;
4958 const char AANoReturn::ID = 0;
4959 const char AAIsDead::ID = 0;
4960 const char AADereferenceable::ID = 0;
4961 const char AAAlign::ID = 0;
4962 const char AANoCapture::ID = 0;
4963 const char AAValueSimplify::ID = 0;
4964 const char AAHeapToStack::ID = 0;
4965 const char AAMemoryBehavior::ID = 0;
4966 
4967 // Macro magic to create the static generator function for attributes that
4968 // follow the naming scheme.
4969 
4970 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
4971  case IRPosition::PK: \
4972  llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
4973 
4974 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
4975  case IRPosition::PK: \
4976  AA = new CLASS##SUFFIX(IRP); \
4977  break;
4978 
4979 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
4980  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
4981  CLASS *AA = nullptr; \
4982  switch (IRP.getPositionKind()) { \
4983  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
4984  SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
4985  SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
4986  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
4987  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
4988  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
4989  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
4990  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
4991  } \
4992  return *AA; \
4993  }
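// A minimal sketch of what the macro above expands to, e.g., for AANoUnwind
// (the six SWITCH_PK_INV cases are collapsed into one `default` for brevity):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_FUNCTION:
//       AA = new AANoUnwindFunction(IRP);
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new AANoUnwindCallSite(IRP);
//       break;
//     default:
//       llvm_unreachable("Cannot create AANoUnwind for this position!");
//     }
//     return *AA;
//   }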
4994 
4995 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
4996  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
4997  CLASS *AA = nullptr; \
4998  switch (IRP.getPositionKind()) { \
4999  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5000  SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
5001  SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
5002  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5003  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5004  SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
5005  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5006  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5007  } \
5008  return *AA; \
5009  }
5010 
5011 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5012  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5013  CLASS *AA = nullptr; \
5014  switch (IRP.getPositionKind()) { \
5015  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5016  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5017  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
5018  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5019  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5020  SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
5021  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5022  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5023  } \
5024  return *AA; \
5025  }
5026 
5027 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5028  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5029  CLASS *AA = nullptr; \
5030  switch (IRP.getPositionKind()) { \
5031  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5032  SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
5033  SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
5034  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
5035  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
5036  SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
5037  SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
5038  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5039  } \
5040  return *AA; \
5041  }
5042 
5043 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
5044  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
5045  CLASS *AA = nullptr; \
5046  switch (IRP.getPositionKind()) { \
5047  SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
5048  SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
5049  SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
5050  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
5051  SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
5052  SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
5053  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
5054  SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
5055  } \
5056  return *AA; \
5057  }
5058 
5059 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
5060 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
5061 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
5062 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
5063 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
5064 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
5065 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
5066 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
5067 
5068 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
5069 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
5070 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
5071 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
5072 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
5073 
5074 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
5075 
5076 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
5077 
5078 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
5079 
5080 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
5081 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
5082 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
5083 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
5084 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
5085 #undef SWITCH_PK_CREATE
5086 #undef SWITCH_PK_INV
5087 
5088 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
5089  "Deduce and propagate attributes", false, false)
5090 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
5091 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
5092  "Deduce and propagate attributes", false, false)