ObjCARCOpts.cpp (LLVM 16.0.0git)

//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
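///
/// For example (an illustrative IR sketch, not drawn from a specific test),
/// a provably balanced pair such as:
///
///   %0 = call i8* @llvm.objc.retain(i8* %x)
///   call void @use(i8* %x)   ; hypothetical callee known not to release %x
///   call void @llvm.objc.release(i8* %x)
///
/// can be deleted outright once the pass shows that nothing between the two
/// calls can decrement the reference count of %x.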
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//

#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/ObjCARC.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

static cl::opt<unsigned> MaxPtrStates("arc-opt-max-ptr-states",
                                      cl::Hidden,
                                      cl::desc("Maximum number of ptr states the optimizer keeps track of"),
                                      cl::init(4095));

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
          cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object whose only uses are trivial, we can
  // still consider it a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
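//
// For example (an illustrative IR sketch), the following pair guards no
// autorelease at all, so it could be removed:
//
//   %pool = call i8* @llvm.objc.autoreleasePoolPush()
//   call void @work()   ; hypothetical call that performs no autoreleases
//   call void @llvm.objc.autoreleasePoolPop(i8* %pool)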

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt, "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt, "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt, "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt, "Number of releases after optimization");
#endif

namespace {

  /// Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount = 0;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount = 0;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() = default;

    using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
    using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }
    bool hasTopDownPtrs() const {
      return !PerPtrTopDown.empty();
    }

    unsigned top_down_ptr_list_size() const {
      return std::distance(top_down_ptr_begin(), top_down_ptr_end());
    }

    using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
    using const_bottom_up_ptr_iterator =
        decltype(PerPtrBottomUp)::const_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }
    bool hasBottomUpPtrs() const {
      return !PerPtrBottomUp.empty();
    }

    unsigned bottom_up_ptr_list_size() const {
      return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
          (unsigned long long)TopDownPathCount * BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set, or if
      // the truncated product equals the sentinel OverflowOccurredValue
      // (i.e. all of its lower 32 bits are set).
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }
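
    // Worked example: with TopDownPathCount == 0x10000 and BottomUpPathCount
    // == 0x10000, Product is 0x100000000; its upper 32 bits are nonzero, so
    // the query reports overflow instead of returning a truncated PathCount.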

    // Specialized CFG utilities.
    using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // To stay consistent, clear the top-down pointers when the addition makes
  // TopDownPathCount equal to OverflowOccurredValue, even though "true"
  // overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }
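
  // Worked example of the wrap check above: 0xFFFF0000 + 0x00020000 wraps to
  // 0x00010000 in 32 bits, which is smaller than the 0x00020000 addend, so
  // the unsigned wrap is detected and we fall back to conservative behavior.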

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // To stay consistent, clear the bottom-up pointers when the addition makes
  // BottomUpPathCount equal to OverflowOccurredValue, even though "true"
  // overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << "    TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    LLVM_DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe() ? "true" : "false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  OS << "    BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    LLVM_DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe() ? "true" : "false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

  /// The main ARC optimization pass.
class ObjCARCOpt {
  bool Changed = false;
  bool CFGChanged = false;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  BundledRetainClaimRVs *BundledInsts = nullptr;

  /// A flag indicating whether the optimization that removes or moves
  /// retain/release pairs should be performed.
  bool DisableRetainReleasePairing = false;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  /// Optimize an individual call, optionally passing the
  /// GetArgRCIdentityRoot if it has already been computed.
  void OptimizeIndividualCallImpl(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, ARCInstKind Class, const Value *Arg);

  /// Try to optimize an AutoreleaseRV with a RetainRV or UnsafeClaimRV. If the
  /// optimization occurs, returns true to indicate that the caller should
  /// assume the instructions are dead.
  bool OptimizeInlinedAutoreleaseRVCall(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, const Value *&Arg, ARCInstKind Class,
      Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(
      Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
      const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
          &ReleaseInsertPtToRCIdentityRoots);
  bool VisitTopDown(
      BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
      DenseMap<Value *, RRInfo> &Releases,
      const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
          &ReleaseInsertPtToRCIdentityRoots);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                DenseMap<Value *, RRInfo> &Releases, Module *M,
                                Instruction *Retain,
                                SmallVectorImpl<Instruction *> &DeadInsts,
                                RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                                Value *Arg, bool KnownSafe,
                                bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

  public:
  void init(Module &M);
  bool run(Function &F, AAResults &AA);
  bool hasCFGChanged() const { return CFGChanged; }
};
} // end anonymous namespace

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.
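///
/// For example (an illustrative IR sketch), in:
///
///   %x = load i8*, i8** @global
///   %0 = call i8* @llvm.objc.retainAutoreleasedReturnValue(i8* %x)
///
/// %x is not the result of an immediately preceding call or invoke, so the
/// call is rewritten to a plain @llvm.objc.retain.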
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  if (const Instruction *Call = dyn_cast<CallBase>(Arg)) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  assert(!BundledInsts->contains(RetainRV) &&
         "a bundled retainRV's argument should be a call");

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                       "objc_retain since the operand is not a return value.\n"
                       "Old = "
                    << *RetainRV << "\n");

  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, const Value *&Arg, ARCInstKind Class,
    Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
  if (BundledInsts->contains(Inst))
    return false;

  // Must be in the same basic block.
  assert(Inst->getParent() == AutoreleaseRV->getParent());

  // Must operate on the same root.
  Arg = GetArgRCIdentityRoot(Inst);
  AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
  if (Arg != AutoreleaseRVArg) {
    // If there isn't an exact match, check if we have equivalent PHIs.
    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      return false;

    SmallVector<const Value *, 4> ArgUsers;
    getEquivalentPHIs(*PN, ArgUsers);
    if (!llvm::is_contained(ArgUsers, AutoreleaseRVArg))
      return false;
  }

  // Okay, this is a match. Merge them.
  ++NumPeeps;
  LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
                    << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");

  // Delete the RV pair, starting with the AutoreleaseRV.
  AutoreleaseRV->replaceAllUsesWith(
      cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
  Changed = true;
  EraseInstruction(AutoreleaseRV);
  if (Class == ARCInstKind::RetainRV) {
    // AutoreleaseRV and RetainRV cancel out. Delete the RetainRV.
    Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
    EraseInstruction(Inst);
    return true;
  }

  // UnsafeClaimRV is a frontend peephole for RetainRV + Release. Since the
  // AutoreleaseRV and RetainRV cancel out, replace UnsafeClaimRV with Release.
  assert(Class == ARCInstKind::UnsafeClaimRV);
  Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
  CallInst *Release = CallInst::Create(
      EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
  assert(IsAlwaysTail(ARCInstKind::UnsafeClaimRV) &&
         "Expected UnsafeClaimRV to be safe to tail call");
  Release->setTailCall();
  Inst->replaceAllUsesWith(CallArg);
  EraseInstruction(Inst);

  // Run the normal optimizations on Release.
  OptimizeIndividualCallImpl(F, BlockColors, Release, ARCInstKind::Release,
                             Arg);
  return true;
}

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
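///
/// For example (an illustrative IR sketch), when the autoreleased pointer is
/// stored rather than returned:
///
///   %v = call i8* @llvm.objc.autoreleaseReturnValue(i8* %x)
///   store i8* %x, i8** %slot
///   ret void
///
/// no return uses %x, so the call is demoted to plain @llvm.objc.autorelease.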
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_autoreleaseReturnValue => "
                "objc_autorelease since its operand is not used as a return "
                "value.\n"
                "Old = "
             << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
}

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
  // with RetainRV and UnsafeClaimRV.
  Instruction *DelayedAutoreleaseRV = nullptr;
  const Value *DelayedAutoreleaseRVArg = nullptr;
  auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
    assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
    DelayedAutoreleaseRV = AutoreleaseRV;
    DelayedAutoreleaseRVArg = nullptr;
  };
  auto optimizeDelayedAutoreleaseRV = [&]() {
    if (!DelayedAutoreleaseRV)
      return;
    OptimizeIndividualCallImpl(F, BlockColors, DelayedAutoreleaseRV,
                               ARCInstKind::AutoreleaseRV,
                               DelayedAutoreleaseRVArg);
    setDelayedAutoreleaseRV(nullptr);
  };
  auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
    // Nothing to delay, but we may as well skip the logic below.
    if (!DelayedAutoreleaseRV)
      return true;

    // If we hit the end of the basic block we're not going to find an RV-pair.
    // Stop delaying.
    if (NonARCInst->isTerminator())
      return false;

    // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
    // UnsafeClaimRV, it's probably safe to skip over even opaque function calls
    // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
    // have the same RCIdentityRoot. However, what really matters is
    // skipping instructions or intrinsics that the inliner could leave behind;
    // be conservative for now and don't skip over opaque calls, which could
    // potentially include other ARC calls.
    auto *CB = dyn_cast<CallBase>(NonARCInst);
    if (!CB)
      return true;
    return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
  };

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (objcarc::hasAttachedCallOpBundle(CI)) {
        BundledInsts->insertRVCall(&*I, CI);
        Changed = true;
      }

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // Skip this loop if this instruction isn't itself an ARC intrinsic.
    const Value *Arg = nullptr;
    switch (Class) {
    default:
      optimizeDelayedAutoreleaseRV();
      break;
    case ARCInstKind::CallOrUser:
    case ARCInstKind::User:
    case ARCInstKind::None:
      // This is a non-ARC instruction. If we're delaying an AutoreleaseRV,
      // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
      // now.
      if (!shouldDelayAutoreleaseRV(Inst))
        optimizeDelayedAutoreleaseRV();
      continue;
    case ARCInstKind::AutoreleaseRV:
      optimizeDelayedAutoreleaseRV();
      setDelayedAutoreleaseRV(Inst);
      continue;
    case ARCInstKind::RetainRV:
    case ARCInstKind::UnsafeClaimRV:
      if (DelayedAutoreleaseRV) {
        // We have a potential RV pair. Check if they cancel out.
        if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
                                             DelayedAutoreleaseRV,
                                             DelayedAutoreleaseRVArg)) {
          setDelayedAutoreleaseRV(nullptr);
          continue;
        }
        optimizeDelayedAutoreleaseRV();
      }
      break;
    }

    OptimizeIndividualCallImpl(F, BlockColors, Inst, Class, Arg);
  }

  // Catch the final delayed AutoreleaseRV.
  optimizeDelayedAutoreleaseRV();
}

/// This function returns true if the value is inert. An ObjC ARC runtime call
/// taking an inert operand can be safely deleted.
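///
/// For example (illustrative), @llvm.objc.retain(i8* null) is inert, as is a
/// retain of a global variable annotated with the "objc_arc_inert" attribute;
/// such runtime calls can simply be erased.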
static bool isInertARCValue(Value *V, SmallPtrSet<Value *, 1> &VisitedPhis) {
  V = V->stripPointerCasts();

  if (IsNullOrUndef(V))
    return true;

  // See if this is a global variable annotated with 'objc_arc_inert'.
  if (auto *GV = dyn_cast<GlobalVariable>(V))
    if (GV->hasAttribute("objc_arc_inert"))
      return true;

  if (auto PN = dyn_cast<PHINode>(V)) {
    // Ignore this phi if it has already been discovered.
    if (!VisitedPhis.insert(PN).second)
      return true;
    // Look through the phi's operands.
    for (Value *Opnd : PN->incoming_values())
      if (!isInertARCValue(Opnd, VisitedPhis))
        return false;
    return true;
  }

  return false;
}

void ObjCARCOpt::OptimizeIndividualCallImpl(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, ARCInstKind Class, const Value *Arg) {
  LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

  // We can delete this call if it takes an inert value.
  SmallPtrSet<Value *, 1> VisitedPhis;

  if (BundledInsts->contains(Inst)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  if (IsNoopOnGlobal(Class))
    if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
      if (!Inst->getType()->isVoidTy())
        Inst->replaceAllUsesWith(Inst->getOperand(0));
      Inst->eraseFromParent();
      Changed = true;
      return;
    }

  switch (Class) {
  default:
    break;

  // Delete no-op casts. These function calls have special semantics, but
  // the semantics are entirely implemented via lowering in the front-end,
  // so by the time they reach the optimizer, they are just no-op calls
  // which return their argument.
  //
  // There are gray areas here, as the ability to cast reference-counted
  // pointers to raw void* and back allows code to break ARC assumptions,
  // however these are currently considered to be unimportant.
  case ARCInstKind::NoopCast:
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
    EraseInstruction(Inst);
    return;

  // If the pointer-to-weak-pointer is null, it's undefined behavior.
  case ARCInstKind::StoreWeak:
  case ARCInstKind::LoadWeak:
  case ARCInstKind::LoadWeakRetained:
  case ARCInstKind::InitWeak:
  case ARCInstKind::DestroyWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0))) {
      Changed = true;
      new StoreInst(ConstantInt::getTrue(CI->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(CI->getContext())), CI);
      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");
      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::CopyWeak:
  case ARCInstKind::MoveWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0)) ||
        IsNullOrUndef(CI->getArgOperand(1))) {
      Changed = true;
      new StoreInst(ConstantInt::getTrue(CI->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(CI->getContext())), CI);

      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");

      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::RetainRV:
    if (OptimizeRetainRVCall(F, Inst))
      return;
    break;
  case ARCInstKind::AutoreleaseRV:
    OptimizeAutoreleaseRVCall(F, Inst, Class);
    break;
  }

  // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
  if (IsAutorelease(Class) && Inst->use_empty()) {
    CallInst *Call = cast<CallInst>(Inst);
    const Value *Arg = Call->getArgOperand(0);
    Arg = FindSingleUseIdentifiedObject(Arg);
    if (Arg) {
      Changed = true;
      ++NumAutoreleases;

      // Create the declaration lazily.
      LLVMContext &C = Inst->getContext();

      Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
      CallInst *NewCall =
          CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
      NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                           MDNode::get(C, std::nullopt));

      LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                           "since x is otherwise unused.\nOld: "
                        << *Call << "\nNew: " << *NewCall << "\n");

      EraseInstruction(Call);
      Inst = NewCall;
      Class = ARCInstKind::Release;
    }
  }

  // For functions which can never be passed stack arguments, add
  // a tail keyword.
  if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
    Changed = true;
    LLVM_DEBUG(
        dbgs() << "Adding tail keyword to function since it can never be "
                  "passed stack args: "
               << *Inst << "\n");
    cast<CallInst>(Inst)->setTailCall();
  }

  // Ensure that functions that can never have a "tail" keyword due to the
  // semantics of ARC truly do not do so.
  if (IsNeverTail(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setTailCall(false);
  }

  // Set nounwind as needed.
  if (IsNoThrow(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setDoesNotThrow();
  }

  // Note: This catches instructions unrelated to ARC.
  if (!IsNoopOnNull(Class)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  // If we haven't already looked up the root, look it up now.
  if (!Arg)
    Arg = GetArgRCIdentityRoot(Inst);

  // ARC calls with null are no-ops. Delete them.
  if (IsNullOrUndef(Arg)) {
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                      << "\n");
    EraseInstruction(Inst);
    return;
  }

  // Keep track of which of retain, release, autorelease, and retain_block
  // are actually present in this function.
  UsedInThisFunction |= 1 << unsigned(Class);

  // If Arg is a PHI, and one or more incoming values to the
  // PHI are null, and the call is control-equivalent to the PHI, and there
  // are no relevant side effects between the PHI and the call, and the call
  // is not a release that doesn't have the clang.imprecise_release tag, the
  // call could be pushed up to just those paths with non-null incoming
  // values. For now, don't bother splitting critical edges for this.
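  //
  // For example (an illustrative IR sketch), given
  //
  //   %p = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
  //   call void @llvm.objc.release(i8* %p) ; has clang.imprecise_release
  //
  // the release is a no-op along the %bb1 edge, so it could be cloned into
  // just the %bb2 predecessor.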
  if (Class == ARCInstKind::Release &&
      !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
    return;

  SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(Inst, Arg));
  do {
    std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
    Inst = Pair.first;
    Arg = Pair.second;

    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      continue;

    // Determine if the PHI has any null operands, or any incoming
    // critical edges.
    bool HasNull = false;
    bool HasCriticalEdges = false;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        HasNull = true;
      else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
               1) {
        HasCriticalEdges = true;
        break;
      }
    }
    // If we have null operands and no critical edges, optimize.
    if (HasCriticalEdges)
      continue;
    if (!HasNull)
      continue;

    Instruction *DepInst = nullptr;

    // Check that there is nothing that cares about the reference
    // count between the call and the phi.
    switch (Class) {
    case ARCInstKind::Retain:
    case ARCInstKind::RetainBlock:
      // These can always be moved up.
      break;
    case ARCInstKind::Release:
      // These can't be moved across things that care about the retain
      // count.
      DepInst = findSingleDependency(NeedsPositiveRetainCount, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::Autorelease:
      // These can't be moved across autorelease pool scope boundaries.
      DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::UnsafeClaimRV:
    case ARCInstKind::RetainRV:
    case ARCInstKind::AutoreleaseRV:
      // Don't move these; the RV optimization depends on the autoreleaseRV
      // being tail called, and the retainRV being immediately after a call
      // (which might still happen if we get lucky with codegen layout, but
      // it's not worth taking the chance).
      continue;
    default:
      llvm_unreachable("Invalid dependence flavor");
    }

    if (DepInst != PN)
      continue;

    Changed = true;
    ++NumPartialNoops;
    // Clone the call into each predecessor that has a non-null value.
    CallInst *CInst = cast<CallInst>(Inst);
    Type *ParamTy = CInst->getArgOperand(0)->getType();
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        continue;
      Value *Op = PN->getIncomingValue(i);
      Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
      CallInst *Clone = cast<CallInst>(
          CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
      if (Op->getType() != ParamTy)
        Op = new BitCastInst(Op, ParamTy, "", InsertPos);
      Clone->setArgOperand(0, Op);
      Clone->insertBefore(InsertPos);

      LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                           "And inserting clone at "
                        << *InsertPos << "\n");
      Worklist.push_back(std::make_pair(Clone, Incoming));
    }
    // Erase the original call.
    LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
    EraseInstruction(CInst);
  } while (!Worklist.empty());
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that this
    // code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    for (const BasicBlock *Succ : successors(BB)) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(Succ);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If, bottom-up, the pointer is in an S_None state, clear the sequence
      // progress, since the sequence in the bottom-up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state, were it not for the fact that we
      // are known safe, stop code motion. Whether it is safe to remove RR
      // pairs via KnownSafe is orthogonal to whether we are allowed to
      // perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  LLVM_DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        LLVM_DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    LLVM_DEBUG(dbgs() << "    Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}

// Fill ReleaseInsertPtToRCIdentityRoots, which is a map from insertion points
// to the set of RC identity roots that would be released by the release calls
// moved to the insertion points.
static void collectReleaseInsertPts(
    const BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  for (const auto &P : Retains) {
    // Retains is a map from an objc_retain call to a RRInfo of the RC identity
    // root of the call. Get the RC identity root of the objc_retain call.
    Instruction *Retain = cast<Instruction>(P.first);
    Value *Root = GetRCIdentityRoot(Retain->getOperand(0));
    // Collect all the insertion points of the objc_release calls that release
    // the RC identity root of the objc_retain call.
    for (const Instruction *InsertPt : P.second.ReverseInsertPts)
      ReleaseInsertPtToRCIdentityRoots[InsertPt].insert(Root);
  }
}

// Get the RC identity roots from an insertion point of an objc_release call.
// Return nullptr if the passed instruction isn't an insertion point.
static const SmallPtrSet<const Value *, 2> *
getRCIdentityRootsFromReleaseInsertPt(
    const Instruction *InsertPt,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  auto I = ReleaseInsertPtToRCIdentityRoots.find(InsertPt);
  if (I == ReleaseInsertPtToRCIdentityRoots.end())
    return nullptr;
  return &I->second;
}

bool ObjCARCOpt::VisitInstructionTopDown(
    Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  // Make sure a call to objc_retain isn't moved past insertion points of calls
  // to objc_release.
  if (const SmallPtrSet<const Value *, 2> *Roots =
          getRCIdentityRootsFromReleaseInsertPt(
              Inst, ReleaseInsertPtToRCIdentityRoots))
    for (const auto *Root : *Roots) {
      TopDownPtrState &S = MyStates.getPtrTopDownState(Root);
      // Disable code motion if the current position is S_Retain to prevent
      // moving the objc_retain call past objc_release calls. If it's
      // S_CanRelease or larger, it's not necessary to disable code motion as
      // the insertion points that prevent the objc_retain call from moving down
      // should have been set already.
      if (S.GetSeq() == S_Retain)
        S.SetCFGHazardAfflicted(true);
    }

  LLVM_DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      LLVM_DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are not uses of any pointer we are tracking.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitTopDown(
    BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
    DenseMap<Value *, RRInfo> &Releases,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  // Check that BB and MyStates have the same number of predecessors. This
  // prevents retain calls that live outside a loop from being moved into the
  // loop.
  if (!BB->hasNPredecessors(MyStates.pred_end() - MyStates.pred_begin()))
    for (auto I = MyStates.top_down_ptr_begin(),
              E = MyStates.top_down_ptr_end();
         I != E; ++I)
      I->second.SetCFGHazardAfflicted(true);

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    LLVM_DEBUG(dbgs() << "    Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(
        &Inst, Releases, MyStates, ReleaseInsertPtToRCIdentityRoots);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
                    << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}

1640 static void
1642  SmallVectorImpl<BasicBlock *> &PostOrder,
1643  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
1644  unsigned NoObjCARCExceptionsMDKind,
1646  /// The visited set, for doing DFS walks.
1648 
1649  // Do DFS, computing the PostOrder.
1652 
1653  // Functions always have exactly one entry block, and we don't have
1654  // any other block that we treat like an entry block.
1655  BasicBlock *EntryBB = &F.getEntryBlock();
1656  BBState &MyStates = BBStates[EntryBB];
1657  MyStates.SetAsEntry();
1658  Instruction *EntryTI = EntryBB->getTerminator();
1659  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
1660  Visited.insert(EntryBB);
1661  OnStack.insert(EntryBB);
1662  do {
1663  dfs_next_succ:
1664  BasicBlock *CurrBB = SuccStack.back().first;
1665  succ_iterator SE(CurrBB->getTerminator(), false);
1666 
1667  while (SuccStack.back().second != SE) {
1668  BasicBlock *SuccBB = *SuccStack.back().second++;
1669  if (Visited.insert(SuccBB).second) {
1670  SuccStack.push_back(
1671  std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
1672  BBStates[CurrBB].addSucc(SuccBB);
1673  BBState &SuccStates = BBStates[SuccBB];
1674  SuccStates.addPred(CurrBB);
1675  OnStack.insert(SuccBB);
1676  goto dfs_next_succ;
1677  }
1678 
1679  if (!OnStack.count(SuccBB)) {
1680  BBStates[CurrBB].addSucc(SuccBB);
1681  BBStates[SuccBB].addPred(CurrBB);
1682  }
1683  }
1684  OnStack.erase(CurrBB);
1685  PostOrder.push_back(CurrBB);
1686  SuccStack.pop_back();
1687  } while (!SuccStack.empty());
1688 
1689  Visited.clear();
1690 
1691  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
1692  // Functions may have many exits, and there are also blocks which we treat
1693  // as exits due to ignored edges.
1694  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
1695  for (BasicBlock &ExitBB : F) {
1696  BBState &MyStates = BBStates[&ExitBB];
1697  if (!MyStates.isExit())
1698  continue;
1699 
1700  MyStates.SetAsExit();
1701 
1702  PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
1703  Visited.insert(&ExitBB);
1704  while (!PredStack.empty()) {
1705  reverse_dfs_next_succ:
1706  BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
1707  while (PredStack.back().second != PE) {
1708  BasicBlock *BB = *PredStack.back().second++;
1709  if (Visited.insert(BB).second) {
1710  PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
1711  goto reverse_dfs_next_succ;
1712  }
1713  }
1714  ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
1715  }
1716  }
1717 }
1718 
1719 // Visit the function both top-down and bottom-up.
1720 bool ObjCARCOpt::Visit(Function &F,
1721  DenseMap<const BasicBlock *, BBState> &BBStates,
1722  BlotMapVector<Value *, RRInfo> &Retains,
1723  DenseMap<Value *, RRInfo> &Releases) {
1724  // Use reverse-postorder traversals, because we magically know that loops
1725  // will be well behaved, i.e. they won't repeatedly call retain on a single
1726  // pointer without doing a release. We can't use the ReversePostOrderTraversal
1727  // class here because we want the reverse-CFG postorder to consider each
1728  // function exit point, and we want to ignore selected cycle edges.
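 // For a diamond-shaped CFG (a hypothetical example for illustration):
 //
 //        A
 //       / \
 //      B   C
 //       \ /
 //        D
 //
 // the top-down pass visits A before B and C, and both before D
 // (reverse-postorder on the CFG), so a block is seen only after all of its
 // predecessors; the bottom-up pass visits D first and A last
 // (reverse-postorder on the reverse CFG), so a block is seen only after all
 // of its successors.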
1729  SmallVector<BasicBlock *, 16> PostOrder;
1730  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
1731  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
1732  MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
1733  BBStates);
1734 
1735  // Use reverse-postorder on the reverse CFG for bottom-up.
1736  bool BottomUpNestingDetected = false;
1737  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
1738  BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
1739  if (DisableRetainReleasePairing)
1740  return false;
1741  }
1742 
1743  DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
1744  ReleaseInsertPtToRCIdentityRoots;
1745  collectReleaseInsertPts(Retains, ReleaseInsertPtToRCIdentityRoots);
1746 
1747  // Use reverse-postorder for top-down.
1748  bool TopDownNestingDetected = false;
1749  for (BasicBlock *BB : llvm::reverse(PostOrder)) {
1750  TopDownNestingDetected |=
1751  VisitTopDown(BB, BBStates, Releases, ReleaseInsertPtToRCIdentityRoots);
1752  if (DisableRetainReleasePairing)
1753  return false;
1754  }
1755 
1756  return TopDownNestingDetected && BottomUpNestingDetected;
1757 }
1758 
1759 /// Move the calls in RetainsToMove and ReleasesToMove.
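/// A sketch of the effect (illustrative IR, not from a test case): when a
/// retain in a common block pairs with a release that is only reachable
/// along one branch, the pair can be recreated on that branch alone:
/// \code
/// %0 = call i8* @objc_retain(i8* %x)
/// br i1 %c, label %use, label %skip    ; the matching release is in %use
/// \endcode
/// Here a new retain is inserted at the head of %use, the release stays at
/// the original release point, and the original retain is deleted, so the
/// %skip path no longer pays for the pair.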
1760 void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
1761  RRInfo &ReleasesToMove,
1762  BlotMapVector<Value *, RRInfo> &Retains,
1763  DenseMap<Value *, RRInfo> &Releases,
1764  SmallVectorImpl<Instruction *> &DeadInsts,
1765  Module *M) {
1766  Type *ArgTy = Arg->getType();
1767  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
1768 
1769  LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
1770 
1771  // Insert the new retain and release calls.
1772  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
1773  Value *MyArg = ArgTy == ParamTy ? Arg :
1774  new BitCastInst(Arg, ParamTy, "", InsertPt);
1775  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
1776  CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1777  Call->setDoesNotThrow();
1778  Call->setTailCall();
1779 
1780  LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
1781  << "\n"
1782  "At insertion point: "
1783  << *InsertPt << "\n");
1784  }
1785  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
1786  Value *MyArg = ArgTy == ParamTy ? Arg :
1787  new BitCastInst(Arg, ParamTy, "", InsertPt);
1788  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
1789  CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1790  // Attach a clang.imprecise_release metadata tag, if appropriate.
1791  if (MDNode *M = ReleasesToMove.ReleaseMetadata)
1792  Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
1793  Call->setDoesNotThrow();
1794  if (ReleasesToMove.IsTailCallRelease)
1795  Call->setTailCall();
1796 
1797  LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
1798  << "\n"
1799  "At insertion point: "
1800  << *InsertPt << "\n");
1801  }
1802 
1803  // Delete the original retain and release calls.
1804  for (Instruction *OrigRetain : RetainsToMove.Calls) {
1805  Retains.blot(OrigRetain);
1806  DeadInsts.push_back(OrigRetain);
1807  LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
1808  }
1809  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
1810  Releases.erase(OrigRelease);
1811  DeadInsts.push_back(OrigRelease);
1812  LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
1813  }
1814 }
1815 
1816 bool ObjCARCOpt::PairUpRetainsAndReleases(
1817  DenseMap<const BasicBlock *, BBState> &BBStates,
1818  BlotMapVector<Value *, RRInfo> &Retains,
1819  DenseMap<Value *, RRInfo> &Releases, Module *M,
1820  Instruction *Retain,
1821  SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
1822  RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
1823  bool &AnyPairsCompletelyEliminated) {
1824  // If a pair happens in a region where it is known that the reference count
1825  // is already incremented, we can similarly ignore possible decrements unless
1826  // we are dealing with a retainable object with multiple provenance sources.
1827  bool KnownSafeTD = true, KnownSafeBU = true;
1828  bool CFGHazardAfflicted = false;
1829 
1830  // Connect the dots between the top-down-collected RetainsToMove and
1831  // bottom-up-collected ReleasesToMove to form sets of related calls.
1832  // This is an iterative process so that we connect multiple releases
1833  // to multiple retains if needed.
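  // For instance (an illustrative shape): one retain can feed releases on
  // two disjoint paths, and each of those releases may in turn be reached
  // by further retains, so the two sets grow until they close:
  //
  //   %0 = call i8* @objc_retain(i8* %p)
  //   br i1 %c, label %left, label %right
  // left:
  //   call void @objc_release(i8* %p)         ; found from the retain
  //   ...
  // right:
  //   call void @objc_release(i8* %p)         ; found from the retain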
1834  unsigned OldDelta = 0;
1835  unsigned NewDelta = 0;
1836  unsigned OldCount = 0;
1837  unsigned NewCount = 0;
1838  bool FirstRelease = true;
1839  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
1840  SmallVector<Instruction *, 4> NewReleases;
1841  for (Instruction *NewRetain : NewRetains) {
1842  auto It = Retains.find(NewRetain);
1843  assert(It != Retains.end());
1844  const RRInfo &NewRetainRRI = It->second;
1845  KnownSafeTD &= NewRetainRRI.KnownSafe;
1846  CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
1847  for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
1848  auto Jt = Releases.find(NewRetainRelease);
1849  if (Jt == Releases.end())
1850  return false;
1851  const RRInfo &NewRetainReleaseRRI = Jt->second;
1852 
1853  // If the release does not have a reference to the retain as well,
1854  // something happened which is unaccounted for. Do not do anything.
1855  //
1856  // This can happen if we catch an additive overflow during path count
1857  // merging.
1858  if (!NewRetainReleaseRRI.Calls.count(NewRetain))
1859  return false;
1860 
1861  if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
1862  // If we overflow when we compute the path count, don't remove/move
1863  // anything.
1864  const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
1865  unsigned PathCount = BBState::OverflowOccurredValue;
1866  if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1867  return false;
1868  assert(PathCount != BBState::OverflowOccurredValue &&
1869  "PathCount at this point can not be "
1870  "OverflowOccurredValue.");
1871  OldDelta -= PathCount;
1872 
1873  // Merge the ReleaseMetadata and IsTailCallRelease values.
1874  if (FirstRelease) {
1875  ReleasesToMove.ReleaseMetadata =
1876  NewRetainReleaseRRI.ReleaseMetadata;
1877  ReleasesToMove.IsTailCallRelease =
1878  NewRetainReleaseRRI.IsTailCallRelease;
1879  FirstRelease = false;
1880  } else {
1881  if (ReleasesToMove.ReleaseMetadata !=
1882  NewRetainReleaseRRI.ReleaseMetadata)
1883  ReleasesToMove.ReleaseMetadata = nullptr;
1884  if (ReleasesToMove.IsTailCallRelease !=
1885  NewRetainReleaseRRI.IsTailCallRelease)
1886  ReleasesToMove.IsTailCallRelease = false;
1887  }
1888 
1889  // Collect the optimal insertion points.
1890  if (!KnownSafe)
1891  for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
1892  if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
1893  // If we overflow when we compute the path count, don't
1894  // remove/move anything.
1895  const BBState &RIPBBState = BBStates[RIP->getParent()];
1896  PathCount = BBState::OverflowOccurredValue;
1897  if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1898  return false;
1899  assert(PathCount != BBState::OverflowOccurredValue &&
1900  "PathCount at this point can not be "
1901  "OverflowOccurredValue.");
1902  NewDelta -= PathCount;
1903  }
1904  }
1905  NewReleases.push_back(NewRetainRelease);
1906  }
1907  }
1908  }
1909  NewRetains.clear();
1910  if (NewReleases.empty()) break;
1911 
1912  // Back the other way.
1913  for (Instruction *NewRelease : NewReleases) {
1914  auto It = Releases.find(NewRelease);
1915  assert(It != Releases.end());
1916  const RRInfo &NewReleaseRRI = It->second;
1917  KnownSafeBU &= NewReleaseRRI.KnownSafe;
1918  CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
1919  for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
1920  auto Jt = Retains.find(NewReleaseRetain);
1921  if (Jt == Retains.end())
1922  return false;
1923  const RRInfo &NewReleaseRetainRRI = Jt->second;
1924 
1925  // If the retain does not have a reference to the release as well,
1926  // something happened which is unaccounted for. Do not do anything.
1927  //
1928  // This can happen if we catch an additive overflow during path count
1929  // merging.
1930  if (!NewReleaseRetainRRI.Calls.count(NewRelease))
1931  return false;
1932 
1933  if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
1934  // If we overflow when we compute the path count, don't remove/move
1935  // anything.
1936  const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
1937  unsigned PathCount = BBState::OverflowOccurredValue;
1938  if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1939  return false;
1940  assert(PathCount != BBState::OverflowOccurredValue &&
1941  "PathCount at this point can not be "
1942  "OverflowOccurredValue.");
1943  OldDelta += PathCount;
1944  OldCount += PathCount;
1945 
1946  // Collect the optimal insertion points.
1947  if (!KnownSafe)
1948  for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
1949  if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
1950  // If we overflow when we compute the path count, don't
1951  // remove/move anything.
1952  const BBState &RIPBBState = BBStates[RIP->getParent()];
1953 
1954  PathCount = BBState::OverflowOccurredValue;
1955  if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1956  return false;
1957  assert(PathCount != BBState::OverflowOccurredValue &&
1958  "PathCount at this point can not be "
1959  "OverflowOccurredValue.");
1960  NewDelta += PathCount;
1961  NewCount += PathCount;
1962  }
1963  }
1964  NewRetains.push_back(NewReleaseRetain);
1965  }
1966  }
1967  }
1968  if (NewRetains.empty()) break;
1969  }
1970 
1971  // We can only remove pointers if we are known safe in both directions.
1972  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
1973  if (UnconditionallySafe) {
1974  RetainsToMove.ReverseInsertPts.clear();
1975  ReleasesToMove.ReverseInsertPts.clear();
1976  NewCount = 0;
1977  } else {
1978  // Determine whether the new insertion points we computed preserve the
1979  // balance of retain and release calls through the program.
1980  // TODO: If the fully aggressive solution isn't valid, try to find a
1981  // less aggressive solution which is.
1982  if (NewDelta != 0)
1983  return false;
1984 
1985  // At this point, we are not going to remove any RR pairs, but we still are
1986  // able to move RR pairs. If one of our pointers is afflicted with
1987  // CFGHazards, we cannot perform such code motion so exit early.
1988  const bool WillPerformCodeMotion =
1989  !RetainsToMove.ReverseInsertPts.empty() ||
1990  !ReleasesToMove.ReverseInsertPts.empty();
1991  if (CFGHazardAfflicted && WillPerformCodeMotion)
1992  return false;
1993  }
1994 
1995  // Determine whether the original call points are balanced in the retain and
1996  // release calls through the program. If not, conservatively don't touch
1997  // them.
1998  // TODO: It's theoretically possible to do code motion in this case, as
1999  // long as the existing imbalances are maintained.
2000  if (OldDelta != 0)
2001  return false;
2002 
2003  Changed = true;
2004  assert(OldCount != 0 && "Unreachable code?");
2005  NumRRs += OldCount - NewCount;
2006  // Set to true if we completely removed any RR pairs.
2007  AnyPairsCompletelyEliminated = NewCount == 0;
2008 
2009  // We can move calls!
2010  return true;
2011 }
2012 
2013 /// Identify pairings between the retains and releases, and delete and/or move
2014 /// them.
2015 bool ObjCARCOpt::PerformCodePlacement(
2016  DenseMap<const BasicBlock *, BBState> &BBStates,
2017  BlotMapVector<Value *, RRInfo> &Retains,
2018  DenseMap<Value *, RRInfo> &Releases, Module *M) {
2019  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2020 
2021  bool AnyPairsCompletelyEliminated = false;
2022  SmallVector<Instruction *, 8> DeadInsts;
2023 
2024  // Visit each retain.
2025  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2026  E = Retains.end();
2027  I != E; ++I) {
2028  Value *V = I->first;
2029  if (!V) continue; // blotted
2030 
2031  Instruction *Retain = cast<Instruction>(V);
2032 
2033  LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2034 
2035  Value *Arg = GetArgRCIdentityRoot(Retain);
2036 
2037  // If the object being released is in static or stack storage, we know it's
2038  // not being managed by ObjC reference counting, so we can delete pairs
2039  // regardless of what possible decrements or uses lie between them.
2040  bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2041 
2042  // A constant pointer can't be pointing to an object on the heap. It may
2043  // be reference-counted, but it won't be deleted.
2044  if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2045  if (const GlobalVariable *GV =
2046  dyn_cast<GlobalVariable>(
2047  GetRCIdentityRoot(LI->getPointerOperand())))
2048  if (GV->isConstant())
2049  KnownSafe = true;
2050 
2051  // Connect the dots between the top-down-collected RetainsToMove and
2052  // bottom-up-collected ReleasesToMove to form sets of related calls.
2053  RRInfo RetainsToMove, ReleasesToMove;
2054 
2055  bool PerformMoveCalls = PairUpRetainsAndReleases(
2056  BBStates, Retains, Releases, M, Retain, DeadInsts,
2057  RetainsToMove, ReleasesToMove, Arg, KnownSafe,
2058  AnyPairsCompletelyEliminated);
2059 
2060  if (PerformMoveCalls) {
2061  // Ok, everything checks out and we're all set. Let's move/delete some
2062  // code!
2063  MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2064  Retains, Releases, DeadInsts, M);
2065  }
2066  }
2067 
2068  // Now that we're done moving everything, we can delete the newly dead
2069  // instructions, as we no longer need them as insert points.
2070  while (!DeadInsts.empty())
2071  EraseInstruction(DeadInsts.pop_back_val());
2072 
2073  return AnyPairsCompletelyEliminated;
2074 }
2075 
2076 /// Weak pointer optimizations.
2077 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2078  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2079 
2080  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
2081  // itself because it uses AliasAnalysis and we need to do provenance
2082  // queries instead.
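  // A minimal sketch of both cases (illustrative IR, hypothetical values),
  // valid only when nothing in between can modify the weak variable %p:
  //
  //   call i8* @objc_initWeak(i8** %p, i8* %v)
  //   %a = call i8* @objc_loadWeak(i8** %p)   ; S2L: forward %v to %a
  //   %b = call i8* @objc_loadWeak(i8** %p)   ; RLE: reuse %a for %b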
2083  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2084  Instruction *Inst = &*I++;
2085 
2086  LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2087 
2088  ARCInstKind Class = GetBasicARCInstKind(Inst);
2089  if (Class != ARCInstKind::LoadWeak &&
2090  Class != ARCInstKind::LoadWeakRetained)
2091  continue;
2092 
2093  // Delete objc_loadWeak calls with no users.
2094  if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
2095  Inst->eraseFromParent();
2096  Changed = true;
2097  continue;
2098  }
2099 
2100  // TODO: For now, just look for an earlier available version of this value
2101  // within the same block. Theoretically, we could do memdep-style non-local
2102  // analysis too, but that would want caching. A better approach would be to
2103  // use the technique that EarlyCSE uses.
2104  inst_iterator Current = std::prev(I);
2105  BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
2106  for (BasicBlock::iterator B = CurrentBB->begin(),
2107  J = Current.getInstructionIterator();
2108  J != B; --J) {
2109  Instruction *EarlierInst = &*std::prev(J);
2110  ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
2111  switch (EarlierClass) {
2112  case ARCInstKind::LoadWeak:
2113  case ARCInstKind::LoadWeakRetained: {
2114  // If this is loading from the same pointer, replace this load's value
2115  // with that one.
2116  CallInst *Call = cast<CallInst>(Inst);
2117  CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2118  Value *Arg = Call->getArgOperand(0);
2119  Value *EarlierArg = EarlierCall->getArgOperand(0);
2120  switch (PA.getAA()->alias(Arg, EarlierArg)) {
2121  case AliasResult::MustAlias:
2122  Changed = true;
2123  // If the load has a builtin retain, insert a plain retain for it.
2124  if (Class == ARCInstKind::LoadWeakRetained) {
2125  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2126  CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2127  CI->setTailCall();
2128  }
2129  // Zap the fully redundant load.
2130  Call->replaceAllUsesWith(EarlierCall);
2131  Call->eraseFromParent();
2132  goto clobbered;
2133  case AliasResult::MayAlias:
2134  case AliasResult::PartialAlias:
2135  goto clobbered;
2136  case AliasResult::NoAlias:
2137  break;
2138  }
2139  break;
2140  }
2141  case ARCInstKind::StoreWeak:
2142  case ARCInstKind::InitWeak: {
2143  // If this is storing to the same pointer and has the same size etc.
2144  // replace this load's value with the stored value.
2145  CallInst *Call = cast<CallInst>(Inst);
2146  CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2147  Value *Arg = Call->getArgOperand(0);
2148  Value *EarlierArg = EarlierCall->getArgOperand(0);
2149  switch (PA.getAA()->alias(Arg, EarlierArg)) {
2150  case AliasResult::MustAlias:
2151  Changed = true;
2152  // If the load has a builtin retain, insert a plain retain for it.
2153  if (Class == ARCInstKind::LoadWeakRetained) {
2154  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2155  CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2156  CI->setTailCall();
2157  }
2158  // Zap the fully redundant load.
2159  Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2160  Call->eraseFromParent();
2161  goto clobbered;
2162  case AliasResult::MayAlias:
2163  case AliasResult::PartialAlias:
2164  goto clobbered;
2165  case AliasResult::NoAlias:
2166  break;
2167  }
2168  break;
2169  }
2170  case ARCInstKind::MoveWeak:
2171  case ARCInstKind::CopyWeak:
2172  // TODO: Grab the copied value.
2173  goto clobbered;
2174  case ARCInstKind::Call:
2175  case ARCInstKind::None:
2176  case ARCInstKind::IntrinsicUser:
2177  case ARCInstKind::User:
2178  // Weak pointers are only modified through the weak entry points
2179  // (and arbitrary calls, which could call the weak entry points).
2180  break;
2181  default:
2182  // Anything else could modify the weak pointer.
2183  goto clobbered;
2184  }
2185  }
2186  clobbered:;
2187  }
2188 
2189  // Then, for each destroyWeak with an alloca operand, check to see if
2190  // the alloca and all its users can be zapped.
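  // The zappable shape looks like this (an illustrative sketch):
  //
  //   %w = alloca i8*
  //   call i8* @objc_initWeak(i8** %w, i8* %v)
  //   ...
  //   call void @objc_destroyWeak(i8** %w)
  //
  // If every user of %w is an initWeak/storeWeak/destroyWeak call, the weak
  // variable is never actually read, so the alloca and all of its users can
  // be deleted.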
2191  for (Instruction &Inst : llvm::make_early_inc_range(instructions(F))) {
2192  ARCInstKind Class = GetBasicARCInstKind(&Inst);
2193  if (Class != ARCInstKind::DestroyWeak)
2194  continue;
2195 
2196  CallInst *Call = cast<CallInst>(&Inst);
2197  Value *Arg = Call->getArgOperand(0);
2198  if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2199  for (User *U : Alloca->users()) {
2200  const Instruction *UserInst = cast<Instruction>(U);
2201  switch (GetBasicARCInstKind(UserInst)) {
2202  case ARCInstKind::InitWeak:
2203  case ARCInstKind::StoreWeak:
2204  case ARCInstKind::DestroyWeak:
2205  continue;
2206  default:
2207  goto done;
2208  }
2209  }
2210  Changed = true;
2211  for (User *U : llvm::make_early_inc_range(Alloca->users())) {
2212  CallInst *UserInst = cast<CallInst>(U);
2213  switch (GetBasicARCInstKind(UserInst)) {
2214  case ARCInstKind::InitWeak:
2215  case ARCInstKind::StoreWeak:
2216  // These functions return their second argument.
2217  UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2218  break;
2219  case ARCInstKind::DestroyWeak:
2220  // No return value.
2221  break;
2222  default:
2223  llvm_unreachable("alloca really is used!");
2224  }
2225  UserInst->eraseFromParent();
2226  }
2227  Alloca->eraseFromParent();
2228  done:;
2229  }
2230  }
2231 }
2232 
2233 /// Identify program paths which execute sequences of retains and releases
2234 /// that can be eliminated.
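/// For example (an illustrative sketch), on a path where a retain is
/// followed by a matching release with no intervening decrement or use of
/// the pointer,
/// \code
/// %0 = call i8* @objc_retain(i8* %x)
/// ; ... nothing here uses %x or can decrement its reference count ...
/// call void @objc_release(i8* %x)
/// \endcode
/// the flow analysis pairs the two calls and both can be removed.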
2235 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2236  // Releases, Retains - These are used to store the results of the main flow
2237  // analysis. These use Value* as the key instead of Instruction* so that the
2238  // map stays valid when we get around to rewriting code and calls get
2239  // replaced by arguments.
2240  DenseMap<Value *, RRInfo> Releases;
2241  BlotMapVector<Value *, RRInfo> Retains;
2242 
2243  // This is used during the traversal of the function to track the
2244  // states for each identified object at each block.
2245  DenseMap<const BasicBlock *, BBState> BBStates;
2246 
2247  // Analyze the CFG of the function, and all instructions.
2248  bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2249 
2250  if (DisableRetainReleasePairing)
2251  return false;
2252 
2253  // Transform.
2254  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
2255  Releases,
2256  F.getParent());
2257 
2258  return AnyPairsCompletelyEliminated && NestingDetected;
2259 }
2260 
2261 /// Check if there is a dependent call earlier that does not have anything in
2262 /// between the Retain and the call that can affect the reference count of their
2263 /// shared pointer argument. Note that Retain need not be in BB.
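/// The shape being matched is (hypothetical IR for illustration):
/// \code
/// %call = call i8* @something(...)   ; the call producing the value
/// ; ... nothing here can affect the reference count of %call ...
/// %0 = call i8* @objc_retain(i8* %call)
/// \endcode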
2264 static CallInst *HasSafePathToPredecessorCall(const Value *Arg,
2265  Instruction *Retain,
2266  ProvenanceAnalysis &PA) {
2267  auto *Call = dyn_cast_or_null<CallInst>(findSingleDependency(
2268  CanChangeRetainCount, Arg, Retain->getParent(), Retain, PA));
2269 
2270  // Check that the pointer is the return value of the call.
2271  if (!Call || Arg != Call)
2272  return nullptr;
2273 
2274  // Check that the call is a regular call.
2275  ARCInstKind Class = GetBasicARCInstKind(Call);
2276  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call
2277  ? Call
2278  : nullptr;
2279 }
2280 
2281 /// Find a dependent retain that precedes the given autorelease for which there
2282 /// is nothing in between the two instructions that can affect the ref count of
2283 /// Arg.
2284 static CallInst *
2285 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2286  Instruction *Autorelease,
2287  ProvenanceAnalysis &PA) {
2288  auto *Retain = dyn_cast_or_null<CallInst>(
2289  findSingleDependency(CanChangeRetainCount, Arg, BB, Autorelease, PA));
2290 
2291  // Check that we found a retain with the same argument.
2292  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
2293  GetArgRCIdentityRoot(Retain) != Arg) {
2294  return nullptr;
2295  }
2296 
2297  return Retain;
2298 }
2299 
2300 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2301 /// no instructions dependent on Arg that need a positive ref count in between
2302 /// the autorelease and the ret.
2303 static CallInst *
2304 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2305  ReturnInst *Ret,
2306  ProvenanceAnalysis &PA) {
2307  SmallPtrSet<Instruction *, 4> DepInsts;
2308  auto *Autorelease = dyn_cast_or_null<CallInst>(
2309  findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));
2310 
2311  if (!Autorelease)
2312  return nullptr;
2313  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
2314  if (!IsAutorelease(AutoreleaseClass))
2315  return nullptr;
2316  if (GetArgRCIdentityRoot(Autorelease) != Arg)
2317  return nullptr;
2318 
2319  return Autorelease;
2320 }
2321 
2322 /// Look for this pattern:
2323 /// \code
2324 /// %call = call i8* @something(...)
2325 /// %2 = call i8* @objc_retain(i8* %call)
2326 /// %3 = call i8* @objc_autorelease(i8* %2)
2327 /// ret i8* %3
2328 /// \endcode
2329 /// And delete the retain and autorelease.
2330 void ObjCARCOpt::OptimizeReturns(Function &F) {
2331  if (!F.getReturnType()->isPointerTy())
2332  return;
2333 
2334  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2335 
2336  for (BasicBlock &BB: F) {
2337  ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
2338  if (!Ret)
2339  continue;
2340 
2341  LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2342 
2343  const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
2344 
2345  // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2346  // dependent on Arg such that there are no instructions dependent on Arg
2347  // that need a positive ref count in between the autorelease and Ret.
2348  CallInst *Autorelease =
2349  FindPredecessorAutoreleaseWithSafePath(Arg, &BB, Ret, PA);
2350 
2351  if (!Autorelease)
2352  continue;
2353 
2354  CallInst *Retain = FindPredecessorRetainWithSafePath(
2355  Arg, Autorelease->getParent(), Autorelease, PA);
2356 
2357  if (!Retain)
2358  continue;
2359 
2360  // Check that there is nothing that can affect the reference count
2361  // between the retain and the call. Note that Retain need not be in BB.
2362  CallInst *Call = HasSafePathToPredecessorCall(Arg, Retain, PA);
2363 
2364  // Don't remove retainRV/autoreleaseRV pairs if the call isn't a tail call.
2365  if (!Call ||
2366  (!Call->isTailCall() &&
2367  GetBasicARCInstKind(Retain) == ARCInstKind::RetainRV &&
2368  GetBasicARCInstKind(Autorelease) == ARCInstKind::AutoreleaseRV))
2369  continue;
2370 
2371  // If so, we can zap the retain and autorelease.
2372  Changed = true;
2373  ++NumRets;
2374  LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
2375  << "\n");
2376  BundledInsts->eraseInst(Retain);
2377  EraseInstruction(Autorelease);
2378  }
2379 }
2380 
2381 #ifndef NDEBUG
2382 void
2383 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
2384  Statistic &NumRetains =
2385  AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
2386  Statistic &NumReleases =
2387  AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;
2388 
2389  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2390  Instruction *Inst = &*I++;
2391  switch (GetBasicARCInstKind(Inst)) {
2392  default:
2393  break;
2394  case ARCInstKind::Retain:
2395  ++NumRetains;
2396  break;
2397  case ARCInstKind::Release:
2398  ++NumReleases;
2399  break;
2400  }
2401  }
2402 }
2403 #endif
2404 
2405 void ObjCARCOpt::init(Module &M) {
2406  if (!EnableARCOpts)
2407  return;
2408 
2409  // Intuitively, objc_retain and others are nocapture; however, in practice
2410  // they are not, because they return their argument value. And objc_release
2411  // calls finalizers which can have arbitrary side effects.
2412  MDKindCache.init(&M);
2413 
2414  // Initialize our runtime entry point cache.
2415  EP.init(&M);
2416 }
2417 
2418 bool ObjCARCOpt::run(Function &F, AAResults &AA) {
2419  if (!EnableARCOpts)
2420  return false;
2421 
2422  Changed = CFGChanged = false;
2423  BundledRetainClaimRVs BRV(/*ContractPass=*/false);
2424  BundledInsts = &BRV;
2425 
2426  LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
2427  << " >>>"
2428  "\n");
2429 
2430  std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
2431  Changed |= R.first;
2432  CFGChanged |= R.second;
2433 
2434  PA.setAA(&AA);
2435 
2436 #ifndef NDEBUG
2437  if (AreStatisticsEnabled()) {
2438  GatherStatistics(F, false);
2439  }
2440 #endif
2441 
2442  // This pass performs several distinct transformations. As a compile-time aid
2443  // when compiling code that isn't ObjC, skip these if the relevant ObjC
2444  // library functions aren't declared.
2445 
2446  // Preliminary optimizations. This also computes UsedInThisFunction.
2447  OptimizeIndividualCalls(F);
2448 
2449  // Optimizations for weak pointers.
2450  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
2451  (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
2452  (1 << unsigned(ARCInstKind::StoreWeak)) |
2453  (1 << unsigned(ARCInstKind::InitWeak)) |
2454  (1 << unsigned(ARCInstKind::CopyWeak)) |
2455  (1 << unsigned(ARCInstKind::MoveWeak)) |
2456  (1 << unsigned(ARCInstKind::DestroyWeak))))
2457  OptimizeWeakCalls(F);
2458 
2459  // Optimizations for retain+release pairs.
2460  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
2461  (1 << unsigned(ARCInstKind::RetainRV)) |
2462  (1 << unsigned(ARCInstKind::RetainBlock))))
2463  if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
2464  // Run OptimizeSequences until it either stops making changes or
2465  // no retain+release pair nesting is detected.
2466  while (OptimizeSequences(F)) {}
2467 
2468  // Optimizations if objc_autorelease is used.
2469  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
2470  (1 << unsigned(ARCInstKind::AutoreleaseRV))))
2471  OptimizeReturns(F);
2472 
2473  // Gather statistics after optimization.
2474 #ifndef NDEBUG
2475  if (AreStatisticsEnabled()) {
2476  GatherStatistics(F, true);
2477  }
2478 #endif
2479 
2480  LLVM_DEBUG(dbgs() << "\n");
2481 
2482  return Changed;
2483 }
2484 
2485 /// @}
2486 ///
2487 
2488 PreservedAnalyses ObjCARCOptPass::run(Function &F,
2489  FunctionAnalysisManager &AM) {
2490  ObjCARCOpt OCAO;
2491  OCAO.init(*F.getParent());
2492 
2493  bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
2494  bool CFGChanged = OCAO.hasCFGChanged();
2495  if (Changed) {
2496  PreservedAnalyses PA;
2497  if (!CFGChanged)
2498  PA.preserveSet<CFGAnalyses>();
2499  return PA;
2500  }
2501  return PreservedAnalyses::all();
2502 }