//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//
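//
// As a concrete illustration of the first category (a hand-written IR sketch,
// not output from any particular frontend), a pairing such as
//
//   %0 = tail call i8* @objc_retain(i8* %x)
//   ; ... code that provably cannot decrement %x's reference count ...
//   tail call void @objc_release(i8* %x)
//
// can be deleted outright once the dataflow analysis below matches the retain
// with the release and proves no intervening use needs the object alive.
//
//===----------------------------------------------------------------------===//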

#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/ObjCARC.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

static cl::opt<unsigned> MaxPtrStates("arc-opt-max-ptr-states",
                                      cl::Hidden,
                                      cl::desc("Maximum number of ptr states the optimizer keeps track of"),
                                      cl::init(4095));
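
// A usage sketch (hypothetical invocation; the legacy pass is registered as
// "objc-arc" below, and cl::Hidden only hides the flag from -help):
//
//   opt -objc-arc -arc-opt-max-ptr-states=500 in.ll -S -o out.ll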

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
          cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but they are
  // all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
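//
// For instance (an illustrative sketch only, not a transform this pass
// performs today):
//
//   %pool = call i8* @objc_autoreleasePoolPush()
//   call void @foo()   ; known to autorelease nothing
//   call void @objc_autoreleasePoolPop(i8* %pool)
//
// could have the push/pop pair deleted entirely.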

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt, "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt, "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt, "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt, "Number of releases after optimization");
#endif

namespace {

  /// Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount = 0;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount = 0;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() = default;

    using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
    using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }
    bool hasTopDownPtrs() const {
      return !PerPtrTopDown.empty();
    }

    unsigned top_down_ptr_list_size() const {
      return std::distance(top_down_ptr_begin(), top_down_ptr_end());
    }

    using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
    using const_bottom_up_ptr_iterator =
        decltype(PerPtrBottomUp)::const_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }
    bool hasBottomUpPtrs() const {
      return !PerPtrBottomUp.empty();
    }

    unsigned bottom_up_ptr_list_size() const {
      return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
          (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set or if
      // all of the lower bits of Product are set.
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }
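    // A worked example of the check above (illustrative numbers): with
    // TopDownPathCount = 0x10000 and BottomUpPathCount = 0x10000, Product is
    // 0x100000000, so (Product >> 32) is nonzero and we report overflow. With
    // counts 0xffff and 0x10001, Product is 0xffffffff, which equals
    // OverflowOccurredValue; the second clause reports overflow even though
    // the multiplication itself fit in 32 bits.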

    // Specialized CFG utilities.
    using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when the
  // addition makes TopDownPathCount equal to OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when the
  // addition makes BottomUpPathCount equal to OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << " TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  OS << " BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

/// The main ARC optimization pass.
class ObjCARCOpt {
  bool Changed;
  bool CFGChanged;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  BundledRetainClaimRVs *BundledInsts = nullptr;

  /// A flag indicating whether the optimization that removes or moves
  /// retain/release pairs should be performed.
  bool DisableRetainReleasePairing = false;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  /// Optimize an individual call, optionally passing the
  /// GetArgRCIdentityRoot if it has already been computed.
  void OptimizeIndividualCallImpl(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, ARCInstKind Class, const Value *Arg);

  /// Try to optimize an AutoreleaseRV with a RetainRV or ClaimRV. If the
  /// optimization occurs, returns true to indicate that the caller should
  /// assume the instructions are dead.
  bool OptimizeInlinedAutoreleaseRVCall(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, const Value *&Arg, ARCInstKind Class,
      Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                DenseMap<Value *, RRInfo> &Releases, Module *M,
                                Instruction *Retain,
                                SmallVectorImpl<Instruction *> &DeadInsts,
                                RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                                Value *Arg, bool KnownSafe,
                                bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

public:
  void init(Module &M);
  bool run(Function &F, AAResults &AA);
  void releaseMemory();
  bool hasCFGChanged() const { return CFGChanged; }
};

/// The main ARC optimization pass.
class ObjCARCOptLegacyPass : public FunctionPass {
public:
  ObjCARCOptLegacyPass() : FunctionPass(ID) {
    initializeObjCARCOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override {
    OCAO.init(M);
    return false;
  }
  bool runOnFunction(Function &F) override {
    return OCAO.run(F, getAnalysis<AAResultsWrapperPass>().getAAResults());
  }
  void releaseMemory() override { OCAO.releaseMemory(); }
  static char ID;

private:
  ObjCARCOpt OCAO;
};
} // end anonymous namespace

char ObjCARCOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                    false, false)

Pass *llvm::createObjCARCOptPass() { return new ObjCARCOptLegacyPass(); }

void ObjCARCOptLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  if (const Instruction *Call = dyn_cast<CallBase>(Arg)) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  assert(!BundledInsts->contains(RetainRV) &&
         "a bundled retainRV's argument should be a call");

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                       "objc_retain since the operand is not a return value.\n"
                       "Old = "
                    << *RetainRV << "\n");

  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, const Value *&Arg, ARCInstKind Class,
    Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
  if (BundledInsts->contains(Inst))
    return false;

  // Must be in the same basic block.
  assert(Inst->getParent() == AutoreleaseRV->getParent());

  // Must operate on the same root.
  Arg = GetArgRCIdentityRoot(Inst);
  AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
  if (Arg != AutoreleaseRVArg) {
    // If there isn't an exact match, check if we have equivalent PHIs.
    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      return false;

    SmallVector<const Value *, 4> ArgUsers;
    getEquivalentPHIs(*PN, ArgUsers);
    if (!llvm::is_contained(ArgUsers, AutoreleaseRVArg))
      return false;
  }

  // Okay, this is a match. Merge them.
  ++NumPeeps;
  LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
                    << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");

  // Delete the RV pair, starting with the AutoreleaseRV.
  AutoreleaseRV->replaceAllUsesWith(
      cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
  Changed = true;
  EraseInstruction(AutoreleaseRV);
  if (Class == ARCInstKind::RetainRV) {
    // AutoreleaseRV and RetainRV cancel out. Delete the RetainRV.
    Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
    EraseInstruction(Inst);
    return true;
  }

  // ClaimRV is a frontend peephole for RetainRV + Release. Since the
  // AutoreleaseRV and RetainRV cancel out, replace the ClaimRV with a Release.
  assert(Class == ARCInstKind::ClaimRV);
  Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
  CallInst *Release = CallInst::Create(
      EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
  assert(IsAlwaysTail(ARCInstKind::ClaimRV) &&
         "Expected ClaimRV to be safe to tail call");
  Release->setTailCall();
  Inst->replaceAllUsesWith(CallArg);
  EraseInstruction(Inst);

  // Run the normal optimizations on Release.
  OptimizeIndividualCallImpl(F, BlockColors, Release, ARCInstKind::Release,
                             Arg);
  return true;
}
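
// To illustrate the pairing above (hand-written IR, hypothetical function
// names): after inlining, a sequence such as
//
//   %v = call i8* @objc_autoreleaseReturnValue(i8* %x)  ; from the callee
//   %w = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)
//
// nets out to no reference-count change, so both calls are deleted. With
// objc_unsafeClaimAutoreleasedReturnValue (ClaimRV, i.e. RetainRV + Release)
// in place of the RetainRV, the pair instead folds to a single objc_release.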

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_autoreleaseReturnValue => "
                "objc_autorelease since its operand is not used as a return "
                "value.\n"
                "Old = "
             << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
}

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
  // with RetainRV and ClaimRV.
  Instruction *DelayedAutoreleaseRV = nullptr;
  const Value *DelayedAutoreleaseRVArg = nullptr;
  auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
    assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
    DelayedAutoreleaseRV = AutoreleaseRV;
    DelayedAutoreleaseRVArg = nullptr;
  };
  auto optimizeDelayedAutoreleaseRV = [&]() {
    if (!DelayedAutoreleaseRV)
      return;
    OptimizeIndividualCallImpl(F, BlockColors, DelayedAutoreleaseRV,
                               ARCInstKind::AutoreleaseRV,
                               DelayedAutoreleaseRVArg);
    setDelayedAutoreleaseRV(nullptr);
  };
  auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
    // Nothing to delay, but we may as well skip the logic below.
    if (!DelayedAutoreleaseRV)
      return true;

    // If we hit the end of the basic block we're not going to find an RV-pair.
    // Stop delaying.
    if (NonARCInst->isTerminator())
      return false;

    // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
    // ClaimRV, it's probably safe to skip over even opaque function calls
    // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
    // have the same RCIdentityRoot. However, what really matters is
    // skipping instructions or intrinsics that the inliner could leave behind;
    // be conservative for now and don't skip over opaque calls, which could
    // potentially include other ARC calls.
    auto *CB = dyn_cast<CallBase>(NonARCInst);
    if (!CB)
      return true;
    return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
  };

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (objcarc::hasAttachedCallOpBundle(CI)) {
        BundledInsts->insertRVCall(&*I, CI);
        Changed = true;
      }

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // Skip this loop if this instruction isn't itself an ARC intrinsic.
    const Value *Arg = nullptr;
    switch (Class) {
    default:
      optimizeDelayedAutoreleaseRV();
      break;
    case ARCInstKind::CallOrUser:
    case ARCInstKind::User:
    case ARCInstKind::None:
      // This is a non-ARC instruction. If we're delaying an AutoreleaseRV,
      // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
      // now.
      if (!shouldDelayAutoreleaseRV(Inst))
        optimizeDelayedAutoreleaseRV();
      continue;
    case ARCInstKind::AutoreleaseRV:
      optimizeDelayedAutoreleaseRV();
      setDelayedAutoreleaseRV(Inst);
      continue;
    case ARCInstKind::RetainRV:
    case ARCInstKind::ClaimRV:
      if (DelayedAutoreleaseRV) {
        // We have a potential RV pair. Check if they cancel out.
        if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
                                             DelayedAutoreleaseRV,
                                             DelayedAutoreleaseRVArg)) {
          setDelayedAutoreleaseRV(nullptr);
          continue;
        }
        optimizeDelayedAutoreleaseRV();
      }
      break;
    }

    OptimizeIndividualCallImpl(F, BlockColors, Inst, Class, Arg);
  }

  // Catch the final delayed AutoreleaseRV.
  optimizeDelayedAutoreleaseRV();
}

/// This function returns true if the value is inert. An ObjC ARC runtime call
/// taking an inert operand can be safely deleted.
static bool isInertARCValue(Value *V, SmallPtrSet<Value *, 1> &VisitedPhis) {
  V = V->stripPointerCasts();

  if (IsNullOrUndef(V))
    return true;

  // See if this is a global variable annotated with 'objc_arc_inert'.
  if (auto *GV = dyn_cast<GlobalVariable>(V))
    if (GV->hasAttribute("objc_arc_inert"))
      return true;

  if (auto PN = dyn_cast<PHINode>(V)) {
    // Ignore this phi if it has already been discovered.
    if (!VisitedPhis.insert(PN).second)
      return true;
    // Look through the phi's operands.
    for (Value *Opnd : PN->incoming_values())
      if (!isInertARCValue(Opnd, VisitedPhis))
        return false;
    return true;
  }

  return false;
}
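
// For example (an illustrative sketch): a runtime call whose operand is
// null/undef, a global variable carrying the "objc_arc_inert" attribute, or
// a phi of such values, e.g.
//
//   call void @objc_release(i8* null)
//
// is a no-op, and OptimizeIndividualCallImpl below deletes it.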

void ObjCARCOpt::OptimizeIndividualCallImpl(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, ARCInstKind Class, const Value *Arg) {
  LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

  // We can delete this call if it takes an inert value.
  SmallPtrSet<Value *, 1> VisitedPhis;

  if (BundledInsts->contains(Inst)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  if (IsNoopOnGlobal(Class))
    if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
      if (!Inst->getType()->isVoidTy())
        Inst->replaceAllUsesWith(Inst->getOperand(0));
      Inst->eraseFromParent();
      Changed = true;
      return;
    }

  switch (Class) {
  default:
    break;

  // Delete no-op casts. These function calls have special semantics, but
  // the semantics are entirely implemented via lowering in the front-end,
  // so by the time they reach the optimizer, they are just no-op calls
  // which return their argument.
  //
  // There are gray areas here, as the ability to cast reference-counted
  // pointers to raw void* and back allows code to break ARC assumptions,
  // however these are currently considered to be unimportant.
  case ARCInstKind::NoopCast:
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
    EraseInstruction(Inst);
    return;

  // If the pointer-to-weak-pointer is null, it's undefined behavior.
  case ARCInstKind::LoadWeak:
  case ARCInstKind::LoadWeakRetained:
  case ARCInstKind::StoreWeak:
  case ARCInstKind::InitWeak:
  case ARCInstKind::DestroyWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);
      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");
      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::CopyWeak:
  case ARCInstKind::MoveWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0)) ||
        IsNullOrUndef(CI->getArgOperand(1))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);

      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");

      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::RetainRV:
    if (OptimizeRetainRVCall(F, Inst))
      return;
    break;
  case ARCInstKind::AutoreleaseRV:
    OptimizeAutoreleaseRVCall(F, Inst, Class);
    break;
  }

  // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
  if (IsAutorelease(Class) && Inst->use_empty()) {
    CallInst *Call = cast<CallInst>(Inst);
    const Value *Arg = Call->getArgOperand(0);
    Arg = FindSingleUseIdentifiedObject(Arg);
    if (Arg) {
      Changed = true;
      ++NumAutoreleases;

      // Create the declaration lazily.
      LLVMContext &C = Inst->getContext();

      Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
      CallInst *NewCall =
          CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
      NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                           MDNode::get(C, None));

      LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                           "since x is otherwise unused.\nOld: "
                        << *Call << "\nNew: " << *NewCall << "\n");

      EraseInstruction(Call);
      Inst = NewCall;
      Class = ARCInstKind::Release;
    }
  }

  // For functions which can never be passed stack arguments, add
  // a tail keyword.
  if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
    Changed = true;
    LLVM_DEBUG(
        dbgs() << "Adding tail keyword to function since it can never be "
                  "passed stack args: "
               << *Inst << "\n");
    cast<CallInst>(Inst)->setTailCall();
  }

  // Ensure that functions that can never have a "tail" keyword due to the
  // semantics of ARC truly do not do so.
  if (IsNeverTail(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setTailCall(false);
  }

  // Set nounwind as needed.
  if (IsNoThrow(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setDoesNotThrow();
  }

  // Note: This catches instructions unrelated to ARC.
  if (!IsNoopOnNull(Class)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  // If we haven't already looked up the root, look it up now.
  if (!Arg)
    Arg = GetArgRCIdentityRoot(Inst);

  // ARC calls with null are no-ops. Delete them.
  if (IsNullOrUndef(Arg)) {
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                      << "\n");
    EraseInstruction(Inst);
    return;
  }

  // Keep track of which of retain, release, autorelease, and retain_block
  // are actually present in this function.
  UsedInThisFunction |= 1 << unsigned(Class);

  // If Arg is a PHI, and one or more incoming values to the
  // PHI are null, and the call is control-equivalent to the PHI, and there
  // are no relevant side effects between the PHI and the call, and the call
  // is not a release that doesn't have the clang.imprecise_release tag, the
  // call could be pushed up to just those paths with non-null incoming
  // values. For now, don't bother splitting critical edges for this.
  if (Class == ARCInstKind::Release &&
      !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
    return;

  SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(Inst, Arg));
  do {
    std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
    Inst = Pair.first;
    Arg = Pair.second;

    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      continue;

    // Determine if the PHI has any null operands, or any incoming
    // critical edges.
    bool HasNull = false;
    bool HasCriticalEdges = false;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        HasNull = true;
      else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
               1) {
        HasCriticalEdges = true;
        break;
      }
    }
    // If we have null operands and no critical edges, optimize.
    if (HasCriticalEdges)
      continue;
    if (!HasNull)
      continue;

    Instruction *DepInst = nullptr;

    // Check that there is nothing that cares about the reference
    // count between the call and the phi.
    switch (Class) {
    case ARCInstKind::Retain:
    case ARCInstKind::RetainBlock:
      // These can always be moved up.
      break;
    case ARCInstKind::Release:
      // These can't be moved across things that care about the retain
      // count.
      DepInst = findSingleDependency(NeedsPositiveRetainCount, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::Autorelease:
      // These can't be moved across autorelease pool scope boundaries.
      DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::ClaimRV:
    case ARCInstKind::RetainRV:
    case ARCInstKind::AutoreleaseRV:
      // Don't move these; the RV optimization depends on the autoreleaseRV
      // being tail called, and the retainRV being immediately after a call
      // (which might still happen if we get lucky with codegen layout, but
      // it's not worth taking the chance).
      continue;
    default:
      llvm_unreachable("Invalid dependence flavor");
    }

    if (DepInst != PN)
      continue;

    Changed = true;
    ++NumPartialNoops;
    // Clone the call into each predecessor that has a non-null value.
    CallInst *CInst = cast<CallInst>(Inst);
    Type *ParamTy = CInst->getArgOperand(0)->getType();
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        continue;
      Value *Op = PN->getIncomingValue(i);
      Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
      CallInst *Clone = cast<CallInst>(
          CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
      if (Op->getType() != ParamTy)
        Op = new BitCastInst(Op, ParamTy, "", InsertPos);
      Clone->setArgOperand(0, Op);
      Clone->insertBefore(InsertPos);

      LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                           "And inserting clone at "
                        << *InsertPos << "\n");
      Worklist.push_back(std::make_pair(Clone, Incoming));
    }
    // Erase the original call.
    LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
    EraseInstruction(CInst);
  } while (!Worklist.empty());
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more often.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that this
    // code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    for (const BasicBlock *Succ : successors(BB)) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(Succ);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the bottom-up state for the pointer is S_None, clear the sequence
      // progress, since the sequence in the bottom up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our check for cfg hazard
      // checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state above, were it not for the fact
      // that we are known safe, stop code motion. This is because whether or
      // not it is safe to remove RR pairs via KnownSafe is an orthogonal
      // concept to whether we are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}

bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::CallOrUser:
  case ARCInstKind::None:
    // These can not be uses of an objc pointer.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  // Check that BB and MyStates have the same number of predecessors. This
  // prevents retain calls that live outside a loop from being moved into the
  // loop.
  if (!BB->hasNPredecessors(MyStates.pred_end() - MyStates.pred_begin()))
    for (auto I = MyStates.top_down_ptr_begin(),
              E = MyStates.top_down_ptr_end();
         I != E; ++I)
      I->second.SetCFGHazardAfflicted(true);

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
                    << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}
1627 
1628 static void
1630  SmallVectorImpl<BasicBlock *> &PostOrder,
1631  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
1632  unsigned NoObjCARCExceptionsMDKind,
1634  /// The visited set, for doing DFS walks.
1636 
1637  // Do DFS, computing the PostOrder.
1640 
1641  // Functions always have exactly one entry block, and we don't have
1642  // any other block that we treat like an entry block.
1643  BasicBlock *EntryBB = &F.getEntryBlock();
1644  BBState &MyStates = BBStates[EntryBB];
1645  MyStates.SetAsEntry();
1646  Instruction *EntryTI = EntryBB->getTerminator();
1647  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
1648  Visited.insert(EntryBB);
1649  OnStack.insert(EntryBB);
1650  do {
1651  dfs_next_succ:
1652  BasicBlock *CurrBB = SuccStack.back().first;
1653  succ_iterator SE(CurrBB->getTerminator(), false);
1654 
1655  while (SuccStack.back().second != SE) {
1656  BasicBlock *SuccBB = *SuccStack.back().second++;
1657  if (Visited.insert(SuccBB).second) {
1658  SuccStack.push_back(
1659  std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
1660  BBStates[CurrBB].addSucc(SuccBB);
1661  BBState &SuccStates = BBStates[SuccBB];
1662  SuccStates.addPred(CurrBB);
1663  OnStack.insert(SuccBB);
1664  goto dfs_next_succ;
1665  }
1666 
1667  if (!OnStack.count(SuccBB)) {
1668  BBStates[CurrBB].addSucc(SuccBB);
1669  BBStates[SuccBB].addPred(CurrBB);
1670  }
1671  }
1672  OnStack.erase(CurrBB);
1673  PostOrder.push_back(CurrBB);
1674  SuccStack.pop_back();
1675  } while (!SuccStack.empty());
1676 
1677  Visited.clear();
1678 
1679  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
1680  // Functions may have many exits, and there may also be blocks which we
1681  // treat as exits due to ignored edges.
1682  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
1683  for (BasicBlock &ExitBB : F) {
1684  BBState &MyStates = BBStates[&ExitBB];
1685  if (!MyStates.isExit())
1686  continue;
1687 
1688  MyStates.SetAsExit();
1689 
1690  PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
1691  Visited.insert(&ExitBB);
1692  while (!PredStack.empty()) {
1693  reverse_dfs_next_succ:
1694  BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
1695  while (PredStack.back().second != PE) {
1696  BasicBlock *BB = *PredStack.back().second++;
1697  if (Visited.insert(BB).second) {
1698  PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
1699  goto reverse_dfs_next_succ;
1700  }
1701  }
1702  ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
1703  }
1704  }
1705 }
1706 
1707 // Visit the function both top-down and bottom-up.
1708 bool ObjCARCOpt::Visit(Function &F,
1709  DenseMap<const BasicBlock *, BBState> &BBStates,
1710  BlotMapVector<Value *, RRInfo> &Retains,
1711  DenseMap<Value *, RRInfo> &Releases) {
1712  // Use reverse-postorder traversals, because we magically know that loops
1713  // will be well behaved, i.e. they won't repeatedly call retain on a single
1714  // pointer without doing a release. We can't use the ReversePostOrderTraversal
1715  // class here because we want the reverse-CFG postorder to consider each
1716  // function exit point, and we want to ignore selected cycle edges.
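  // Editorial sketch, not part of the original source: on an acyclic diamond
  // (entry -> {A, B} -> exit), reversing PostOrder visits entry first and
  // exit last (top-down), while reversing ReverseCFGPostOrder visits exit
  // first and entry last (bottom-up), so each walk reaches a block only
  // after the blocks that feed state into it.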
1717  SmallVector<BasicBlock *, 16> PostOrder;
1718  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
1719  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
1720  MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
1721  BBStates);
1722 
1723  // Use reverse-postorder on the reverse CFG for bottom-up.
1724  bool BottomUpNestingDetected = false;
1725  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
1726  BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
1727  if (DisableRetainReleasePairing)
1728  return false;
1729  }
1730 
1731  // Use reverse-postorder for top-down.
1732  bool TopDownNestingDetected = false;
1733  for (BasicBlock *BB : llvm::reverse(PostOrder)) {
1734  TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);
1735  if (DisableRetainReleasePairing)
1736  return false;
1737  }
1738 
1739  return TopDownNestingDetected && BottomUpNestingDetected;
1740 }
1741 
1742 /// Move the calls in RetainsToMove and ReleasesToMove.
1743 void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
1744  RRInfo &ReleasesToMove,
1745  BlotMapVector<Value *, RRInfo> &Retains,
1746  DenseMap<Value *, RRInfo> &Releases,
1747  SmallVectorImpl<Instruction *> &DeadInsts,
1748  Module *M) {
1749  Type *ArgTy = Arg->getType();
1750  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
1751 
1752  LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
1753 
1754  // Insert the new retain and release calls.
1755  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
1756  Value *MyArg = ArgTy == ParamTy ? Arg :
1757  new BitCastInst(Arg, ParamTy, "", InsertPt);
1758  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
1759  CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1760  Call->setDoesNotThrow();
1761  Call->setTailCall();
1762 
1763  LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
1764  << "\n"
1765  "At insertion point: "
1766  << *InsertPt << "\n");
1767  }
1768  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
1769  Value *MyArg = ArgTy == ParamTy ? Arg :
1770  new BitCastInst(Arg, ParamTy, "", InsertPt);
1771  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
1772  CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1773  // Attach a clang.imprecise_release metadata tag, if appropriate.
1774  if (MDNode *M = ReleasesToMove.ReleaseMetadata)
1775  Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
1776  Call->setDoesNotThrow();
1777  if (ReleasesToMove.IsTailCallRelease)
1778  Call->setTailCall();
1779 
1780  LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
1781  << "\n"
1782  "At insertion point: "
1783  << *InsertPt << "\n");
1784  }
1785 
1786  // Delete the original retain and release calls.
1787  for (Instruction *OrigRetain : RetainsToMove.Calls) {
1788  Retains.blot(OrigRetain);
1789  DeadInsts.push_back(OrigRetain);
1790  LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
1791  }
1792  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
1793  Releases.erase(OrigRelease);
1794  DeadInsts.push_back(OrigRelease);
1795  LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
1796  }
1797 }
1798 
1799 bool ObjCARCOpt::PairUpRetainsAndReleases(
1800  DenseMap<const BasicBlock *, BBState> &BBStates,
1801  BlotMapVector<Value *, RRInfo> &Retains,
1802  DenseMap<Value *, RRInfo> &Releases, Module *M,
1803  Instruction *Retain,
1804  SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
1805  RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
1806  bool &AnyPairsCompletelyEliminated) {
1807  // If a pair happens in a region where it is known that the reference count
1808  // is already incremented, we can similarly ignore possible decrements unless
1809  // we are dealing with a retainable object with multiple provenance sources.
1810  bool KnownSafeTD = true, KnownSafeBU = true;
1811  bool CFGHazardAfflicted = false;
1812 
1813  // Connect the dots between the top-down-collected RetainsToMove and
1814  // bottom-up-collected ReleasesToMove to form sets of related calls.
1815  // This is an iterative process so that we connect multiple releases
1816  // to multiple retains if needed.
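  // Editorial sketch with hypothetical code, not from the original source:
  // one retain can balance several releases once control flow splits,
  //
  //   objc_retain(x);
  //   if (c) { ...; objc_release(x); } else { ...; objc_release(x); }
  //
  // so the loop below walks retains -> releases -> retains alternately until
  // neither RetainsToMove nor ReleasesToMove gains a new call.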
1817  unsigned OldDelta = 0;
1818  unsigned NewDelta = 0;
1819  unsigned OldCount = 0;
1820  unsigned NewCount = 0;
1821  bool FirstRelease = true;
1822  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
1823  SmallVector<Instruction *, 4> NewReleases;
1824  for (Instruction *NewRetain : NewRetains) {
1825  auto It = Retains.find(NewRetain);
1826  assert(It != Retains.end());
1827  const RRInfo &NewRetainRRI = It->second;
1828  KnownSafeTD &= NewRetainRRI.KnownSafe;
1829  CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
1830  for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
1831  auto Jt = Releases.find(NewRetainRelease);
1832  if (Jt == Releases.end())
1833  return false;
1834  const RRInfo &NewRetainReleaseRRI = Jt->second;
1835 
1836  // If the release does not have a reference to the retain as well,
1837  // something happened which is unaccounted for. Do not do anything.
1838  //
1839  // This can happen if we catch an additive overflow during path count
1840  // merging.
1841  if (!NewRetainReleaseRRI.Calls.count(NewRetain))
1842  return false;
1843 
1844  if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
1845  // If we overflow when we compute the path count, don't remove/move
1846  // anything.
1847  const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
1848  unsigned PathCount = BBState::OverflowOccurredValue;
1849  if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1850  return false;
1851  assert(PathCount != BBState::OverflowOccurredValue &&
1852  "PathCount at this point can not be "
1853  "OverflowOccurredValue.");
1854  OldDelta -= PathCount;
1855 
1856  // Merge the ReleaseMetadata and IsTailCallRelease values.
1857  if (FirstRelease) {
1858  ReleasesToMove.ReleaseMetadata =
1859  NewRetainReleaseRRI.ReleaseMetadata;
1860  ReleasesToMove.IsTailCallRelease =
1861  NewRetainReleaseRRI.IsTailCallRelease;
1862  FirstRelease = false;
1863  } else {
1864  if (ReleasesToMove.ReleaseMetadata !=
1865  NewRetainReleaseRRI.ReleaseMetadata)
1866  ReleasesToMove.ReleaseMetadata = nullptr;
1867  if (ReleasesToMove.IsTailCallRelease !=
1868  NewRetainReleaseRRI.IsTailCallRelease)
1869  ReleasesToMove.IsTailCallRelease = false;
1870  }
1871 
1872  // Collect the optimal insertion points.
1873  if (!KnownSafe)
1874  for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
1875  if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
1876  // If we overflow when we compute the path count, don't
1877  // remove/move anything.
1878  const BBState &RIPBBState = BBStates[RIP->getParent()];
1879  PathCount = BBState::OverflowOccurredValue;
1880  if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1881  return false;
1882  assert(PathCount != BBState::OverflowOccurredValue &&
1883  "PathCount at this point can not be "
1884  "OverflowOccurredValue.");
1885  NewDelta -= PathCount;
1886  }
1887  }
1888  NewReleases.push_back(NewRetainRelease);
1889  }
1890  }
1891  }
1892  NewRetains.clear();
1893  if (NewReleases.empty()) break;
1894 
1895  // Back the other way.
1896  for (Instruction *NewRelease : NewReleases) {
1897  auto It = Releases.find(NewRelease);
1898  assert(It != Releases.end());
1899  const RRInfo &NewReleaseRRI = It->second;
1900  KnownSafeBU &= NewReleaseRRI.KnownSafe;
1901  CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
1902  for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
1903  auto Jt = Retains.find(NewReleaseRetain);
1904  if (Jt == Retains.end())
1905  return false;
1906  const RRInfo &NewReleaseRetainRRI = Jt->second;
1907 
1908  // If the retain does not have a reference to the release as well,
1909  // something happened which is unaccounted for. Do not do anything.
1910  //
1911  // This can happen if we catch an additive overflow during path count
1912  // merging.
1913  if (!NewReleaseRetainRRI.Calls.count(NewRelease))
1914  return false;
1915 
1916  if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
1917  // If we overflow when we compute the path count, don't remove/move
1918  // anything.
1919  const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
1920  unsigned PathCount = BBState::OverflowOccurredValue;
1921  if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
1922  return false;
1923  assert(PathCount != BBState::OverflowOccurredValue &&
1924  "PathCount at this point can not be "
1925  "OverflowOccurredValue.");
1926  OldDelta += PathCount;
1927  OldCount += PathCount;
1928 
1929  // Collect the optimal insertion points.
1930  if (!KnownSafe)
1931  for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
1932  if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
1933  // If we overflow when we compute the path count, don't
1934  // remove/move anything.
1935  const BBState &RIPBBState = BBStates[RIP->getParent()];
1936 
1937  PathCount = BBState::OverflowOccurredValue;
1938  if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
1939  return false;
1940  assert(PathCount != BBState::OverflowOccurredValue &&
1941  "PathCount at this point can not be "
1942  "OverflowOccurredValue.");
1943  NewDelta += PathCount;
1944  NewCount += PathCount;
1945  }
1946  }
1947  NewRetains.push_back(NewReleaseRetain);
1948  }
1949  }
1950  }
1951  if (NewRetains.empty()) break;
1952  }
1953 
1954  // We can only remove pointers if we are known safe in both directions.
1955  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
1956  if (UnconditionallySafe) {
1957  RetainsToMove.ReverseInsertPts.clear();
1958  ReleasesToMove.ReverseInsertPts.clear();
1959  NewCount = 0;
1960  } else {
1961  // Determine whether the new insertion points we computed preserve the
1962  // balance of retain and release calls through the program.
1963  // TODO: If the fully aggressive solution isn't valid, try to find a
1964  // less aggressive solution which is.
1965  if (NewDelta != 0)
1966  return false;
1967 
1968  // At this point, we are not going to remove any RR pairs, but we still are
1969  // able to move RR pairs. If one of our pointers is afflicted with
1970  // CFGHazards, we cannot perform such code motion so exit early.
1971  const bool WillPerformCodeMotion =
1972  !RetainsToMove.ReverseInsertPts.empty() ||
1973  !ReleasesToMove.ReverseInsertPts.empty();
1974  if (CFGHazardAfflicted && WillPerformCodeMotion)
1975  return false;
1976  }
1977 
1978  // Determine whether the original call points are balanced in the retain and
1979  // release calls through the program. If not, conservatively don't touch
1980  // them.
1981  // TODO: It's theoretically possible to do code motion in this case, as
1982  // long as the existing imbalances are maintained.
1983  if (OldDelta != 0)
1984  return false;
1985 
1986  Changed = true;
1987  assert(OldCount != 0 && "Unreachable code?");
1988  NumRRs += OldCount - NewCount;
1989  // Set to true if we completely removed any RR pairs.
1990  AnyPairsCompletelyEliminated = NewCount == 0;
1991 
1992  // We can move calls!
1993  return true;
1994 }
1995 
1996 /// Identify pairings between the retains and releases, and delete and/or move
1997 /// them.
1998 bool ObjCARCOpt::PerformCodePlacement(
1999  DenseMap<const BasicBlock *, BBState> &BBStates,
2000  BlotMapVector<Value *, RRInfo> &Retains,
2001  DenseMap<Value *, RRInfo> &Releases, Module *M) {
2002  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2003 
2004  bool AnyPairsCompletelyEliminated = false;
2005  SmallVector<Instruction *, 8> DeadInsts;
2006 
2007  // Visit each retain.
2008  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2009  E = Retains.end();
2010  I != E; ++I) {
2011  Value *V = I->first;
2012  if (!V) continue; // blotted
2013 
2014  Instruction *Retain = cast<Instruction>(V);
2015 
2016  LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2017 
2018  Value *Arg = GetArgRCIdentityRoot(Retain);
2019 
2020  // If the object being released is in static or stack storage, we know it's
2021  // not being managed by ObjC reference counting, so we can delete pairs
2022  // regardless of what possible decrements or uses lie between them.
2023  bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2024 
2025  // A constant pointer can't be pointing to an object on the heap. It may
2026  // be reference-counted, but it won't be deleted.
2027  if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2028  if (const GlobalVariable *GV =
2029  dyn_cast<GlobalVariable>(
2030  GetRCIdentityRoot(LI->getPointerOperand())))
2031  if (GV->isConstant())
2032  KnownSafe = true;
2033 
2034  // Connect the dots between the top-down-collected RetainsToMove and
2035  // bottom-up-collected ReleasesToMove to form sets of related calls.
2036  RRInfo RetainsToMove, ReleasesToMove;
2037 
2038  bool PerformMoveCalls = PairUpRetainsAndReleases(
2039  BBStates, Retains, Releases, M, Retain, DeadInsts,
2040  RetainsToMove, ReleasesToMove, Arg, KnownSafe,
2041  AnyPairsCompletelyEliminated);
2042 
2043  if (PerformMoveCalls) {
2044  // Ok, everything checks out and we're all set. Let's move/delete some
2045  // code!
2046  MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2047  Retains, Releases, DeadInsts, M);
2048  }
2049  }
2050 
2051  // Now that we're done moving everything, we can delete the newly dead
2052  // instructions, as we no longer need them as insert points.
2053  while (!DeadInsts.empty())
2054  EraseInstruction(DeadInsts.pop_back_val());
2055 
2056  return AnyPairsCompletelyEliminated;
2057 }
2058 
2059 /// Weak pointer optimizations.
2060 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2061  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2062 
2063  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
2064  // itself because it uses AliasAnalysis and we need to do provenance
2065  // queries instead.
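  // Editorial sketch with hypothetical IR, not from the original source: a
  // repeated objc_loadWeak of the same pointer with no intervening clobber
  // is fully redundant,
  //
  //   %a = call i8* @objc_loadWeak(i8** %p)
  //   %b = call i8* @objc_loadWeak(i8** %p) ; MustAlias below: %b's uses are
  //                                         ; replaced with %a and %b erased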
2066  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2067  Instruction *Inst = &*I++;
2068 
2069  LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2070 
2071  ARCInstKind Class = GetBasicARCInstKind(Inst);
2072  if (Class != ARCInstKind::LoadWeak &&
2073  Class != ARCInstKind::LoadWeakRetained)
2074  continue;
2075 
2076  // Delete objc_loadWeak calls with no users.
2077  if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
2078  Inst->eraseFromParent();
2079  Changed = true;
2080  continue;
2081  }
2082 
2083  // TODO: For now, just look for an earlier available version of this value
2084  // within the same block. Theoretically, we could do memdep-style non-local
2085  // analysis too, but that would want caching. A better approach would be to
2086  // use the technique that EarlyCSE uses.
2087  inst_iterator Current = std::prev(I);
2088  BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
2089  for (BasicBlock::iterator B = CurrentBB->begin(),
2090  J = Current.getInstructionIterator();
2091  J != B; --J) {
2092  Instruction *EarlierInst = &*std::prev(J);
2093  ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
2094  switch (EarlierClass) {
2095  case ARCInstKind::LoadWeak:
2096  case ARCInstKind::LoadWeakRetained: {
2097  // If this is loading from the same pointer, replace this load's value
2098  // with that one.
2099  CallInst *Call = cast<CallInst>(Inst);
2100  CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2101  Value *Arg = Call->getArgOperand(0);
2102  Value *EarlierArg = EarlierCall->getArgOperand(0);
2103  switch (PA.getAA()->alias(Arg, EarlierArg)) {
2104  case AliasResult::MustAlias:
2105  Changed = true;
2106  // If the load has a builtin retain, insert a plain retain for it.
2107  if (Class == ARCInstKind::LoadWeakRetained) {
2108  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2109  CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2110  CI->setTailCall();
2111  }
2112  // Zap the fully redundant load.
2113  Call->replaceAllUsesWith(EarlierCall);
2114  Call->eraseFromParent();
2115  goto clobbered;
2116  case AliasResult::MayAlias:
2117  case AliasResult::PartialAlias:
2118  goto clobbered;
2119  case AliasResult::NoAlias:
2120  break;
2121  }
2122  break;
2123  }
2124  case ARCInstKind::StoreWeak:
2125  case ARCInstKind::InitWeak: {
2126  // If this is storing to the same pointer and has the same size etc.
2127  // replace this load's value with the stored value.
2128  CallInst *Call = cast<CallInst>(Inst);
2129  CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2130  Value *Arg = Call->getArgOperand(0);
2131  Value *EarlierArg = EarlierCall->getArgOperand(0);
2132  switch (PA.getAA()->alias(Arg, EarlierArg)) {
2133  case AliasResult::MustAlias:
2134  Changed = true;
2135  // If the load has a builtin retain, insert a plain retain for it.
2136  if (Class == ARCInstKind::LoadWeakRetained) {
2137  Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
2138  CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2139  CI->setTailCall();
2140  }
2141  // Zap the fully redundant load.
2142  Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2143  Call->eraseFromParent();
2144  goto clobbered;
2145  case AliasResult::MayAlias:
2146  case AliasResult::PartialAlias:
2147  goto clobbered;
2148  case AliasResult::NoAlias:
2149  break;
2150  }
2151  break;
2152  }
2153  case ARCInstKind::MoveWeak:
2154  case ARCInstKind::CopyWeak:
2155  // TODO: Grab the copied value.
2156  goto clobbered;
2157  case ARCInstKind::NoopCast:
2158  case ARCInstKind::None:
2159  case ARCInstKind::IntrinsicUser:
2160  case ARCInstKind::User:
2161  // Weak pointers are only modified through the weak entry points
2162  // (and arbitrary calls, which could call the weak entry points).
2163  break;
2164  default:
2165  // Anything else could modify the weak pointer.
2166  goto clobbered;
2167  }
2168  }
2169  clobbered:;
2170  }
2171 
2172  // Then, for each destroyWeak with an alloca operand, check to see if
2173  // the alloca and all its users can be zapped.
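  // Editorial sketch with hypothetical IR, not from the original source: a
  // zappable group, where every user of the alloca is a weak entry point,
  //
  //   %w = alloca i8*
  //   call i8* @objc_initWeak(i8** %w, i8* %x)
  //   call void @objc_destroyWeak(i8** %w)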
2174  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2175  Instruction *Inst = &*I++;
2176  ARCInstKind Class = GetBasicARCInstKind(Inst);
2177  if (Class != ARCInstKind::DestroyWeak)
2178  continue;
2179 
2180  CallInst *Call = cast<CallInst>(Inst);
2181  Value *Arg = Call->getArgOperand(0);
2182  if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2183  for (User *U : Alloca->users()) {
2184  const Instruction *UserInst = cast<Instruction>(U);
2185  switch (GetBasicARCInstKind(UserInst)) {
2186  case ARCInstKind::InitWeak:
2187  case ARCInstKind::StoreWeak:
2188  case ARCInstKind::DestroyWeak:
2189  continue;
2190  default:
2191  goto done;
2192  }
2193  }
2194  Changed = true;
2195  for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
2196  CallInst *UserInst = cast<CallInst>(*UI++);
2197  switch (GetBasicARCInstKind(UserInst)) {
2198  case ARCInstKind::InitWeak:
2199  case ARCInstKind::StoreWeak:
2200  // These functions return their second argument.
2201  UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2202  break;
2203  case ARCInstKind::DestroyWeak:
2204  // No return value.
2205  break;
2206  default:
2207  llvm_unreachable("alloca really is used!");
2208  }
2209  UserInst->eraseFromParent();
2210  }
2211  Alloca->eraseFromParent();
2212  done:;
2213  }
2214  }
2215 }
2216 
2217 /// Identify program paths which execute sequences of retains and releases which
2218 /// can be eliminated.
2219 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2220  // Releases, Retains - These are used to store the results of the main flow
2221  // analysis. These use Value* as the key instead of Instruction* so that the
2222  // map stays valid when we get around to rewriting code and calls get
2223  // replaced by arguments.
2224  DenseMap<Value *, RRInfo> Releases;
2225  BlotMapVector<Value *, RRInfo> Retains;
2226 
2227  // This is used during the traversal of the function to track the
2228  // states for each identified object at each block.
2229  DenseMap<const BasicBlock *, BBState> BBStates;
2230 
2231  // Analyze the CFG of the function, and all instructions.
2232  bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2233 
2234  if (DisableRetainReleasePairing)
2235  return false;
2236 
2237  // Transform.
2238  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
2239  Releases,
2240  F.getParent());
2241 
2242  return AnyPairsCompletelyEliminated && NestingDetected;
2243 }
2244 
2245 /// Check if there is a dependent call earlier that does not have anything in
2246 /// between the Retain and the call that can affect the reference count of their
2247 /// shared pointer argument. Note that Retain need not be in BB.
2248 static CallInst *HasSafePathToPredecessorCall(const Value *Arg,
2249  Instruction *Retain,
2250  ProvenanceAnalysis &PA) {
2251  auto *Call = dyn_cast_or_null<CallInst>(findSingleDependency(
2252  CanChangeRetainCount, Arg, Retain->getParent(), Retain, PA));
2253 
2254  // Check that the pointer is the return value of the call.
2255  if (!Call || Arg != Call)
2256  return nullptr;
2257 
2258  // Check that the call is a regular call.
2259  ARCInstKind Class = GetBasicARCInstKind(Call);
2260  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call
2261  ? Call
2262  : nullptr;
2263 }
2264 
2265 /// Find a dependent retain that precedes the given autorelease for which there
2266 /// is nothing in between the two instructions that can affect the ref count of
2267 /// Arg.
2268 static CallInst *
2269 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2270  Instruction *Autorelease,
2271  ProvenanceAnalysis &PA) {
2272  auto *Retain = dyn_cast_or_null<CallInst>(
2273  findSingleDependency(CanChangeRetainCount, Arg, BB, Autorelease, PA));
2274 
2275  // Check that we found a retain with the same argument.
2276  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
2277  GetArgRCIdentityRoot(Retain) != Arg) {
2278  return nullptr;
2279  }
2280 
2281  return Retain;
2282 }
2283 
2284 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2285 /// no instructions dependent on Arg that need a positive ref count in between
2286 /// the autorelease and the ret.
2287 static CallInst *
2288 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2289  ReturnInst *Ret,
2290  ProvenanceAnalysis &PA) {
2291  SmallPtrSet<Instruction *, 4> DepInsts;
2292  auto *Autorelease = dyn_cast_or_null<CallInst>(
2293  findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));
2294 
2295  if (!Autorelease)
2296  return nullptr;
2297  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
2298  if (!IsAutorelease(AutoreleaseClass))
2299  return nullptr;
2300  if (GetArgRCIdentityRoot(Autorelease) != Arg)
2301  return nullptr;
2302 
2303  return Autorelease;
2304 }
2305 
2306 /// Look for this pattern:
2307 /// \code
2308 /// %call = call i8* @something(...)
2309 /// %2 = call i8* @objc_retain(i8* %call)
2310 /// %3 = call i8* @objc_autorelease(i8* %2)
2311 /// ret i8* %3
2312 /// \endcode
2313 /// And delete the retain and autorelease.
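/// Editorial sketch, not part of the original comment: after the
/// transformation only the underlying call remains:
/// \code
/// %call = call i8* @something(...)
/// ret i8* %call
/// \endcode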
2314 void ObjCARCOpt::OptimizeReturns(Function &F) {
2315  if (!F.getReturnType()->isPointerTy())
2316  return;
2317 
2318  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2319 
2320  for (BasicBlock &BB: F) {
2321  ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
2322  if (!Ret)
2323  continue;
2324 
2325  LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2326 
2327  const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
2328 
2329  // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2330  // dependent on Arg such that there are no instructions dependent on Arg
2331  // that need a positive ref count in between the autorelease and Ret.
2332  CallInst *Autorelease =
2333  FindPredecessorAutoreleaseWithSafePath(Arg, &BB, Ret, PA);
2334 
2335  if (!Autorelease)
2336  continue;
2337 
2338  CallInst *Retain = FindPredecessorRetainWithSafePath(
2339  Arg, Autorelease->getParent(), Autorelease, PA);
2340 
2341  if (!Retain)
2342  continue;
2343 
2344  // Check that there is nothing that can affect the reference count
2345  // between the retain and the call. Note that Retain need not be in BB.
2346  CallInst *Call = HasSafePathToPredecessorCall(Arg, Retain, PA);
2347 
2348  // Don't remove retainRV/autoreleaseRV pairs if the call isn't a tail call.
2349  if (!Call ||
2350  (!Call->isTailCall() &&
2351  GetBasicARCInstKind(Retain) == ARCInstKind::RetainRV &&
2352  GetBasicARCInstKind(Autorelease) == ARCInstKind::AutoreleaseRV))
2353  continue;
2354 
2355  // If so, we can zap the retain and autorelease.
2356  Changed = true;
2357  ++NumRets;
2358  LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
2359  << "\n");
2360  BundledInsts->eraseInst(Retain);
2361  EraseInstruction(Autorelease);
2362  }
2363 }
2364 
2365 #ifndef NDEBUG
2366 void
2367 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
2368  Statistic &NumRetains =
2369  AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
2370  Statistic &NumReleases =
2371  AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;
2372 
2373  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2374  Instruction *Inst = &*I++;
2375  switch (GetBasicARCInstKind(Inst)) {
2376  default:
2377  break;
2378  case ARCInstKind::Retain:
2379  ++NumRetains;
2380  break;
2381  case ARCInstKind::Release:
2382  ++NumReleases;
2383  break;
2384  }
2385  }
2386 }
2387 #endif
2388 
2389 void ObjCARCOpt::init(Module &M) {
2390  if (!EnableARCOpts)
2391  return;
2392 
2393  // Intuitively, objc_retain and others are nocapture, however in practice
2394  // they are not, because they return their argument value. And objc_release
2395  // calls finalizers which can have arbitrary side effects.
2396  MDKindCache.init(&M);
2397 
2398  // Initialize our runtime entry point cache.
2399  EP.init(&M);
2400 }
2401 
2402 bool ObjCARCOpt::run(Function &F, AAResults &AA) {
2403  if (!EnableARCOpts)
2404  return false;
2405 
2406  Changed = CFGChanged = false;
2407  BundledRetainClaimRVs BRV(EP, false);
2408  BundledInsts = &BRV;
2409 
2410  LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
2411  << " >>>"
2412  "\n");
2413 
2414  std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
2415  Changed |= R.first;
2416  CFGChanged |= R.second;
2417 
2418  PA.setAA(&AA);
2419 
2420 #ifndef NDEBUG
2421  if (AreStatisticsEnabled()) {
2422  GatherStatistics(F, false);
2423  }
2424 #endif
2425 
2426  // This pass performs several distinct transformations. As a compile-time aid
2427  // when compiling code that isn't ObjC, skip these if the relevant ObjC
2428  // library functions aren't declared.
2429 
2430  // Preliminary optimizations. This also computes UsedInThisFunction.
2431  OptimizeIndividualCalls(F);
2432 
2433  // Optimizations for weak pointers.
2434  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
2435  (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
2436  (1 << unsigned(ARCInstKind::StoreWeak)) |
2437  (1 << unsigned(ARCInstKind::InitWeak)) |
2438  (1 << unsigned(ARCInstKind::CopyWeak)) |
2439  (1 << unsigned(ARCInstKind::MoveWeak)) |
2440  (1 << unsigned(ARCInstKind::DestroyWeak))))
2441  OptimizeWeakCalls(F);
2442 
2443  // Optimizations for retain+release pairs.
2444  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
2445  (1 << unsigned(ARCInstKind::RetainRV)) |
2446  (1 << unsigned(ARCInstKind::RetainBlock))))
2447  if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
2448  // Run OptimizeSequences until it either stops making changes or
2449  // no retain+release pair nesting is detected.
2450  while (OptimizeSequences(F)) {}
2451 
2452  // Optimizations if objc_autorelease is used.
2453  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
2454  (1 << unsigned(ARCInstKind::AutoreleaseRV))))
2455  OptimizeReturns(F);
2456 
2457  // Gather statistics after optimization.
2458 #ifndef NDEBUG
2459  if (AreStatisticsEnabled()) {
2460  GatherStatistics(F, true);
2461  }
2462 #endif
2463 
2464  LLVM_DEBUG(dbgs() << "\n");
2465 
2466  return Changed;
2467 }
2468 
2469 void ObjCARCOpt::releaseMemory() {
2470  PA.clear();
2471 }
2472 
2473 /// @}
2474 ///
2475 
2476 PreservedAnalyses ObjCARCOptPass::run(Function &F,
2477  FunctionAnalysisManager &AM) {
2478  ObjCARCOpt OCAO;
2479  OCAO.init(*F.getParent());
2480 
2481  bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
2482  bool CFGChanged = OCAO.hasCFGChanged();
2483  if (Changed) {
2484  PreservedAnalyses PA;
2485  if (!CFGChanged)
2486  PA.preserveSet<CFGAnalyses>();
2487  return PA;
2488  }
2489  return PreservedAnalyses::all();
2490 }