//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::ZeroOrMore, cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(false), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

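// For illustration: each cl::opt above becomes an opt command-line knob of the
// same name, so a run such as (invocation illustrative, any pipeline that
// exercises the inliner will do)
//
//   opt -passes='cgscc(inline)' -enable-noalias-to-md-conversion=0 in.ll -S
//
// disables the noalias-to-metadata conversion implemented below.
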
namespace {

  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

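// To make the bookkeeping above concrete, consider a caller such as
// (illustrative IR, names hypothetical):
//
//   entry:
//     invoke void @callee() to label %cont unwind label %lpad
//   lpad:
//     %p = phi i32 [ 0, %entry ], [ 1, %other ]
//     %lp = landingpad { i8*, i32 } cleanup
//
// Here OuterResumeDest is %lpad, CallerLPad is %lp, and UnwindDestPHIValues
// records the value each PHI in %lpad receives along the edge from the
// invoke's block (0 for %p coming from %entry).
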
/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

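// In IR terms (illustrative), forwardResume turns an inlined
//
//   resume { i8*, i32 } %lp.inl
//
// into "br label %lpad.body", adds the resuming block's values to the PHIs in
// %lpad.body, and routes %lp.inl into InnerEHValuesPHI, so code after the
// split landing pad sees a single merged exception value.
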
/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

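// A minimal WinEH sketch (illustrative) of what the helper above infers:
//
//   %cp = cleanuppad within none []
//   cleanupret from %cp unwind label %outer   ; definitive unwind dest
//
// maps %cp to the first non-PHI of %outer; "cleanupret from %cp unwind to
// caller" maps it to ConstantTokenNone instead; and a pad whose users only
// unwind to siblings yields nullptr (no proof either way).
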
/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(&I);

    if (!CI || CI->doesNotThrow())
      continue;

    if (CI->isInlineAsm()) {
      InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
      if (!IA->canThrow()) {
        continue;
      }
    }

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

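// The net effect of the rewrite above on a throwing call in an inlined block
// is, in illustrative IR (@may_throw is a placeholder callee):
//
//   %r = call i32 @may_throw()
//
// becomes
//
//   %r = invoke i32 @may_throw()
//           to label %bb.split unwind label %unwind.edge
//
// where %bb.split receives the remainder of the original block and
// %unwind.edge is the UnwindEdge block passed in.
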
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

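// Clause merging above behaves as in this sketch (illustrative IR): if the
// caller's landing pad is
//
//   %lp.outer = landingpad { i8*, i32 } catch i8* @typeid.A cleanup
//
// then an inlined "landingpad { i8*, i32 } catch i8* @typeid.B" becomes
//
//   landingpad { i8*, i32 } catch i8* @typeid.B catch i8* @typeid.A cleanup
//
// so exceptions the caller's pad would have dispatched are still handled once
// its clauses are appended to the inlined pads.
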
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}

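// Sketch of the propagation (illustrative IR): if the call site carries
//
//   call void @callee(), !noalias !5, !llvm.access.group !7
//
// then every memory-accessing instruction cloned from @callee gets !5
// concatenated onto its own !noalias and its access groups united with !7, so
// aliasing and loop-parallelism facts attached to the call keep covering the
// code that replaces it.
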
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}

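// Conceptually (illustrative metadata): if the callee's instructions refer to
//
//   !0 = distinct !{!0, !"callee: domain"}   ; scope domain
//   !1 = distinct !{!1, !0, !"callee: %p"}   ; scope
//
// clone() builds fresh nodes of the same shape, and remap() repoints the
// cloned instructions' !alias.scope / !noalias lists (and the scope lists of
// llvm.experimental.noalias.scope.decl) at the clones, so each inlined copy
// of the region lives in its own scope domain.
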
/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);

          // We'll retain this knowledge without additional metadata.
          if (AAResults::onlyAccessesInaccessibleMem(MRB))
            continue;

          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

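// End-to-end sketch (illustrative IR): when inlining
//
//   define void @f(i32* noalias %p, i32* %q) { ... }
//
// a fresh scope !1 with list !2 = !{!1} is created for %p. A cloned store
// whose pointer is based on %p is tagged "!alias.scope !2", while cloned
// accesses provably not derived from %p (e.g. ones through %q) are tagged
// "!noalias !2", preserving the argument's noalias guarantee after the call
// boundary disappears.
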
static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      Begin->getIterator(), End->getIterator(), InlinerAttributeWindow + 1);
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
  if (AB.empty())
    return AB;
  AttrBuilder Valid;
  // Only allow these whitelisted attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;

  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (Valid.empty())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Sanity check that the cloned RetVal exists and is a call, otherwise we
    // cannot add the attributes on the cloned RetVal.
    // Simplification during inlining could have transformed the cloned
    // instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be
    // incorrect if it is control flow dependent.
    // Consider:
    //    @callee {
    //      %rv = call @foo()
    //      %rv2 = call @bar()
    //      if (%rv2 != null)
    //        return %rv2
    //      if (%rv == null)
    //        exit()
    //      return %rv
    //    }
    //    caller() {
    //       %val = call nonnull @callee()
    //    }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to both RetVal and RI being in the same basic block and
    // there being no throwing/exiting instructions between these instructions.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL = AL.addRetAttributes(Context, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CB.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp =
          IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(cast<AssumeInst>(NewAsmp));
    }
  }
}
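
// The assumption emitted above is sketched by (illustrative IR, for a
// parameter declared "i8* align 32 %buf"):
//
//   call void @llvm.assume(i1 true) [ "align"(i8* %arg, i64 32) ]
//
// i.e. an operand-bundle form of llvm.assume at the call site, from which
// later passes can recover the alignment promised by the callee's signature.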

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallBase &CB,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CB.getCaller();
  const Function *Callee = CB.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    // Skip 'reference' call records.
    if (!I->first)
      continue;

    const Value *OrigCall = *I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    auto *NewCall = dyn_cast<CallBase>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    if (NewCall->getCalledFunction() &&
        NewCall->getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = NewCall->getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(NewCall, CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(NewCall, I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
}

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
                                    Module *M, BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size =
      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                       /*SrcAlign*/ Align(1), Size);
}

1385 /// When inlining a call site that has a byval argument,
1386 /// we have to make the implicit memcpy explicit by adding it.
1387 static Value *HandleByValArgument(Type *ByValType, Value *Arg,
1388  Instruction *TheCall,
1389  const Function *CalledFunc,
1390  InlineFunctionInfo &IFI,
1391  unsigned ByValAlignment) {
1392  assert(cast<PointerType>(Arg->getType())
1393  ->isOpaqueOrPointeeTypeMatches(ByValType));
1394  Function *Caller = TheCall->getFunction();
1395  const DataLayout &DL = Caller->getParent()->getDataLayout();
1396 
1397  // If the called function is readonly, then it could not mutate the caller's
1398  // copy of the byval'd memory. In this case, it is safe to elide the copy and
1399  // temporary.
1400  if (CalledFunc->onlyReadsMemory()) {
1401  // If the byval argument has a specified alignment that is greater than the
1402  // passed in pointer, then we either have to round up the input pointer or
1403  // give up on this transformation.
1404  if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1405  return Arg;
1406 
1407  AssumptionCache *AC =
1408  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1409 
1410  // If the pointer is already known to be sufficiently aligned, or if we can
1411  // round it up to a larger alignment, then we don't need a temporary.
1412  if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1413  AC) >= ByValAlignment)
1414  return Arg;
1415 
1416  // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1417  // for code quality, but rarely happens and is required for correctness.
1418  }
1419 
1420  // Create the alloca. If we have DataLayout, use nice alignment.
1421  Align Alignment(DL.getPrefTypeAlignment(ByValType));
1422 
1423  // If the byval had an alignment specified, we *must* use at least that
1424  // alignment, as it is required by the byval argument (and uses of the
1425  // pointer inside the callee).
1426  Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1427 
1428  Value *NewAlloca =
1429  new AllocaInst(ByValType, DL.getAllocaAddrSpace(), nullptr, Alignment,
1430  Arg->getName(), &*Caller->begin()->begin());
1431  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1432 
1433  // Uses of the argument in the function should use our new alloca
1434  // instead.
1435  return NewAlloca;
1436 }
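// Worked example (editorial, with hypothetical names): for
//
//   declare void @callee(%struct.S* byval(%struct.S) align 16 %p) readonly
//
// the copy is elided when the passed pointer is already (or can be rounded
// up to) align 16; otherwise this creates, in the caller's entry block,
//
//   %s = alloca %struct.S, align 16
//
// and HandleByValArgumentInit later emits the memcpy into %s.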
1437 
1438 // Check whether this Value is used by a lifetime intrinsic.
1439 static bool isUsedByLifetimeMarker(Value *V) {
1440  for (User *U : V->users())
1441  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1442  if (II->isLifetimeStartOrEnd())
1443  return true;
1444  return false;
1445 }
1446 
1447 // Check whether the given alloca already has
1448 // lifetime.start or lifetime.end intrinsics.
1449 static bool hasLifetimeMarkers(AllocaInst *AI) {
1450  Type *Ty = AI->getType();
1451  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1452  Ty->getPointerAddressSpace());
1453  if (Ty == Int8PtrTy)
1454  return isUsedByLifetimeMarker(AI);
1455 
1456  // Do a scan to find all the casts to i8*.
1457  for (User *U : AI->users()) {
1458  if (U->getType() != Int8PtrTy) continue;
1459  if (U->stripPointerCasts() != AI) continue;
1460  if (isUsedByLifetimeMarker(U))
1461  return true;
1462  }
1463  return false;
1464 }
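// The pattern being detected looks like this (illustrative IR, not from
// this file): markers either directly on an i8* alloca, or through a cast:
//
//   %a = alloca %struct.S
//   %c = bitcast %struct.S* %a to i8*
//   call void @llvm.lifetime.start.p0i8(i64 8, i8* %c)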
1465 
1466 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1467 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1468 /// cannot be static.
1469 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1470  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1471 }
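// For example (illustrative only):
//
//   %a = alloca i32          ; constant array size -> static once in entry
//   %b = alloca i32, i32 %n  ; dynamic array size  -> never static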
1472 
1473 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1474 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1475 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1476  LLVMContext &Ctx,
1477  DenseMap<const MDNode *, MDNode *> &IANodes) {
1478  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1479  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1480  OrigDL.getScope(), IA);
1481 }
1482 
1483 /// Update inlined instructions' line numbers to
1484 /// encode the location where these instructions are inlined.
1485 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1486  Instruction *TheCall, bool CalleeHasDebugInfo) {
1487  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1488  if (!TheCallDL)
1489  return;
1490 
1491  auto &Ctx = Fn->getContext();
1492  DILocation *InlinedAtNode = TheCallDL;
1493 
1494  // Create a unique call site, not to be confused with any other call from the
1495  // same location.
1496  InlinedAtNode = DILocation::getDistinct(
1497  Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1498  InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1499 
1500  // Cache the inlined-at nodes as they're built, so they are reused; without
1501  // this, every instruction's inlined-at chain would become distinct from the
1502  // others.
1503  DenseMap<const MDNode *, MDNode *> IANodes;
1504 
1505  // Check if we are not generating inline line tables and want to use
1506  // the call site location instead.
1507  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1508 
1509  for (; FI != Fn->end(); ++FI) {
1510  for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1511  BI != BE; ++BI) {
1512  // Loop metadata needs to be updated so that the start and end locs
1513  // reference inlined-at locations.
1514  auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1515  &IANodes](Metadata *MD) -> Metadata * {
1516  if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1517  return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1518  return MD;
1519  };
1520  updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1521 
1522  if (!NoInlineLineTables)
1523  if (DebugLoc DL = BI->getDebugLoc()) {
1524  DebugLoc IDL =
1525  inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1526  BI->setDebugLoc(IDL);
1527  continue;
1528  }
1529 
1530  if (CalleeHasDebugInfo && !NoInlineLineTables)
1531  continue;
1532 
1533  // If the inlined instruction has no line number, or if inline info
1534  // is not being generated, make it look as if it originates from the call
1535  // location. This is important for ((__always_inline, __nodebug__))
1536  // functions which must use caller location for all instructions in their
1537  // function body.
1538 
1539  // Don't update static allocas, as they may get moved later.
1540  if (auto *AI = dyn_cast<AllocaInst>(BI))
1541  if (allocaWouldBeStaticInEntry(AI))
1542  continue;
1543 
1544  BI->setDebugLoc(TheCallDL);
1545  }
1546 
1547  // Remove debug info intrinsics if we're not keeping inline info.
1548  if (NoInlineLineTables) {
1549  BasicBlock::iterator BI = FI->begin();
1550  while (BI != FI->end()) {
1551  if (isa<DbgInfoIntrinsic>(BI)) {
1552  BI = BI->eraseFromParent();
1553  continue;
1554  }
1555  ++BI;
1556  }
1557  }
1558 
1559  }
1560 }
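// The net effect on an inlined instruction's location is (an editorial
// sketch of the metadata, with hypothetical node numbers):
//
//   %x = add i32 %a, %b, !dbg !10
//   !10 = !DILocation(line: 5, scope: !7, inlinedAt: !11)
//
// where !11 is the distinct call-site DILocation created above.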
1561 
1562 /// Update the block frequencies of the caller after a callee has been inlined.
1563 ///
1564 /// Each block cloned into the caller has its block frequency scaled by the
1565 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1566 /// callee's entry block gets the same frequency as the callsite block and the
1567 /// relative frequencies of all cloned blocks remain the same after cloning.
1568 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1569  const ValueToValueMapTy &VMap,
1570  BlockFrequencyInfo *CallerBFI,
1571  BlockFrequencyInfo *CalleeBFI,
1572  const BasicBlock &CalleeEntryBlock) {
1573  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1574  for (auto Entry : VMap) {
1575  if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1576  continue;
1577  auto *OrigBB = cast<BasicBlock>(Entry.first);
1578  auto *ClonedBB = cast<BasicBlock>(Entry.second);
1579  uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1580  if (!ClonedBBs.insert(ClonedBB).second) {
1581  // Multiple blocks in the callee might get mapped to one cloned block in
1582  // the caller since we prune the callee as we clone it. When that happens,
1583  // we want to use the maximum among the original blocks' frequencies.
1584  uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1585  if (NewFreq > Freq)
1586  Freq = NewFreq;
1587  }
1588  CallerBFI->setBlockFreq(ClonedBB, Freq);
1589  }
1590  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1591  CallerBFI->setBlockFreqAndScale(
1592  EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1593  ClonedBBs);
1594 }
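// Worked example (editorial): if the callee's entry block has frequency 400
// and the call site block has frequency 100, setBlockFreqAndScale scales
// every cloned block by 100/400, so a cloned block of frequency 200 ends up
// with frequency 50 in the caller.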
1595 
1596 /// Update the branch metadata for cloned call instructions.
1597 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1598  const ProfileCount &CalleeEntryCount,
1599  const CallBase &TheCall, ProfileSummaryInfo *PSI,
1600  BlockFrequencyInfo *CallerBFI) {
1601  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1602  CalleeEntryCount.getCount() < 1)
1603  return;
1604  auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1605  int64_t CallCount =
1606  std::min(CallSiteCount.getValueOr(0), CalleeEntryCount.getCount());
1607  updateProfileCallee(Callee, -CallCount, &VMap);
1608 }
1609 
1610 void llvm::updateProfileCallee(
1611  Function *Callee, int64_t entryDelta,
1612  const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1613  auto CalleeCount = Callee->getEntryCount();
1614  if (!CalleeCount.hasValue())
1615  return;
1616 
1617  uint64_t priorEntryCount = CalleeCount.getCount();
1618  uint64_t newEntryCount;
1619 
1620  // Since CallSiteCount is an estimate, it could exceed the original callee
1621  // count; clamp the new count to 0 to guard against underflow.
1622  if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1623  newEntryCount = 0;
1624  else
1625  newEntryCount = priorEntryCount + entryDelta;
1626 
1627  // Is this update happening during inlining?
1628  if (VMap) {
1629  uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1630  for (auto Entry : *VMap)
1631  if (isa<CallInst>(Entry.first))
1632  if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1633  CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1634  }
1635 
1636  if (entryDelta) {
1637  Callee->setEntryCount(newEntryCount);
1638 
1639  for (BasicBlock &BB : *Callee)
1640  // No need to update the callsite if it is pruned during inlining.
1641  if (!VMap || VMap->count(&BB))
1642  for (Instruction &I : BB)
1643  if (CallInst *CI = dyn_cast<CallInst>(&I))
1644  CI->updateProfWeight(newEntryCount, priorEntryCount);
1645  }
1646 }
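// Worked example (editorial): with a callee entry count of 100 and
// entryDelta = -30, the callee keeps an entry count of 70. During inlining,
// the cloned call sites are scaled by cloneEntryCount/priorEntryCount =
// 30/100, while the call sites remaining in the callee are scaled by
// newEntryCount/priorEntryCount = 70/100.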
1647 
1648 /// An operand bundle "clang.arc.attachedcall" on a call indicates the call
1649 /// result is implicitly consumed by a call to retainRV or claimRV immediately
1650 /// after the call. This function inlines the retainRV/claimRV calls.
1651 ///
1652 /// There are three cases to consider:
1653 ///
1654 /// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
1655 /// object in the callee return block, the autoreleaseRV call and the
1656 /// retainRV/claimRV call in the caller cancel out. If the call in the caller
1657 /// is a claimRV call, a call to objc_release is emitted.
1658 ///
1659 /// 2. If there is a call in the callee return block that doesn't have operand
1660 /// bundle "clang.arc.attachedcall", the operand bundle on the original call
1661 /// is transferred to the call in the callee.
1662 ///
1663 /// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
1664 /// a retainRV call.
1665 static void
1666 inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
1667  const SmallVectorImpl<ReturnInst *> &Returns) {
1668  Module *Mod = CB.getModule();
1669  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
1670  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
1671  IsClaimRV = !IsRetainRV;
1672 
1673  for (auto *RI : Returns) {
1674  Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
1675  BasicBlock::reverse_iterator I = ++(RI->getIterator().getReverse());
1676  BasicBlock::reverse_iterator EI = RI->getParent()->rend();
1677  bool InsertRetainCall = IsRetainRV;
1678  IRBuilder<> Builder(RI->getContext());
1679 
1680  // Walk backwards through the basic block looking for either a matching
1681  // autoreleaseRV call or an unannotated call.
1682  for (; I != EI;) {
1683  auto CurI = I++;
1684 
1685  // Ignore casts.
1686  if (isa<CastInst>(*CurI))
1687  continue;
1688 
1689  if (auto *II = dyn_cast<IntrinsicInst>(&*CurI)) {
1690  if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
1691  !II->hasNUses(0) ||
1692  objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
1693  break;
1694 
1695  // If we've found a matching autoreleaseRV call:
1696  // - If claimRV is attached to the call, insert a call to objc_release
1697  // and erase the autoreleaseRV call.
1698  // - If retainRV is attached to the call, just erase the autoreleaseRV
1699  // call.
1700  if (IsClaimRV) {
1701  Builder.SetInsertPoint(II);
1702  Function *IFn =
1703  Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
1704  Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1705  Builder.CreateCall(IFn, BC, "");
1706  }
1707  II->eraseFromParent();
1708  InsertRetainCall = false;
1709  break;
1710  }
1711 
1712  auto *CI = dyn_cast<CallInst>(&*CurI);
1713 
1714  if (!CI)
1715  break;
1716 
1717  if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
1718  objcarc::hasAttachedCallOpBundle(CI))
1719  break;
1720 
1721  // If we've found an unannotated call that defines RetOpnd, add a
1722  // "clang.arc.attachedcall" operand bundle.
1723  Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
1724  OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
1725  auto *NewCall = CallBase::addOperandBundle(
1726  CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
1727  NewCall->copyMetadata(*CI);
1728  CI->replaceAllUsesWith(NewCall);
1729  CI->eraseFromParent();
1730  InsertRetainCall = false;
1731  break;
1732  }
1733 
1734  if (InsertRetainCall) {
1735  // The retainRV is attached to the call and we've failed to find a
1736  // matching autoreleaseRV or an annotated call in the callee. Emit a call
1737  // to objc_retain.
1738  Builder.SetInsertPoint(RI);
1739  Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
1740  Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1741  Builder.CreateCall(IFn, BC, "");
1742  }
1743  }
1744 }
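// Case 1 above, in illustrative IR (an editorial sketch; the operand
// bundle syntax is abbreviated): the caller has
//
//   %r = call i8* @foo() [ "clang.arc.attachedcall"(...retainAutoreleasedReturnValue) ]
//
// and the callee's return block ends in
//
//   %t = call i8* @llvm.objc.autoreleaseReturnValue(i8* %v)
//   ret i8* %t
//
// After inlining, the autoreleaseRV call is erased and no retain is
// inserted; had the bundle named claimRV, a call to @llvm.objc.release
// would be emitted in its place.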
1745 
1746 /// This function inlines the called function into the basic block of the
1747 /// caller. This returns false if it is not possible to inline this call.
1748 /// The program is still in a well defined state if this occurs though.
1749 ///
1750 /// Note that this only does one level of inlining. For example, if the
1751 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1752 /// exists in the instruction stream. Similarly this will inline a recursive
1753 /// function by one level.
1754 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1755  AAResults *CalleeAAR,
1756  bool InsertLifetime,
1757  Function *ForwardVarArgsTo) {
1758  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1759 
1760  // FIXME: we don't inline callbr yet.
1761  if (isa<CallBrInst>(CB))
1762  return InlineResult::failure("We don't inline callbr yet.");
1763 
1764  // If IFI has any state in it, zap it before we fill it in.
1765  IFI.reset();
1766 
1767  Function *CalledFunc = CB.getCalledFunction();
1768  if (!CalledFunc || // Can't inline external function or indirect
1769  CalledFunc->isDeclaration()) // call!
1770  return InlineResult::failure("external or indirect");
1771 
1772  // The inliner does not know how to inline through calls with operand bundles
1773  // in general ...
1774  if (CB.hasOperandBundles()) {
1775  for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1776  uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1777  // ... but it knows how to inline through "deopt" operand bundles ...
1778  if (Tag == LLVMContext::OB_deopt)
1779  continue;
1780  // ... and "funclet" operand bundles.
1781  if (Tag == LLVMContext::OB_funclet)
1782  continue;
1783  if (Tag == LLVMContext::OB_clang_arc_attachedcall)
1784  continue;
1785 
1786  return InlineResult::failure("unsupported operand bundle");
1787  }
1788  }
1789 
1790  // If the call to the callee cannot throw, set the 'nounwind' flag on any
1791  // calls that we inline.
1792  bool MarkNoUnwind = CB.doesNotThrow();
1793 
1794  BasicBlock *OrigBB = CB.getParent();
1795  Function *Caller = OrigBB->getParent();
1796 
1797  // GC poses two hazards to inlining, which only occur when the callee has GC:
1798  // 1. If the caller has no GC, then the callee's GC must be propagated to the
1799  // caller.
1800  // 2. If the caller has a differing GC, it is invalid to inline.
1801  if (CalledFunc->hasGC()) {
1802  if (!Caller->hasGC())
1803  Caller->setGC(CalledFunc->getGC());
1804  else if (CalledFunc->getGC() != Caller->getGC())
1805  return InlineResult::failure("incompatible GC");
1806  }
1807 
1808  // Get the personality function from the callee if it contains a landing pad.
1809  Constant *CalledPersonality =
1810  CalledFunc->hasPersonalityFn()
1811  ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1812  : nullptr;
1813 
1814  // Find the personality function used by the landing pads of the caller. If it
1815  // exists, then check to see that it matches the personality function used in
1816  // the callee.
1817  Constant *CallerPersonality =
1818  Caller->hasPersonalityFn()
1819  ? Caller->getPersonalityFn()->stripPointerCasts()
1820  : nullptr;
1821  if (CalledPersonality) {
1822  if (!CallerPersonality)
1823  Caller->setPersonalityFn(CalledPersonality);
1824  // If the personality functions match, then we can perform the
1825  // inlining. Otherwise, we can't inline.
1826  // TODO: This isn't 100% true. Some personality functions are proper
1827  // supersets of others and can be used in place of the other.
1828  else if (CalledPersonality != CallerPersonality)
1829  return InlineResult::failure("incompatible personality");
1830  }
1831 
1832  // We need to figure out which funclet the callsite was in so that we may
1833  // properly nest the callee.
1834  Instruction *CallSiteEHPad = nullptr;
1835  if (CallerPersonality) {
1836  EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1837  if (isScopedEHPersonality(Personality)) {
1838  Optional<OperandBundleUse> ParentFunclet =
1839  CB.getOperandBundle(LLVMContext::OB_funclet);
1840  if (ParentFunclet)
1841  CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1842 
1843  // OK, the inlining site is legal. What about the target function?
1844 
1845  if (CallSiteEHPad) {
1846  if (Personality == EHPersonality::MSVC_CXX) {
1847  // The MSVC personality cannot tolerate catches getting inlined into
1848  // cleanup funclets.
1849  if (isa<CleanupPadInst>(CallSiteEHPad)) {
1850  // Ok, the call site is within a cleanuppad. Let's check the callee
1851  // for catchpads.
1852  for (const BasicBlock &CalledBB : *CalledFunc) {
1853  if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1854  return InlineResult::failure("catch in cleanup funclet");
1855  }
1856  }
1857  } else if (isAsynchronousEHPersonality(Personality)) {
1858  // SEH is even less tolerant; there may not be any sort of exceptional
1859  // funclet in the callee.
1860  for (const BasicBlock &CalledBB : *CalledFunc) {
1861  if (CalledBB.isEHPad())
1862  return InlineResult::failure("SEH in cleanup funclet");
1863  }
1864  }
1865  }
1866  }
1867  }
1868 
1869  // Determine if we are dealing with a call in an EHPad which does not unwind
1870  // to caller.
1871  bool EHPadForCallUnwindsLocally = false;
1872  if (CallSiteEHPad && isa<CallInst>(CB)) {
1873  UnwindDestMemoTy FuncletUnwindMap;
1874  Value *CallSiteUnwindDestToken =
1875  getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1876 
1877  EHPadForCallUnwindsLocally =
1878  CallSiteUnwindDestToken &&
1879  !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1880  }
1881 
1882  // Get an iterator to the last basic block in the function, which will have
1883  // the new function inlined after it.
1884  Function::iterator LastBlock = --Caller->end();
1885 
1886  // Make sure to capture all of the return instructions from the cloned
1887  // function.
1888  SmallVector<ReturnInst*, 8> Returns;
1889  ClonedCodeInfo InlinedFunctionInfo;
1890  Function::iterator FirstNewBlock;
1891 
1892  { // Scope to destroy VMap after cloning.
1893  ValueToValueMapTy VMap;
1894  struct ByValInit {
1895  Value *Dst;
1896  Value *Src;
1897  Type *Ty;
1898  };
1899  // Keep a list of pair (dst, src) to emit byval initializations.
1900  SmallVector<ByValInit, 4> ByValInits;
1901 
1902  // When inlining a function that contains noalias scope metadata,
1903  // this metadata needs to be cloned so that the inlined blocks
1904  // have different "unique scopes" at every call site.
1905  // Track the metadata that must be cloned. Do this before other changes to
1906  // the function, so that we do not get in trouble when inlining caller ==
1907  // callee.
1908  ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
1909 
1910  auto &DL = Caller->getParent()->getDataLayout();
1911 
1912  // Calculate the vector of arguments to pass into the function cloner, which
1913  // matches up the formal to the actual argument values.
1914  auto AI = CB.arg_begin();
1915  unsigned ArgNo = 0;
1916  for (Function::arg_iterator I = CalledFunc->arg_begin(),
1917  E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1918  Value *ActualArg = *AI;
1919 
1920  // When byval arguments are actually inlined, we need to make the copy implied
1921  // by them explicit. However, we don't do this if the callee is readonly
1922  // or readnone, because the copy would be unneeded: the callee doesn't
1923  // modify the struct.
1924  if (CB.isByValArgument(ArgNo)) {
1925  ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
1926  &CB, CalledFunc, IFI,
1927  CalledFunc->getParamAlignment(ArgNo));
1928  if (ActualArg != *AI)
1929  ByValInits.push_back(
1930  {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
1931  }
1932 
1933  VMap[&*I] = ActualArg;
1934  }
1935 
1936  // TODO: Remove this when users have been updated to the assume bundles.
1937  // Add alignment assumptions if necessary. We do this before the inlined
1938  // instructions are actually cloned into the caller so that we can easily
1939  // check what will be known at the start of the inlined code.
1940  AddAlignmentAssumptions(CB, IFI);
1941 
1942  AssumptionCache *AC =
1943  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1944 
1945  /// Preserve all attributes of the call and its parameters.
1946  salvageKnowledge(&CB, AC);
1947 
1948  // We want the inliner to prune the code as it copies. We would LOVE to
1949  // have no dead or constant instructions leftover after inlining occurs
1950  // (which can happen, e.g., because an argument was constant), but we'll be
1951  // happy with whatever the cloner can do.
1952  CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1953  /*ModuleLevelChanges=*/false, Returns, ".i",
1954  &InlinedFunctionInfo);
1955  // Remember the first block that is newly cloned over.
1956  FirstNewBlock = LastBlock; ++FirstNewBlock;
1957 
1958  // Insert retainRV/claimRV runtime calls.
1959  objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
1960  if (RVCallKind != objcarc::ARCInstKind::None)
1961  inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
1962 
1963  // Update caller/callee profiles only when requested. For sample loader
1964  // inlining, the context-sensitive inlinee profile doesn't need to be
1965  // subtracted from callee profile, and the inlined clone also doesn't need
1966  // to be scaled based on call site count.
1967  if (IFI.UpdateProfile) {
1968  if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1969  // Update the BFI of blocks cloned into the caller.
1970  updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1971  CalledFunc->front());
1972 
1973  updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
1974  IFI.PSI, IFI.CallerBFI);
1975  }
1976 
1977  // Inject byval arguments initialization.
1978  for (ByValInit &Init : ByValInits)
1979  HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
1980  &*FirstNewBlock, IFI);
1981 
1982  Optional<OperandBundleUse> ParentDeopt =
1983  CB.getOperandBundle(LLVMContext::OB_deopt);
1984  if (ParentDeopt) {
1985  SmallVector<OperandBundleDef, 2> OpDefs;
1986 
1987  for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1988  CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1989  if (!ICS)
1990  continue; // instruction was DCE'd or RAUW'ed to undef
1991 
1992  OpDefs.clear();
1993 
1994  OpDefs.reserve(ICS->getNumOperandBundles());
1995 
1996  for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1997  ++COBi) {
1998  auto ChildOB = ICS->getOperandBundleAt(COBi);
1999  if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2000  // If the inlined call has other operand bundles, let them be
2001  OpDefs.emplace_back(ChildOB);
2002  continue;
2003  }
2004 
2005  // It may be useful to separate this logic (of handling operand
2006  // bundles) out to a separate "policy" component if this gets crowded.
2007  // Prepend the parent's deoptimization continuation to the newly
2008  // inlined call's deoptimization continuation.
2009  std::vector<Value *> MergedDeoptArgs;
2010  MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2011  ChildOB.Inputs.size());
2012 
2013  llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2014  llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2015 
2016  OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2017  }
2018 
2019  Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
2020 
2021  // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2022  // this even if the call returns void.
2023  ICS->replaceAllUsesWith(NewI);
2024 
2025  VH = nullptr;
2026  ICS->eraseFromParent();
2027  }
2028  }
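// For example (editorial): if the parent call site carries
// "deopt"(i32 1, i32 2) and an inlined call site carries "deopt"(i32 3),
// the rewritten call carries "deopt"(i32 1, i32 2, i32 3), prepending the
// parent's continuation to the child's.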
2029 
2030  // Update the callgraph if requested.
2031  if (IFI.CG)
2032  UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
2033 
2034  // For 'nodebug' functions, the associated DISubprogram is always null.
2035  // Conservatively avoid propagating the callsite debug location to
2036  // instructions inlined from a function whose DISubprogram is not null.
2037  fixupLineNumbers(Caller, FirstNewBlock, &CB,
2038  CalledFunc->getSubprogram() != nullptr);
2039 
2040  // Now clone the inlined noalias scope metadata.
2041  SAMetadataCloner.clone();
2042  SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2043 
2044  // Add noalias metadata if necessary.
2045  AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2046 
2047  // Clone return attributes on the callsite into the calls within the inlined
2048  // function which feed into its return value.
2049  AddReturnAttributes(CB, VMap);
2050 
2051  // Propagate metadata on the callsite if necessary.
2052  PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2053 
2054  // Register any cloned assumptions.
2055  if (IFI.GetAssumptionCache)
2056  for (BasicBlock &NewBlock :
2057  make_range(FirstNewBlock->getIterator(), Caller->end()))
2058  for (Instruction &I : NewBlock)
2059  if (auto *II = dyn_cast<AssumeInst>(&I))
2060  IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2061  }
2062 
2063  // If there are any alloca instructions in the block that used to be the entry
2064  // block for the callee, move them to the entry block of the caller. First
2065  // calculate which instruction they should be inserted before. We insert the
2066  // instructions at the end of the current alloca list.
2067  {
2068  BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2069  for (BasicBlock::iterator I = FirstNewBlock->begin(),
2070  E = FirstNewBlock->end(); I != E; ) {
2071  AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2072  if (!AI) continue;
2073 
2074  // If the alloca is now dead, remove it. This often occurs due to code
2075  // specialization.
2076  if (AI->use_empty()) {
2077  AI->eraseFromParent();
2078  continue;
2079  }
2080 
2081  if (!allocaWouldBeStaticInEntry(AI))
2082  continue;
2083 
2084  // Keep track of the static allocas that we inline into the caller.
2085  IFI.StaticAllocas.push_back(AI);
2086 
2087  // Scan for the block of allocas that we can move over, and move them
2088  // all at once.
2089  while (isa<AllocaInst>(I) &&
2090  !cast<AllocaInst>(I)->use_empty() &&
2091  allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2092  IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2093  ++I;
2094  }
2095 
2096  // Transfer all of the allocas over in a block. Using splice means
2097  // that the instructions aren't removed from the symbol table, then
2098  // reinserted.
2099  Caller->getEntryBlock().getInstList().splice(
2100  InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
2101  }
2102  }
2103 
2104  SmallVector<Value*,4> VarArgsToForward;
2105  SmallVector<AttributeSet, 4> VarArgsAttrs;
2106  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2107  i < CB.arg_size(); i++) {
2108  VarArgsToForward.push_back(CB.getArgOperand(i));
2109  VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2110  }
2111 
2112  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2113  if (InlinedFunctionInfo.ContainsCalls) {
2114  CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2115  if (CallInst *CI = dyn_cast<CallInst>(&CB))
2116  CallSiteTailKind = CI->getTailCallKind();
2117 
2118  // For inlining purposes, the "notail" marker is the same as no marker.
2119  if (CallSiteTailKind == CallInst::TCK_NoTail)
2120  CallSiteTailKind = CallInst::TCK_None;
2121 
2122  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2123  ++BB) {
2124  for (Instruction &I : *BB) {
2125  CallInst *CI = dyn_cast<CallInst>(&I);
2126  if (!CI)
2127  continue;
2128 
2129  // Forward varargs from inlined call site to calls to the
2130  // ForwardVarArgsTo function, if requested, and to musttail calls.
2131  if (!VarArgsToForward.empty() &&
2132  ((ForwardVarArgsTo &&
2133  CI->getCalledFunction() == ForwardVarArgsTo) ||
2134  CI->isMustTailCall())) {
2135  // Collect attributes for non-vararg parameters.
2136  AttributeList Attrs = CI->getAttributes();
2137  SmallVector<AttributeSet, 8> ArgAttrs;
2138  if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2139  for (unsigned ArgNo = 0;
2140  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2141  ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2142  }
2143 
2144  // Add VarArg attributes.
2145  ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2146  Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2147  Attrs.getRetAttrs(), ArgAttrs);
2148  // Add VarArgs to existing parameters.
2149  SmallVector<Value *, 6> Params(CI->args());
2150  Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2151  CallInst *NewCI = CallInst::Create(
2152  CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2153  NewCI->setDebugLoc(CI->getDebugLoc());
2154  NewCI->setAttributes(Attrs);
2155  NewCI->setCallingConv(CI->getCallingConv());
2156  CI->replaceAllUsesWith(NewCI);
2157  CI->eraseFromParent();
2158  CI = NewCI;
2159  }
2160 
2161  if (Function *F = CI->getCalledFunction())
2162  InlinedDeoptimizeCalls |=
2163  F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2164 
2165  // We need to reduce the strength of any inlined tail calls. For
2166  // musttail, we have to avoid introducing potential unbounded stack
2167  // growth. For example, if functions 'f' and 'g' are mutually recursive
2168  // with musttail, we can inline 'g' into 'f' so long as we preserve
2169  // musttail on the cloned call to 'f'. If either the inlined call site
2170  // or the cloned call site is *not* musttail, the program already has
2171  // one frame of stack growth, so it's safe to remove musttail. Here is
2172  // a table of example transformations:
2173  //
2174  // f -> musttail g -> musttail f ==> f -> musttail f
2175  // f -> musttail g -> tail f ==> f -> tail f
2176  // f -> g -> musttail f ==> f -> f
2177  // f -> g -> tail f ==> f -> f
2178  //
2179  // Inlined notail calls should remain notail calls.
2180  CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2181  if (ChildTCK != CallInst::TCK_NoTail)
2182  ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2183  CI->setTailCallKind(ChildTCK);
2184  InlinedMustTailCalls |= CI->isMustTailCall();
2185 
2186  // Calls inlined through a 'nounwind' call site should be marked
2187  // 'nounwind'.
2188  if (MarkNoUnwind)
2189  CI->setDoesNotThrow();
2190  }
2191  }
2192  }
2193 
2194  // Leave lifetime markers for the static allocas, scoping them to the
2195  // function we just inlined.
2196  // We need to insert lifetime intrinsics even at O0 to avoid invalid
2197  // access caused by multithreaded coroutines. The check
2198  // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2199  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2200  !IFI.StaticAllocas.empty()) {
2201  IRBuilder<> builder(&FirstNewBlock->front());
2202  for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2203  AllocaInst *AI = IFI.StaticAllocas[ai];
2204  // Don't mark swifterror allocas. They can't have bitcast uses.
2205  if (AI->isSwiftError())
2206  continue;
2207 
2208  // If the alloca is already scoped to something smaller than the whole
2209  // function then there's no need to add redundant, less accurate markers.
2210  if (hasLifetimeMarkers(AI))
2211  continue;
2212 
2213  // Try to determine the size of the allocation.
2214  ConstantInt *AllocaSize = nullptr;
2215  if (ConstantInt *AIArraySize =
2216  dyn_cast<ConstantInt>(AI->getArraySize())) {
2217  auto &DL = Caller->getParent()->getDataLayout();
2218  Type *AllocaType = AI->getAllocatedType();
2219  TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2220  uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2221 
2222  // Don't add markers for zero-sized allocas.
2223  if (AllocaArraySize == 0)
2224  continue;
2225 
2226  // Check that array size doesn't saturate uint64_t and doesn't
2227  // overflow when it's multiplied by type size.
2228  if (!AllocaTypeSize.isScalable() &&
2229  AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2230  std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2231  AllocaTypeSize.getFixedSize()) {
2232  AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2233  AllocaArraySize * AllocaTypeSize);
2234  }
2235  }
2236 
2237  builder.CreateLifetimeStart(AI, AllocaSize);
2238  for (ReturnInst *RI : Returns) {
2239  // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2240  // call and a return. The return kills all local allocas.
2241  if (InlinedMustTailCalls &&
2242  RI->getParent()->getTerminatingMustTailCall())
2243  continue;
2244  if (InlinedDeoptimizeCalls &&
2245  RI->getParent()->getTerminatingDeoptimizeCall())
2246  continue;
2247  IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2248  }
2249  }
2250  }
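// The result, in illustrative IR (editorial sketch; %a.cast stands for the
// i8* cast that the builder creates internally): for each surviving static
// alloca of known size N,
//
//   call void @llvm.lifetime.start.p0i8(i64 N, i8* %a.cast)
//   ... inlined body ...
//   call void @llvm.lifetime.end.p0i8(i64 N, i8* %a.cast)
//
// with the end marker emitted before each return that is not preceded by a
// musttail or deoptimize call.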
2251 
2252  // If the inlined code contained dynamic alloca instructions, wrap the inlined
2253  // code with llvm.stacksave/llvm.stackrestore intrinsics.
2254  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2255  Module *M = Caller->getParent();
2256  // Get the two intrinsics we care about.
2257  Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2258  Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
2259 
2260  // Insert the llvm.stacksave.
2261  CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2262  .CreateCall(StackSave, {}, "savedstack");
2263 
2264  // Insert a call to llvm.stackrestore before any return instructions in the
2265  // inlined function.
2266  for (ReturnInst *RI : Returns) {
2267  // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2268  // call and a return. The return will restore the stack pointer.
2269  if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2270  continue;
2271  if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2272  continue;
2273  IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2274  }
2275  }
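// Shape of the emitted code (editorial sketch):
//
//   %savedstack = call i8* @llvm.stacksave()
//   ... inlined body containing dynamic allocas ...
//   call void @llvm.stackrestore(i8* %savedstack)
//
// so the dynamic allocas do not accumulate stack across repeated
// executions of the call site.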
2276 
2277  // If we are inlining for an invoke instruction, we must make sure to rewrite
2278  // any call instructions into invoke instructions. This is sensitive to which
2279  // funclet pads were top-level in the inlinee, so must be done before
2280  // rewriting the "parent pad" links.
2281  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2282  BasicBlock *UnwindDest = II->getUnwindDest();
2283  Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2284  if (isa<LandingPadInst>(FirstNonPHI)) {
2285  HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2286  } else {
2287  HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2288  }
2289  }
2290 
2291  // Update the lexical scopes of the new funclets and callsites.
2292  // Anything that had 'none' as its parent is now nested inside the callsite's
2293  // EHPad.
2294 
2295  if (CallSiteEHPad) {
2296  for (Function::iterator BB = FirstNewBlock->getIterator(),
2297  E = Caller->end();
2298  BB != E; ++BB) {
2299  // Add bundle operands to any top-level call sites.
2300  SmallVector<OperandBundleDef, 1> OpBundles;
2301  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2302  CallBase *I = dyn_cast<CallBase>(&*BBI++);
2303  if (!I)
2304  continue;
2305 
2306  // Skip call sites which are nounwind intrinsics.
2307  auto *CalledFn =
2308  dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
2309  if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2310  continue;
2311 
2312  // Skip call sites which already have a "funclet" bundle.
2313  if (I->getOperandBundle(LLVMContext::OB_funclet))
2314  continue;
2315 
2316  I->getOperandBundlesAsDefs(OpBundles);
2317  OpBundles.emplace_back("funclet", CallSiteEHPad);
2318 
2319  Instruction *NewInst = CallBase::Create(I, OpBundles, I);
2320  NewInst->takeName(I);
2321  I->replaceAllUsesWith(NewInst);
2322  I->eraseFromParent();
2323 
2324  OpBundles.clear();
2325  }
2326 
2327  // It is problematic if the inlinee has a cleanupret which unwinds to
2328  // caller and we inline it into a call site which doesn't unwind but into
2329  // an EH pad that does. Such an edge must be dynamically unreachable.
2330  // As such, we replace the cleanupret with unreachable.
2331  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2332  if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2333  changeToUnreachable(CleanupRet);
2334 
2335  Instruction *I = BB->getFirstNonPHI();
2336  if (!I->isEHPad())
2337  continue;
2338 
2339  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2340  if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2341  CatchSwitch->setParentPad(CallSiteEHPad);
2342  } else {
2343  auto *FPI = cast<FuncletPadInst>(I);
2344  if (isa<ConstantTokenNone>(FPI->getParentPad()))
2345  FPI->setParentPad(CallSiteEHPad);
2346  }
2347  }
2348  }
2349 
2350  if (InlinedDeoptimizeCalls) {
2351  // We need to at least remove the deoptimizing returns from the Return set,
2352  // so that the control flow from those returns does not get merged into the
2353  // caller (but terminate it instead). If the caller's return type does not
2354  // match the callee's return type, we also need to change the return type of
2355  // the intrinsic.
2356  if (Caller->getReturnType() == CB.getType()) {
2357  llvm::erase_if(Returns, [](ReturnInst *RI) {
2358  return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2359  });
2360  } else {
2361  SmallVector<ReturnInst *, 8> NormalReturns;
2362  Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2363  Caller->getParent(), Intrinsic::experimental_deoptimize,
2364  {Caller->getReturnType()});
2365 
2366  for (ReturnInst *RI : Returns) {
2367  CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2368  if (!DeoptCall) {
2369  NormalReturns.push_back(RI);
2370  continue;
2371  }
2372 
2373  // The calling convention on the deoptimize call itself may be bogus,
2374  // since the code we're inlining may have undefined behavior (and may
2375  // never actually execute at runtime); but all
2376  // @llvm.experimental.deoptimize declarations have to have the same
2377  // calling convention in a well-formed module.
2378  auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2379  NewDeoptIntrinsic->setCallingConv(CallingConv);
2380  auto *CurBB = RI->getParent();
2381  RI->eraseFromParent();
2382 
2383  SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2384 
2385  SmallVector<OperandBundleDef, 1> OpBundles;
2386  DeoptCall->getOperandBundlesAsDefs(OpBundles);
2387  auto DeoptAttributes = DeoptCall->getAttributes();
2388  DeoptCall->eraseFromParent();
2389  assert(!OpBundles.empty() &&
2390  "Expected at least the deopt operand bundle");
2391 
2392  IRBuilder<> Builder(CurBB);
2393  CallInst *NewDeoptCall =
2394  Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2395  NewDeoptCall->setCallingConv(CallingConv);
2396  NewDeoptCall->setAttributes(DeoptAttributes);
2397  if (NewDeoptCall->getType()->isVoidTy())
2398  Builder.CreateRetVoid();
2399  else
2400  Builder.CreateRet(NewDeoptCall);
2401  }
2402 
2403  // Leave behind the normal returns so we can merge control flow.
2404  std::swap(Returns, NormalReturns);
2405  }
2406  }
2407 
2408  // Handle any inlined musttail call sites. In order for a new call site to be
2409  // musttail, the source of the clone and the inlined call site must have been
2410  // musttail. Therefore it's safe to return without merging control into the
2411  // phi below.
2412  if (InlinedMustTailCalls) {
2413  // Check if we need to bitcast the result of any musttail calls.
2414  Type *NewRetTy = Caller->getReturnType();
2415  bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2416 
2417  // Handle the returns preceded by musttail calls separately.
2418  SmallVector<ReturnInst *, 8> NormalReturns;
2419  for (ReturnInst *RI : Returns) {
2420  CallInst *ReturnedMustTail =
2421  RI->getParent()->getTerminatingMustTailCall();
2422  if (!ReturnedMustTail) {
2423  NormalReturns.push_back(RI);
2424  continue;
2425  }
2426  if (!NeedBitCast)
2427  continue;
2428 
2429  // Delete the old return and any preceding bitcast.
2430  BasicBlock *CurBB = RI->getParent();
2431  auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2432  RI->eraseFromParent();
2433  if (OldCast)
2434  OldCast->eraseFromParent();
2435 
2436  // Insert a new bitcast and return with the right type.
2437  IRBuilder<> Builder(CurBB);
2438  Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2439  }
2440 
2441  // Leave behind the normal returns so we can merge control flow.
2442  std::swap(Returns, NormalReturns);
2443  }
2444 
2445  // Now that all of the transforms on the inlined code have taken place but
2446  // before we splice the inlined code into the CFG and lose track of which
2447  // blocks were actually inlined, collect the call sites. We only do this if
2448  // call graph updates weren't requested, as those provide value handle based
2449  // tracking of inlined call sites instead. Calls to intrinsics are not
2450  // collected because they are not inlineable.
2451  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2452  // Otherwise just collect the raw call sites that were inlined.
2453  for (BasicBlock &NewBB :
2454  make_range(FirstNewBlock->getIterator(), Caller->end()))
2455  for (Instruction &I : NewBB)
2456  if (auto *CB = dyn_cast<CallBase>(&I))
2457  if (!(CB->getCalledFunction() &&
2458  CB->getCalledFunction()->isIntrinsic()))
2459  IFI.InlinedCallSites.push_back(CB);
2460  }
2461 
2462  // If we cloned in _exactly one_ basic block, and if that block ends in a
2463  // return instruction, we splice the body of the inlined callee directly into
2464  // the calling basic block.
2465  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2466  // Move all of the instructions right before the call.
2467  OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2468  FirstNewBlock->begin(), FirstNewBlock->end());
2469  // Remove the cloned basic block.
2470  Caller->getBasicBlockList().pop_back();
2471 
2472  // If the call site was an invoke instruction, add a branch to the normal
2473  // destination.
2474  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2475  BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2476  NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2477  }
2478 
2479  // If the return instruction returned a value, replace uses of the call with
2480  // uses of the returned value.
2481  if (!CB.use_empty()) {
2482  ReturnInst *R = Returns[0];
2483  if (&CB == R->getReturnValue())
2484  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2485  else
2486  CB.replaceAllUsesWith(R->getReturnValue());
2487  }
2488  // Since we are now done with the Call/Invoke, we can delete it.
2489  CB.eraseFromParent();
2490 
2491  // Since we are now done with the return instruction, delete it also.
2492  Returns[0]->eraseFromParent();
2493 
2494  // We are now done with the inlining.
2495  return InlineResult::success();
2496  }
2497 
2498  // Otherwise, we have the normal case, of more than one block to inline or
2499  // multiple return sites.
2500 
2501  // We want to clone the entire callee function into the hole between the
2502  // "starter" and "ender" blocks. How we accomplish this depends on whether
2503  // this is an invoke instruction or a call instruction.
2504  BasicBlock *AfterCallBB;
2505  BranchInst *CreatedBranchToNormalDest = nullptr;
2506  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2507 
2508  // Add an unconditional branch to make this look like the CallInst case...
2509  CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2510 
2511  // Split the basic block. This guarantees that no PHI nodes will have to be
2512  // updated due to new incoming edges, and make the invoke case more
2513  // symmetric to the call case.
2514  AfterCallBB =
2515  OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2516  CalledFunc->getName() + ".exit");
2517 
2518  } else { // It's a call
2519  // If this is a call instruction, we need to split the basic block that
2520  // the call lives in.
2521  //
2522  AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2523  CalledFunc->getName() + ".exit");
2524  }
2525 
2526  if (IFI.CallerBFI) {
2527  // Copy original BB's block frequency to AfterCallBB
2528  IFI.CallerBFI->setBlockFreq(
2529  AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2530  }
2531 
2532  // Change the branch that used to go to AfterCallBB to branch to the first
2533  // basic block of the inlined function.
2534  //
2535  Instruction *Br = OrigBB->getTerminator();
2536  assert(Br && Br->getOpcode() == Instruction::Br &&
2537  "splitBasicBlock broken!");
2538  Br->setOperand(0, &*FirstNewBlock);
2539 
2540  // Now that the function is correct, make it a little bit nicer. In
2541  // particular, move the basic blocks inserted from the end of the function
2542  // into the space made by splitting the source basic block.
2543  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2544  Caller->getBasicBlockList(), FirstNewBlock,
2545  Caller->end());
2546 
2547  // Handle all of the return instructions that we just cloned in, and eliminate
2548  // any users of the original call/invoke instruction.
2549  Type *RTy = CalledFunc->getReturnType();
2550 
2551  PHINode *PHI = nullptr;
2552  if (Returns.size() > 1) {
2553  // The PHI node should go at the front of the new basic block to merge all
2554  // possible incoming values.
2555  if (!CB.use_empty()) {
2556  PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2557  &AfterCallBB->front());
2558  // Anything that used the result of the function call should now use the
2559  // PHI node as their operand.
2560  CB.replaceAllUsesWith(PHI);
2561  }
2562 
2563  // Loop over all of the return instructions adding entries to the PHI node
2564  // as appropriate.
2565  if (PHI) {
2566  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2567  ReturnInst *RI = Returns[i];
2568  assert(RI->getReturnValue()->getType() == PHI->getType() &&
2569  "Ret value not consistent in function!");
2570  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2571  }
2572  }
2573 
2574  // Add a branch to the merge points and remove return instructions.
2575  DebugLoc Loc;
2576  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2577  ReturnInst *RI = Returns[i];
2578  BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2579  Loc = RI->getDebugLoc();
2580  BI->setDebugLoc(Loc);
2581  RI->eraseFromParent();
2582  }
2583  // We need to set the debug location to *somewhere* inside the
2584  // inlined function. The line number may be nonsensical, but the
2585  // instruction will at least be associated with the right
2586  // function.
2587  if (CreatedBranchToNormalDest)
2588  CreatedBranchToNormalDest->setDebugLoc(Loc);
2589  } else if (!Returns.empty()) {
2590  // Otherwise, if there is exactly one return value, just replace anything
2591  // using the return value of the call with the computed value.
2592  if (!CB.use_empty()) {
2593  if (&CB == Returns[0]->getReturnValue())
2594  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2595  else
2596  CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2597  }
2598 
2599  // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2600  BasicBlock *ReturnBB = Returns[0]->getParent();
2601  ReturnBB->replaceAllUsesWith(AfterCallBB);
2602 
2603  // Splice the code from the return block into the block that it will return
2604  // to, which contains the code that was after the call.
2605  AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2606  ReturnBB->getInstList());
2607 
2608  if (CreatedBranchToNormalDest)
2609  CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2610 
2611  // Delete the return instruction and the now-empty ReturnBB.
2612  Returns[0]->eraseFromParent();
2613  ReturnBB->eraseFromParent();
2614  } else if (!CB.use_empty()) {
2615  // No returns, but something is using the return value of the call. Just
2616  // nuke the result.
2617  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2618  }
2619 
2620  // Since we are now done with the Call/Invoke, we can delete it.
2621  CB.eraseFromParent();
2622 
2623  // If we inlined any musttail calls and the original return is now
2624  // unreachable, delete it. It can only contain a bitcast and ret.
2625  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
2626  AfterCallBB->eraseFromParent();
2627 
2628  // We should always be able to fold the entry block of the function into the
2629  // single predecessor of the block...
2630  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2631  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2632 
2633  // Splice the code entry block into calling block, right before the
2634  // unconditional branch.
2635  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2636  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2637 
2638  // Remove the unconditional branch.
2639  OrigBB->getInstList().erase(Br);
2640 
2641  // Now we can remove the CalleeEntry block, which is now empty.
2642  Caller->getBasicBlockList().erase(CalleeEntry);
2643 
2644  // If we inserted a phi node, check to see if it has a single value (e.g. all
2645  // the entries are the same or undef). If so, remove the PHI so it doesn't
2646  // block other optimizations.
2647  if (PHI) {
2648  AssumptionCache *AC =
2649  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2650  auto &DL = Caller->getParent()->getDataLayout();
2651  if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2652  PHI->replaceAllUsesWith(V);
2653  PHI->eraseFromParent();
2654  }
2655  }
2656 
2657  return InlineResult::success();
2658 }
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::EHPersonality::MSVC_CXX
@ MSVC_CXX
llvm::AttrBuilder::addDereferenceableOrNullAttr
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
Definition: Attributes.cpp:1655
i
i
Definition: README.txt:29
llvm::InvokeInst::getNormalDest
BasicBlock * getNormalDest() const
Definition: Instructions.h:3881
llvm::BasicBlock::getTerminatingDeoptimizeCall
const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize prior to the terminating return in...
Definition: BasicBlock.cpp:189
llvm::CallBase::getNumOperandBundles
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:1890
IdentifyValidAttributes
static AttrBuilder IdentifyValidAttributes(CallBase &CB)
Definition: InlineFunction.cpp:1184
llvm::isAsynchronousEHPersonality
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
Definition: EHPersonalities.h:50
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
AssumptionCache.h
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::InlineResult::success
static InlineResult success()
Definition: InlineCost.h:164
llvm::Function::isIntrinsic
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:212
llvm::CatchSwitchInst::Create
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:4297
llvm::CallGraphNode::CalledFunctionsVector
std::vector< CallRecord > CalledFunctionsVector
Definition: CallGraph.h:182
llvm::AArch64CC::HI
@ HI
Definition: AArch64BaseInfo.h:263
llvm
This file implements support for optimizing divisions by a constant.
Definition: AllocatorList.h:23
HandleInlinedEHPad
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
Definition: InlineFunction.cpp:662
llvm::objcarc::hasAttachedCallOpBundle
bool hasAttachedCallOpBundle(const CallBase *CB)
Definition: ObjCARCUtil.h:29
llvm::LandingPadInst::isCleanup
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
Definition: Instructions.h:2932
getUnwindDestToken
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
Definition: InlineFunction.cpp:395
llvm::CallBase::getOperandBundlesAsDefs
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition: Instructions.cpp:375
llvm::Function::args
iterator_range< arg_iterator > args()
Definition: Function.h:773
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
llvm::AArch64CC::AL
@ AL
Definition: AArch64BaseInfo.h:269
llvm::objcarc::getAttachedARCFunction
Optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
Definition: ObjCARCUtil.h:43
llvm::CallGraphNode::iterator
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:194
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
llvm::AssumptionCache::registerAssumption
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Definition: AssumptionCache.cpp:217
llvm::ReturnInst
Return a value (possibly void), from a function.
Definition: Instructions.h:2986
llvm::VAArgInst
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Definition: Instructions.h:1833
llvm::ClonedCodeInfo
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:64
llvm::CallBase::getOperandBundle
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:1977
Optional.h
ValueMapper.h
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:113
allocaWouldBeStaticInEntry
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
Definition: InlineFunction.cpp:1469
llvm::IRBuilderBase::CreateLifetimeEnd
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
Definition: IRBuilder.cpp:440
MayContainThrowingOrExitingCall
static bool MayContainThrowingOrExitingCall(Instruction *Begin, Instruction *End)
Definition: InlineFunction.cpp:1175
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1379
Metadata.h
llvm::Type::getInt8PtrTy
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:293
llvm::Function::end
iterator end()
Definition: Function.h:736
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:90
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
IntrinsicInst.h
DebugInfoMetadata.h
llvm::ValueMap::end
iterator end()
Definition: ValueMap.h:136
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:426
llvm::Function
Definition: Function.h:62
fixupLineNumbers
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined.
Definition: InlineFunction.cpp:1485
getUnwindDestTokenHelper
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
Definition: InlineFunction.cpp:244
llvm::ReturnInst::getReturnValue
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
Definition: Instructions.h:3031
llvm::AllocaInst::getType
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:104
llvm::SmallVector< Value *, 8 >
llvm::CallInst::setTailCallKind
void setTailCallKind(TailCallKind TCK)
Definition: Instructions.h:1678
InlineAsm.h
llvm::LandingPadInst
The landingpad instruction holds all of the information necessary to generate correct exception handl...
Definition: Instructions.h:2885
CaptureTracking.h
llvm::CallGraphNode::removeCallEdgeFor
void removeCallEdgeFor(CallBase &Call)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:214
llvm::CallBase::isInlineAsm
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1448
llvm::Function::getSubprogram
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1541
ErrorHandling.h
builder
assume builder
Definition: AssumeBundleBuilder.cpp:649
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:734
llvm::LLVMContext::OB_clang_arc_attachedcall
@ OB_clang_arc_attachedcall
Definition: LLVMContext.h:96
llvm::IRBuilder<>
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1732
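A small usage sketch (names are illustrative): erase_if removes every element matching a predicate in one pass, replacing the manual erase/remove idiom.
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"

  void dropEvens() {
    llvm::SmallVector<int, 8> Vals = {1, 2, 3, 4};
    // Remove every even element; Vals is {1, 3} afterwards.
    llvm::erase_if(Vals, [](int V) { return V % 2 == 0; });
  }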
ValueTracking.h
Local.h
llvm::AttributeList::get
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:1011
llvm::CallGraph
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:73
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::AAResults::onlyAccessesArgPointees
static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB)
Checks if functions with the specified behavior are known to read and write at most from objects poin...
Definition: AliasAnalysis.h:689
llvm::CallBase::addOperandBundle
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
Definition: Instructions.cpp:454
HandleByValArgument
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
Definition: InlineFunction.cpp:1387
llvm::DILocation
Debug location.
Definition: DebugInfoMetadata.h:1580
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:321
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
Module.h
llvm::BasicBlock::eraseFromParent
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:133
llvm::AttributeList
Definition: Attributes.h:399
llvm::getOrEnforceKnownAlignment
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1343
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1458
llvm::OperandBundleDefT
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1114
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1241
EHPersonalities.h
llvm::updateProfileCallee
void updateProfileCallee(Function *Callee, int64_t entryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding entryDelta then scaling callsite i...
Definition: InlineFunction.cpp:1610
llvm::BasicBlock::splitBasicBlock
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:385
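A hedged sketch of the inliner-style use of this API, assuming CB is a call instruction with a parent block (splitAtCall is a hypothetical helper): splitting the caller's block at the call site gives the code after the call its own block.
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instructions.h"

  // Everything from CB onward moves into the returned block; the original
  // block is terminated with an unconditional branch to it.
  llvm::BasicBlock *splitAtCall(llvm::CallInst &CB) {
    llvm::BasicBlock *OrigBB = CB.getParent();
    return OrigBB->splitBasicBlock(CB.getIterator(), "after.call");
  }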
llvm::objcarc::isRetainOrClaimRV
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/claimRV.
Definition: ObjCARCUtil.h:52
llvm::Optional
Definition: APInt.h:33
llvm::DenseMapBase::count
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:145
llvm::ProfileSummaryInfo::getProfileCount
Optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Definition: ProfileSummaryInfo.cpp:77
llvm::SmallPtrSet< Instruction *, 4 >
llvm::CallBase::isByValArgument
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1656
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:139
llvm::Function::ProfileCount::isSynthetic
bool isSynthetic() const
Definition: Function.h:268
HandleCallsInBlockInlinedThroughInvoke
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
Definition: InlineFunction.cpp:539
STLExtras.h
llvm::CallBase::arg_begin
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1303
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::CallInst::TCK_None
@ TCK_None
Definition: Instructions.h:1653
llvm::CallBase::setDoesNotThrow
void setDoesNotThrow()
Definition: InstrTypes.h:1850
llvm::uniteAccessGroups
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
Definition: VectorUtils.cpp:658
llvm::BasicBlock::rend
reverse_iterator rend()
Definition: BasicBlock.h:303
llvm::LinearPolySize::isScalable
bool isScalable() const
Returns whether the size is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:299
llvm::CallGraphNode::addCalledFunction
void addCalledFunction(CallBase *Call, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:243
llvm::MDBuilder::createAnonymousAliasScope
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
Definition: MDBuilder.h:140
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1233
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::InlineFunctionInfo::CallerBFI
BlockFrequencyInfo * CallerBFI
Definition: Cloning.h:218
llvm::Instruction::setMetadata
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1336
llvm::InlineFunctionInfo::PSI
ProfileSummaryInfo * PSI
Definition: Cloning.h:217
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
AliasAnalysis.h
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function level.
Definition: AliasAnalysis.cpp:985
llvm::classifyEHPersonality
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Definition: EHPersonalities.cpp:21
Instruction.h
CommandLine.h
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
AddAliasScopeMetadata
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
Definition: InlineFunction.cpp:939
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:160
llvm::BlockFrequencyInfo
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Definition: BlockFrequencyInfo.h:37
PropagateCallSiteMetadata
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
Definition: InlineFunction.cpp:791
llvm::CallGraphNode::end
iterator end()
Definition: CallGraph.h:201
llvm::InlineFunctionInfo::CG
CallGraph * CG
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition: Cloning.h:215
llvm::ms_demangle::CallingConv
CallingConv
Definition: MicrosoftDemangleNodes.h:59
llvm::GlobalValue::isDeclaration
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:228
Constants.h
llvm::AAResults
Definition: AliasAnalysis.h:508
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:113
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
UpdateReturnAttributes
static cl::opt< bool > UpdateReturnAttributes("update-return-attrs", cl::init(true), cl::Hidden, cl::desc("Update return attributes on calls within inlined body"))
llvm::DebugLoc::getCol
unsigned getCol() const
Definition: DebugLoc.cpp:30
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:648
llvm::InvokeInst::getLandingPadInst
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
Definition: Instructions.cpp:894
llvm::User
Definition: User.h:44
llvm::getKnownAlignment
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:224
Intrinsics.h
llvm::CleanupReturnInst::Create
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:4624
llvm::LandingPadInst::getNumClauses
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Definition: Instructions.h:2957
InstrTypes.h
llvm::CallBase::getCalledFunction
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1383
llvm::CallBase::setAttributes
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1462
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:296
UpdatePHINodes
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
Definition: BasicBlockUtils.cpp:988
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1518
llvm::MDTuple
Tuple of metadata.
Definition: Metadata.h:1174
llvm::objcarc::ARCInstKind::RetainRV
@ RetainRV
objc_retainAutoreleasedReturnValue
llvm::LLVMContext::OB_funclet
@ OB_funclet
Definition: LLVMContext.h:91
AssumeBundleBuilder.h
llvm::BlockFrequencyInfo::setBlockFreq
void setBlockFreq(const BasicBlock *BB, uint64_t Freq)
Definition: BlockFrequencyInfo.cpp:229
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::ClonedCodeInfo::OperandBundleCallSites
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:76
llvm::Function::arg_end
arg_iterator arg_end()
Definition: Function.h:758
llvm::PHINode::getIncomingValueForBlock
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: Instructions.h:2818
llvm::AttrBuilder::empty
bool empty() const
Return true if the builder contains no target-independent attributes.
Definition: Attributes.h:1131
llvm::Instruction
Definition: Instruction.h:45
llvm::SimplifyInstruction
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
Definition: InstructionSimplify.cpp:6327
llvm::ClonedCodeInfo::isSimplified
bool isSimplified(const Value *From, const Value *To) const
Definition: Cloning.h:85
MDBuilder.h
llvm::AllocaInst::getArraySize
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:100
HandleInlinedLandingPad
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
Definition: InlineFunction.cpp:605
llvm::Function::hasPersonalityFn
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:786
ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner
ScopedAliasMetadataDeepCloner(const Function *F)
Definition: InlineFunction.cpp:853
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1796
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:925
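A minimal sketch of this factory (makeI64 is a hypothetical helper): for a scalar integer type the call yields a ConstantInt; for a vector type it would yield a splat of the value.
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Type.h"

  llvm::Constant *makeI64(llvm::LLVMContext &Ctx, uint64_t V) {
    // Build an i64 constant with value V in the given context.
    return llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), V);
  }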
llvm::CallBase::getParamByValType
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Definition: InstrTypes.h:1723
DebugLoc.h
SmallPtrSet.h
llvm::CallGraphNode
A node in the call graph for a module.
Definition: CallGraph.h:167
llvm::ValueMap::begin
iterator begin()
Definition: ValueMap.h:135
isUsedByLifetimeMarker
static bool isUsedByLifetimeMarker(Value *V)
Definition: InlineFunction.cpp:1439
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::BasicBlock::getFirstNonPHI
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:216
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Metadata
Root of the metadata hierarchy.
Definition: Metadata.h:62
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1439
llvm::ValueMap::count
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: ValueMap.h:152
llvm::Instruction::isLifetimeStartOrEnd
bool isLifetimeStartOrEnd() const
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
Definition: Instruction.cpp:706
llvm::None
const NoneType None
Definition: None.h:23
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:344
llvm::objcarc::ARCInstKind
ARCInstKind
Definition: ObjCARCInstKind.h:28
Type.h
getParentPad
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
Definition: InlineFunction.cpp:234
llvm::CallBase::getCaller
Function * getCaller()
Helper to get the caller (the parent function).
Definition: Instructions.cpp:282
llvm::DebugLoc::appendInlinedAt
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition: DebugLoc.cpp:71
llvm::MDBuilder::createAnonymousAliasScopeDomain
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
Definition: MDBuilder.h:133
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::OperandBundleUse::getTagID
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1084
CFG.h
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:148
llvm::ProfileSummaryInfo
Analysis providing profile information.
Definition: ProfileSummaryInfo.h:39
llvm::InvokeInst
Invoke instruction.
Definition: Instructions.h:3749
llvm::Function::getGC
const std::string & getGC() const
Definition: Function.cpp:687
llvm::cl::ZeroOrMore
@ ZeroOrMore
Definition: CommandLine.h:120
llvm::InlineAsm
Definition: InlineAsm.h:31
VectorUtils.h
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:626
BasicBlock.h
llvm::cl::opt< bool >
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:402
llvm::RISCVFenceField::O
@ O
Definition: RISCVBaseInfo.h:197
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:304
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::getUnderlyingObjects
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Definition: ValueTracking.cpp:4423
llvm::Instruction::eraseFromParent
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:78
llvm::Function::getReturnType
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:182
llvm::InlineAsm::canThrow
bool canThrow() const
Definition: InlineAsm.h:71
UseNoAliasIntrinsic
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
inlineDebugLoc
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
Definition: InlineFunction.cpp:1475
llvm::InlineFunctionInfo::InlinedCalls
SmallVector< WeakTrackingVH, 8 > InlinedCalls
InlineFunction fills this in with callsites that were inlined from the callee.
Definition: Cloning.h:226
uint64_t
ProfileSummaryInfo.h
llvm::Function::getCallingConv
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:240
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:578
llvm::CallInst::TailCallKind
TailCallKind
Definition: Instructions.h:1652
hasLifetimeMarkers
static bool hasLifetimeMarkers(AllocaInst *AI)
Definition: InlineFunction.cpp:1449
llvm::Function::hasGC
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:319
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2783
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::BranchInst::Create
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:3124
llvm::DenseMap
Definition: DenseMap.h:714
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::DebugLoc::get
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:21
llvm::AttrBuilder
Definition: Attributes.h:931
Cloning.h
StringExtras.h
llvm::BlockFrequency::getFrequency
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
Definition: BlockFrequency.h:35
llvm::isScopedEHPersonality
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Definition: EHPersonalities.h:80
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:441
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition: STLExtras.h:576
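A sketch of the typical pattern, assuming BB is a valid basic block (stripLifetimeMarkers is a hypothetical helper): early-increment iteration advances the iterator before the loop body runs, so erasing the current instruction is safe.
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instruction.h"

  void stripLifetimeMarkers(llvm::BasicBlock &BB) {
    // Erasing I does not invalidate the loop: the iterator has already
    // moved past it.
    for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
      if (I.isLifetimeStartOrEnd())
        I.eraseFromParent();
  }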
DIBuilder.h
UpdateCallGraphAfterInlining
static void UpdateCallGraphAfterInlining(CallBase &CB, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflec...
Definition: InlineFunction.cpp:1299
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:367
llvm::LandingPadInst::getClause
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
Definition: Instructions.h:2942
AddAlignmentAssumptions
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
Definition: InlineFunction.cpp:1261
ScopedAliasMetadataDeepCloner
Utility for cloning !noalias and !alias.scope metadata.
Definition: InlineFunction.cpp:835
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::ValueMapIterator::ValueTypeProxy::second
ValueT & second
Definition: ValueMap.h:346
llvm::CallBase::hasOperandBundles
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:1895
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:840
SI
StandardInstrumentations SI(Debug, VerifyEach)
iterator_range.h
llvm::Type::isVoidTy
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:138
llvm::salvageKnowledge
void salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
Definition: AssumeBundleBuilder.cpp:292
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
llvm::MDNode
Metadata node.
Definition: Metadata.h:906
llvm::CallBase::Create
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, Instruction *InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
Definition: Instructions.cpp:255
llvm::changeToInvokeAndSplitBasicBlock
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2193
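A hedged sketch of this utility in the inlining context (makeInvoke is a hypothetical wrapper; CI and UnwindEdge are assumed valid): a may-throw call inside an inlined region is rewritten into an invoke whose unwind edge is the caller's landing-pad block.
  #include "llvm/IR/Instructions.h"
  #include "llvm/Transforms/Utils/Local.h"

  // The returned block is the invoke's normal destination, holding the
  // instructions that followed the original call.
  llvm::BasicBlock *makeInvoke(llvm::CallInst *CI,
                               llvm::BasicBlock *UnwindEdge) {
    return llvm::changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
  }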
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:650
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::IRBuilderBase::CreateNoAliasScopeDeclaration
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
Definition: IRBuilder.cpp:488
llvm::CallBase::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
Definition: Instructions.cpp:311
getDebugLoc
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Definition: MachineInstrBundle.cpp:109
llvm::DominatorTreeBase::recalculate
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Definition: GenericDomTree.h:778
llvm::CloneAndPruneFunctionInto
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
Definition: CloneFunction.cpp:778
None.h
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
DataLayout.h
llvm::Function::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:244
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:41
llvm::CallBase::getOperandBundleAt
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:1946
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:134
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
llvm::Function::getEntryCount
ProfileCount getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Definition: Function.cpp:1910
llvm::MDNode::concatenate
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:914
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:532
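As a sketch of the pattern the inliner relies on (replaceCallResult is a hypothetical helper; CB is the call being replaced and RetVal the value computed by the inlined body):
  #include "llvm/IR/InstrTypes.h"
  #include "llvm/IR/Value.h"

  void replaceCallResult(llvm::CallBase &CB, llvm::Value *RetVal) {
    // Point every user of the call's result at the inlined return value,
    // then remove the now-dead call itself.
    if (!CB.use_empty())
      CB.replaceAllUsesWith(RetVal);
    CB.eraseFromParent();
  }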
uint32_t
AddReturnAttributes
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap)
Definition: InlineFunction.cpp:1204
llvm::X86II::OB
@ OB
Definition: X86BaseInfo.h:796
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:1748
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:990
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
PreserveAlignmentAssumptions
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
llvm::objcarc::getAttachedARCFunctionKind
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
Definition: ObjCARCUtil.h:60
llvm::pred_empty
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:119
llvm::updateLoopMetadataDebugLocations
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
Definition: DebugInfo.cpp:374
llvm::FunctionModRefBehavior
FunctionModRefBehavior
Summary of how a function affects memory in the program.
Definition: AliasAnalysis.h:263
llvm::CallInst::isMustTailCall
bool isMustTailCall() const
Definition: Instructions.h:1674
llvm::MDTuple::getTemporary
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition: Metadata.h:1220
BlockFrequencyInfo.h
ScopedAliasMetadataDeepCloner::remap
void remap(Function::iterator FStart, Function::iterator FEnd)
Remap instructions in the given range from the original to the cloned metadata.
Definition: InlineFunction.cpp:911
llvm::MDNode::getDistinct
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1241
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:127
llvm::ValueMap< const Value *, WeakTrackingVH >
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:175
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
llvm::EHPersonality
EHPersonality
Definition: EHPersonalities.h:22
llvm::CallBase::paramHasAttr
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Definition: Instructions.cpp:341
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:152
llvm::objcarc::GetRCIdentityRoot
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
Definition: ObjCARCAnalysisUtils.h:107
llvm::Constant::stripPointerCasts
const Constant * stripPointerCasts() const
Definition: Constant.h:207
llvm::Init
Definition: Record.h:271
llvm::AAResults::getModRefBehavior
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Return the behavior of the given call site.
Definition: AliasAnalysis.cpp:423
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:726
llvm::ClonedCodeInfo::ContainsCalls
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:66
ObjCARCAnalysisUtils.h
llvm::CallBase::doesNotThrow
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1849
llvm::Function::ProfileCount::getCount
uint64_t getCount() const
Definition: Function.h:266
llvm::OperandBundleUse::Inputs
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1057
llvm::InlineFunctionInfo::InlinedCallSites
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:233
Argument.h
HandleByValArgumentInit
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
Definition: InlineFunction.cpp:1370
llvm::BasicBlock::front
const Instruction & front() const
Definition: BasicBlock.h:308
ObjCARCUtil.h
llvm::BlockFrequencyInfo::setBlockFreqAndScale
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
Definition: BlockFrequencyInfo.cpp:234
llvm::InlineFunctionInfo::reset
void reset()
Definition: Cloning.h:239
llvm::Function::getParamAlignment
uint64_t getParamAlignment(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:448
Constant.h
llvm::ResumeInst
Resume the propagation of an exception.
Definition: Instructions.h:4197
llvm::MDNode::replaceAllUsesWith
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition: Metadata.h:992
llvm::Type::getInt64Ty
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:242
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:52
llvm::ValueMapIterator
Definition: ValueMap.h:49
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:324
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2675
llvm::Function::getArg
Argument * getArg(unsigned i) const
Definition: Function.h:767
llvm::LLVMContext::OB_deopt
@ OB_deopt
Definition: LLVMContext.h:90
llvm::CallBase::arg_size
unsigned arg_size() const
Definition: InstrTypes.h:1326
llvm::AAResults::onlyAccessesInaccessibleMem
static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB)
Checks if functions with the specified behavior are known to read and write at most from memory that ...
Definition: AliasAnalysis.h:703
ProfileCount
Function::ProfileCount ProfileCount
Definition: InlineFunction.cpp:78
llvm::isGuaranteedToTransferExecutionToSuccessor
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
Definition: ValueTracking.cpp:5302
llvm::TypeSize
Definition: TypeSize.h:417
llvm::ConstantTokenNone::get
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1467
Casting.h
Function.h
updateCallProfile
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
Definition: InlineFunction.cpp:1597
llvm::Value::hasNUses
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
llvm::InlineFunctionInfo
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition: Cloning.h:201
llvm::Function::getFunctionType
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:177
llvm::ValueMap::find
iterator find(const KeyT &Val)
Definition: ValueMap.h:156
llvm::Instruction::isEHPad
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:661
llvm::InlineResult::failure
static InlineResult failure(const char *Reason)
Definition: InlineCost.h:165
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1376
ScopedAliasMetadataDeepCloner::clone
void clone()
Create a new clone of the scoped alias metadata, which will be used by subsequent remap() calls.
Definition: InlineFunction.cpp:881
llvm::BlockFrequencyInfo::getBlockFreq
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Definition: BlockFrequencyInfo.cpp:204
llvm::Function::getPersonalityFn
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1823
llvm::Function::arg_begin
arg_iterator arg_begin()
Definition: Function.h:749
EnableNoAliasConversion
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::AttrBuilder::addDereferenceableAttr
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
Definition: Attributes.cpp:1649
llvm::MDBuilder
Definition: MDBuilder.h:35
llvm::Function::front
const BasicBlock & front() const
Definition: Function.h:741
CallGraph.h
llvm::DebugLoc::getLine
unsigned getLine() const
Definition: DebugLoc.cpp:25
llvm::AttrBuilder::addAttribute
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
Definition: Attributes.h:953
llvm::changeToUnreachable
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2109
llvm::BasicBlock::getInstList
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:363
llvm::BasicBlock::reverse_iterator
InstListType::reverse_iterator reverse_iterator
Definition: BasicBlock.h:92
llvm::InlineFunctionInfo::StaticAllocas
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition: Cloning.h:222
llvm::MDNode::isTemporary
bool isTemporary() const
Definition: Metadata.h:987
Instructions.h
llvm::numbers::phi
constexpr double phi
Definition: MathExtras.h:71
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:138
llvm::objcarc::ARCInstKind::None
@ None
anything that is inert from an ARC perspective.
SmallVector.h
llvm::ilist_iterator::getReverse
ilist_iterator< OptionsT, !IsReverse, IsConst > getReverse() const
Get a reverse iterator to the same node.
Definition: ilist_iterator.h:121
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:370
User.h
llvm::InlineFunctionInfo::UpdateProfile
bool UpdateProfile
Update profile for callee as well as cloned version.
Definition: Cloning.h:237
Dominators.h
NoAliases
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
updateCallerBFI
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
Definition: InlineFunction.cpp:1568
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1328
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
InstructionSimplify.h
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::PHINode
Definition: Instructions.h:2633
llvm::Function::onlyReadsMemory
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:515
llvm::BasicBlock::removePredecessor
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:325
inlineRetainOrClaimRVCalls
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
Definition: InlineFunction.cpp:1666
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::InlineFunction
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
Definition: InlineFunction.cpp:1754
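A hedged sketch of driving this entry point (tryInline is a hypothetical wrapper): GetAC is an assumed caller-supplied callback yielding an AssumptionCache per function, and CB must be a call or invoke to a function definition.
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/Analysis/AssumptionCache.h"
  #include "llvm/Support/raw_ostream.h"
  #include "llvm/Transforms/Utils/Cloning.h"

  bool tryInline(llvm::CallBase &CB,
                 llvm::function_ref<llvm::AssumptionCache &(llvm::Function &)>
                     GetAC) {
    llvm::InlineFunctionInfo IFI(/*cg=*/nullptr, GetAC);
    llvm::InlineResult IR = llvm::InlineFunction(CB, IFI);
    if (!IR.isSuccess())
      llvm::errs() << "not inlined: " << IR.getFailureReason() << "\n";
    return IR.isSuccess();
  }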
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1161
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:401
DerivedTypes.h
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1475
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:165
LLVMContext.h
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:382
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:62
llvm::Function::ProfileCount
Class to represent profile counts.
Definition: Function.h:255
llvm::DebugLoc::getScope
MDNode * getScope() const
Definition: DebugLoc.cpp:35
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::InlineFunctionInfo::GetAssumptionCache
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
Definition: Cloning.h:216
llvm::BasicBlock::getTerminatingMustTailCall
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked 'musttail' prior to the terminating return instruction of this ba...
Definition: BasicBlock.cpp:158
llvm::cl::desc
Definition: CommandLine.h:412
Mod
Module * Mod
Definition: PassBuilderBindings.cpp:54
llvm::BranchInst
Conditional or Unconditional Branch instruction.
Definition: Instructions.h:3068
InlinerAttributeWindow
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
llvm::ClonedCodeInfo::ContainsDynamicAllocas
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition: Cloning.h:71
llvm::Function::ProfileCount::hasValue
bool hasValue() const
Definition: Function.h:265
llvm::SetVector< const MDNode * >
llvm::AttributeList::getParamAttrs
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
Definition: Attributes.cpp:1354
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition: SmallVector.h:624
llvm::CallInst::TCK_NoTail
@ TCK_NoTail
Definition: Instructions.h:1656
llvm::IRBuilderBase::CreateAlignmentAssumption
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
Definition: IRBuilder.cpp:1234
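A minimal sketch, assuming Ptr is a pointer-typed Value and InsertPt marks where the assumption should be emitted (assumeAligned16 is a hypothetical helper); this is the shape of assumption PreserveAlignmentAssumptions would add.
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"

  void assumeAligned16(llvm::Instruction *InsertPt, llvm::Value *Ptr) {
    llvm::IRBuilder<> Builder(InsertPt);
    const llvm::DataLayout &DL = InsertPt->getModule()->getDataLayout();
    // Emits an @llvm.assume conveying that Ptr is 16-byte aligned.
    Builder.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16);
  }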
Value.h
llvm::InvokeInst::getUnwindDest
BasicBlock * getUnwindDest() const
Definition: Instructions.h:3884
llvm::InlineFunctionInfo::CalleeBFI
BlockFrequencyInfo * CalleeBFI
Definition: Cloning.h:218
llvm::PointerMayBeCapturedBefore
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
Definition: CaptureTracking.cpp:245
llvm::Value
LLVM Value Representation.
Definition: Value.h:74
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:521
llvm::InlineResult
InlineResult is basically true or false; for false results the message describes a reason.
Definition: InlineCost.h:159
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:421
llvm::CallInst::getTailCallKind
TailCallKind getTailCallKind() const
Definition: Instructions.h:1665
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2395
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1319
SetVector.h
llvm::CallBase::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1443
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::Function::iterator
BasicBlockListType::iterator iterator
Definition: Function.h:68
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::CallGraphNode::begin
iterator begin()
Definition: CallGraph.h:200