//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool>
    EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                            cl::Hidden,
                            cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::ZeroOrMore, cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool>
    PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
                                 cl::init(false), cl::Hidden,
                                 cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
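
// Example (illustrative only; block and value names are invented): given a
// caller landing pad block
//
//   lpad:
//     %lp = landingpad { i8*, i32 } cleanup
//     ; ...rest of the block...
//
// getInnerResumeDest() splits it right after the landingpad instruction:
//
//   lpad:
//     %lp = landingpad { i8*, i32 } cleanup
//     br label %lpad.body
//   lpad.body:
//     %eh.lpad-body = phi { i8*, i32 } [ %lp, %lpad ], ...
//     ; ...rest of the block...
//
// so that inlined 'resume' instructions can branch to %lpad.body and feed
// their exception values into the new PHI.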

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}
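
// Example (illustrative only; names are invented): for a callee funclet tree
// such as
//
//   %cs = catchswitch within none [label %handler] unwind to caller
//   handler:
//     %cp = catchpad within %cs [...]
//     %clean = cleanuppad within %cp []
//     cleanupret from %clean unwind to caller
//
// a getUnwindDestToken query on %cs walks down to the cleanupret, finds a
// definitive "unwind to caller", and returns ConstantTokenNone; the memo map
// then caches that answer for %cs and every pad the unwind exits, so later
// queries are constant-time.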

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow())
      continue;

    if (CI->isInlineAsm()) {
      InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
      if (!IA->canThrow()) {
        continue;
      }
    }

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}
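
// Example (illustrative only; names are invented): when inlining through
//
//   invoke void @callee() to label %cont unwind label %lpad
//
// a callee block containing a throwing call such as
//
//   call void @may_throw()
//
// is rewritten by changeToInvokeAndSplitBasicBlock into
//
//   invoke void @may_throw() to label %split unwind label %lpad
//
// with the remainder of the original block moved into the new %split block.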

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}
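
// Example (illustrative only; the metadata id !5 is invented): if the call
// site being inlined carries
//
//   call void @callee(), !noalias !5
//
// then every memory-accessing instruction cloned from @callee gets !5
// concatenated onto its own !noalias metadata, so the aliasing guarantees
// stated at the call site still cover the inlined body.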

/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of the
/// metadata, putting the two versions in separate scope domains.
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}
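
// Typical usage (a sketch of how the inliner is expected to drive this
// class; the variable names are illustrative):
//
//   ScopedAliasMetadataDeepCloner SAMetadataCloner(CalledFunc);
//   // ... clone the callee's body into the caller ...
//   SAMetadataCloner.clone();
//   SAMetadataCloner.remap(FirstNewBlock, Caller->end());
//
// The clone is built once per inlining and applied only to the freshly
// inlined blocks, so the inlined copy lives in its own scope domain.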

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);

          // We'll retain this knowledge without additional metadata.
          if (AAResults::onlyAccessesInaccessibleMem(MRB))
            continue;

          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
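
// Example (illustrative only; the metadata names !scope_p and !scope_q are
// invented): inlining
//
//   define void @callee(i32* noalias %p, i32* noalias %q) { ... }
//
// creates one anonymous alias scope per noalias argument inside a fresh
// domain. A store whose pointer is based only on %p ends up roughly as
//
//   store i32 0, i32* %p.i, !alias.scope !scope_p, !noalias !scope_q
//
// i.e. it is placed in %p's scope and marked as not aliasing %q's scope.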

static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  unsigned NumInstChecked = 0;
  // Check that all instructions in the range [Begin, End) are guaranteed to
  // transfer execution to successor.
  for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
    if (NumInstChecked++ > InlinerAttributeWindow ||
        !isGuaranteedToTransferExecutionToSuccessor(&I))
      return true;
  return false;
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
  if (AB.empty())
    return AB;
  AttrBuilder Valid;
  // Only allow these whitelisted attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}
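
// Example (illustrative only): if the call site declares its return value
// noalias nonnull dereferenceable(16) zeroext, the filter above keeps
// noalias, nonnull and dereferenceable(16), while an ABI-only attribute such
// as zeroext is dropped, since it describes the call itself and is not safe
// to copy onto the calls inside the inlined body.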

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;

  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (Valid.empty())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Sanity check that the cloned RetVal exists and is a call, otherwise we
    // cannot add the attributes on the cloned RetVal.
    // Simplification during inlining could have transformed the cloned
    // instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be incorrect
    // if it is control flow dependent.
    // Consider:
    //   @callee {
    //     %rv = call @foo()
    //     %rv2 = call @bar()
    //     if (%rv2 != null)
    //       return %rv2
    //     if (%rv == null)
    //       exit()
    //     return %rv
    //   }
    //   caller() {
    //     %val = call nonnull @callee()
    //   }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to both RetVal and RI are in the same basic block and
    // there are no throwing/exiting instructions between these instructions.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL =
        AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CB.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp =
          IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(cast<AssumeInst>(NewAsmp));
    }
  }
}
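
// Example (illustrative only; %arg is an invented name): for a callee
// parameter 'i32* align 16 %p' whose alignment cannot already be proven at
// the call site, this inserts an alignment assumption such as
//
//   call void @llvm.assume(i1 true) [ "align"(i32* %arg, i64 16) ]
//
// before the call, so later passes can rely on the 16-byte alignment of the
// inlined uses of %p.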

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallBase &CB,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CB.getCaller();
  const Function *Callee = CB.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    // Skip 'reference' call records.
    if (!I->first)
      continue;

    const Value *OrigCall = *I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    auto *NewCall = dyn_cast<CallBase>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    if (NewCall->getCalledFunction() &&
        NewCall->getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = NewCall->getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(NewCall, CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(NewCall, I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                       /*SrcAlign*/ Align(1), Size);
}
1393 
1394 /// When inlining a call site that has a byval argument,
1395 /// we have to make the implicit memcpy explicit by adding it.
1397  const Function *CalledFunc,
1398  InlineFunctionInfo &IFI,
1399  unsigned ByValAlignment) {
1400  PointerType *ArgTy = cast<PointerType>(Arg->getType());
1401  Type *AggTy = ArgTy->getElementType();
1402 
1403  Function *Caller = TheCall->getFunction();
1404  const DataLayout &DL = Caller->getParent()->getDataLayout();
1405 
1406  // If the called function is readonly, then it could not mutate the caller's
1407  // copy of the byval'd memory. In this case, it is safe to elide the copy and
1408  // temporary.
1409  if (CalledFunc->onlyReadsMemory()) {
1410  // If the byval argument has a specified alignment that is greater than the
1411  // passed in pointer, then we either have to round up the input pointer or
1412  // give up on this transformation.
1413  if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1414  return Arg;
1415 
1416  AssumptionCache *AC =
1417  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1418 
1419  // If the pointer is already known to be sufficiently aligned, or if we can
1420  // round it up to a larger alignment, then we don't need a temporary.
1421  if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1422  AC) >= ByValAlignment)
1423  return Arg;
1424 
1425  // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1426  // for code quality, but rarely happens and is required for correctness.
1427  }
1428 
1429  // Create the alloca. If we have DataLayout, use nice alignment.
1430  Align Alignment(DL.getPrefTypeAlignment(AggTy));
1431 
1432  // If the byval had an alignment specified, we *must* use at least that
1433  // alignment, as it is required by the byval argument (and uses of the
1434  // pointer inside the callee).
1435  Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1436 
1437  Value *NewAlloca =
1438  new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1439  Arg->getName(), &*Caller->begin()->begin());
1440  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1441 
1442  // Uses of the argument in the function should use our new alloca
1443  // instead.
1444  return NewAlloca;
1445 }
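// --- Editor's illustration (not part of the original file) -----------------
// The overall byval effect, with invented names. Before inlining:
//   call void @callee(%struct.S* byval(%struct.S) align 8 %p)
// If @callee may write to its copy, the copy becomes explicit in the caller:
//   %p.copy = alloca %struct.S, align 8      ; the alloca returned above
//   ; ...memcpy from %p into %p.copy is emitted by HandleByValArgumentInit...
// If @callee only reads memory and %p is sufficiently aligned, %p itself is
// returned and no copy is made.
// ----------------------------------------------------------------------------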
1446 
1447 // Check whether this Value is used by a lifetime intrinsic.
1448 static bool isUsedByLifetimeMarker(Value *V) {
1449  for (User *U : V->users())
1450  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1451  if (II->isLifetimeStartOrEnd())
1452  return true;
1453  return false;
1454 }
1455 
1456 // Check whether the given alloca already has
1457 // lifetime.start or lifetime.end intrinsics.
1458 static bool hasLifetimeMarkers(AllocaInst *AI) {
1459  Type *Ty = AI->getType();
1460  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1461  Ty->getPointerAddressSpace());
1462  if (Ty == Int8PtrTy)
1463  return isUsedByLifetimeMarker(AI);
1464 
1465  // Do a scan to find all the casts to i8*.
1466  for (User *U : AI->users()) {
1467  if (U->getType() != Int8PtrTy) continue;
1468  if (U->stripPointerCasts() != AI) continue;
1469  if (isUsedByLifetimeMarker(U))
1470  return true;
1471  }
1472  return false;
1473 }
1474 
1475 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1476 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1477 /// cannot be static.
1478 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1479  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1480 }
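// --- Editor's illustration (not part of the original file) -----------------
//   %a = alloca i32, i32 4    ; constant size: static if moved to the entry
//   %b = alloca i32, i32 %n   ; dynamic size: never static
// ----------------------------------------------------------------------------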
1481 
1482 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1483 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1484 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1485  LLVMContext &Ctx,
1486  DenseMap<const MDNode *, MDNode *> &IANodes) {
1487  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1488  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1489  OrigDL.getScope(), IA);
1490 }
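// --- Editor's illustration (not part of the original file) -----------------
// With assumed line numbers, cloning a location at callee line 7 for a call
// site at caller line 42 yields metadata shaped roughly like:
//   !7  = !DILocation(line: 7, scope: !calleeScope, inlinedAt: !42)
//   !42 = !DILocation(line: 42, scope: !callerScope)
// ----------------------------------------------------------------------------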
1491 
1492 /// Update inlined instructions' line numbers to
1493 /// encode the location where these instructions are inlined.
1494 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1495  Instruction *TheCall, bool CalleeHasDebugInfo) {
1496  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1497  if (!TheCallDL)
1498  return;
1499 
1500  auto &Ctx = Fn->getContext();
1501  DILocation *InlinedAtNode = TheCallDL;
1502 
1503  // Create a unique call site, not to be confused with any other call from the
1504  // same location.
1505  InlinedAtNode = DILocation::getDistinct(
1506  Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1507  InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1508 
1509  // Cache the inlined-at nodes as they're built so they are reused; without
1510  // this, every instruction's inlined-at chain would become distinct from the
1511  // others.
1512  DenseMap<const MDNode *, MDNode *> IANodes;
1513 
1514  // Check if we are not generating inline line tables and want to use
1515  // the call site location instead.
1516  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1517 
1518  for (; FI != Fn->end(); ++FI) {
1519  for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1520  BI != BE; ++BI) {
1521  // Loop metadata needs to be updated so that the start and end locs
1522  // reference inlined-at locations.
1523  auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1524  &IANodes](Metadata *MD) -> Metadata * {
1525  if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1526  return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1527  return MD;
1528  };
1529  updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1530 
1531  if (!NoInlineLineTables)
1532  if (DebugLoc DL = BI->getDebugLoc()) {
1533  DebugLoc IDL =
1534  inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1535  BI->setDebugLoc(IDL);
1536  continue;
1537  }
1538 
1539  if (CalleeHasDebugInfo && !NoInlineLineTables)
1540  continue;
1541 
1542  // If the inlined instruction has no line number, or if inline info
1543  // is not being generated, make it look as if it originates from the call
1544  // location. This is important for ((__always_inline, __nodebug__))
1545  // functions which must use caller location for all instructions in their
1546  // function body.
1547 
1548  // Don't update static allocas, as they may get moved later.
1549  if (auto *AI = dyn_cast<AllocaInst>(BI))
1550  if (allocaWouldBeStaticInEntry(AI))
1551  continue;
1552 
1553  BI->setDebugLoc(TheCallDL);
1554  }
1555 
1556  // Remove debug info intrinsics if we're not keeping inline info.
1557  if (NoInlineLineTables) {
1558  BasicBlock::iterator BI = FI->begin();
1559  while (BI != FI->end()) {
1560  if (isa<DbgInfoIntrinsic>(BI)) {
1561  BI = BI->eraseFromParent();
1562  continue;
1563  }
1564  ++BI;
1565  }
1566  }
1567 
1568  }
1569 }
1570 
1571 /// Update the block frequencies of the caller after a callee has been inlined.
1572 ///
1573 /// Each block cloned into the caller has its block frequency scaled by the
1574 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1575 /// callee's entry block gets the same frequency as the callsite block and the
1576 /// relative frequencies of all cloned blocks remain the same after cloning.
1577 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1578  const ValueToValueMapTy &VMap,
1579  BlockFrequencyInfo *CallerBFI,
1580  BlockFrequencyInfo *CalleeBFI,
1581  const BasicBlock &CalleeEntryBlock) {
1582  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1583  for (auto Entry : VMap) {
1584  if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1585  continue;
1586  auto *OrigBB = cast<BasicBlock>(Entry.first);
1587  auto *ClonedBB = cast<BasicBlock>(Entry.second);
1588  uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1589  if (!ClonedBBs.insert(ClonedBB).second) {
1590  // Multiple blocks in the callee might get mapped to one cloned block in
1591  // the caller since we prune the callee as we clone it. When that happens,
1592  // we want to use the maximum among the original blocks' frequencies.
1593  uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1594  if (NewFreq > Freq)
1595  Freq = NewFreq;
1596  }
1597  CallerBFI->setBlockFreq(ClonedBB, Freq);
1598  }
1599  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1600  CallerBFI->setBlockFreqAndScale(
1601  EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1602  ClonedBBs);
1603 }
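// --- Editor's worked example (not part of the original file) ---------------
// Assume the callee entry frequency is 100, some callee block B has
// frequency 50, and the call-site block has frequency 8 in the caller. After
// scaling by 8/100, the clone of B gets frequency 4 and the cloned entry
// block gets the call site's frequency of 8, preserving relative weights.
// ----------------------------------------------------------------------------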
1604 
1605 /// Update the branch metadata for cloned call instructions.
1606 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1607  const ProfileCount &CalleeEntryCount,
1608  const CallBase &TheCall, ProfileSummaryInfo *PSI,
1609  BlockFrequencyInfo *CallerBFI) {
1610  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1611  CalleeEntryCount.getCount() < 1)
1612  return;
1613  auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1614  int64_t CallCount =
1615  std::min(CallSiteCount.getValueOr(0), CalleeEntryCount.getCount());
1616  updateProfileCallee(Callee, -CallCount, &VMap);
1617 }
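// --- Editor's worked example (not part of the original file) ---------------
// With a hypothetical call-site count of 30 and a callee entry count of 100,
// CallCount = min(30, 100) = 30, so updateProfileCallee is invoked with an
// entry delta of -30 for the remaining out-of-line copy of the callee.
// ----------------------------------------------------------------------------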
1618 
1619 void llvm::updateProfileCallee(
1620  Function *Callee, int64_t entryDelta,
1621  const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1622  auto CalleeCount = Callee->getEntryCount();
1623  if (!CalleeCount.hasValue())
1624  return;
1625 
1626  uint64_t priorEntryCount = CalleeCount.getCount();
1627  uint64_t newEntryCount;
1628 
1629  // Since CallSiteCount is an estimate, it could exceed the original callee
1630  // count; clamp the new count at 0 to guard against underflow.
1631  if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1632  newEntryCount = 0;
1633  else
1634  newEntryCount = priorEntryCount + entryDelta;
1635 
1636  // Are we updating profile counts as part of inlining (a VMap was given)?
1637  if (VMap) {
1638  uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1639  for (auto Entry : *VMap)
1640  if (isa<CallInst>(Entry.first))
1641  if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1642  CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1643  }
1644 
1645  if (entryDelta) {
1646  Callee->setEntryCount(newEntryCount);
1647 
1648  for (BasicBlock &BB : *Callee)
1649  // No need to update the callsite if it is pruned during inlining.
1650  if (!VMap || VMap->count(&BB))
1651  for (Instruction &I : BB)
1652  if (CallInst *CI = dyn_cast<CallInst>(&I))
1653  CI->updateProfWeight(newEntryCount, priorEntryCount);
1654  }
1655 }
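// --- Editor's worked example (not part of the original file) ---------------
// Continuing the numbers above: priorEntryCount = 100 and entryDelta = -30
// give newEntryCount = 70. Cloned call sites are scaled by 30/100 and the
// call sites remaining in the original callee by 70/100 via updateProfWeight.
// ----------------------------------------------------------------------------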
1656 
1657 /// An operand bundle "clang.arc.attachedcall" on a call indicates the call
1658 /// result is implicitly consumed by a call to retainRV or claimRV immediately
1659 /// after the call. This function inlines the retainRV/claimRV calls.
1660 ///
1661 /// There are three cases to consider:
1662 ///
1663 /// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
1664 /// object in the callee return block, the autoreleaseRV call and the
1665 /// retainRV/claimRV call in the caller cancel out. If the call in the caller
1666 /// is a claimRV call, a call to objc_release is emitted.
1667 ///
1668 /// 2. If there is a call in the callee return block that doesn't have operand
1669 /// bundle "clang.arc.attachedcall", the operand bundle on the original call
1670 /// is transferred to the call in the callee.
1671 ///
1672 /// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
1673 /// a retainRV call.
1674 static void
1675 inlineRetainOrClaimRVCalls(CallBase &CB,
1676  const SmallVectorImpl<ReturnInst *> &Returns) {
1677  Module *Mod = CB.getModule();
1678  bool IsRetainRV = objcarc::hasAttachedCallOpBundle(&CB, true),
1679  IsClaimRV = !IsRetainRV;
1680 
1681  for (auto *RI : Returns) {
1682  Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
1683  BasicBlock::reverse_iterator I = ++(RI->getIterator().getReverse());
1684  BasicBlock::reverse_iterator EI = RI->getParent()->rend();
1685  bool InsertRetainCall = IsRetainRV;
1686  IRBuilder<> Builder(RI->getContext());
1687 
1688  // Walk backwards through the basic block looking for either a matching
1689  // autoreleaseRV call or an unannotated call.
1690  for (; I != EI;) {
1691  auto CurI = I++;
1692 
1693  // Ignore casts.
1694  if (isa<CastInst>(*CurI))
1695  continue;
1696 
1697  if (auto *II = dyn_cast<IntrinsicInst>(&*CurI)) {
1698  if (II->getIntrinsicID() == Intrinsic::objc_autoreleaseReturnValue &&
1699  II->hasNUses(0) &&
1700  objcarc::GetRCIdentityRoot(II->getOperand(0)) == RetOpnd) {
1701  // If we've found a matching autoreleaseRV call:
1702  // - If claimRV is attached to the call, insert a call to objc_release
1703  // and erase the autoreleaseRV call.
1704  // - If retainRV is attached to the call, just erase the autoreleaseRV
1705  // call.
1706  if (IsClaimRV) {
1707  Builder.SetInsertPoint(II);
1708  Function *IFn =
1709  Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
1710  Value *BC =
1711  Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1712  Builder.CreateCall(IFn, BC, "");
1713  }
1714  II->eraseFromParent();
1715  InsertRetainCall = false;
1716  }
1717  } else if (auto *CI = dyn_cast<CallInst>(&*CurI)) {
1718  if (objcarc::GetRCIdentityRoot(CI) == RetOpnd &&
1719  !objcarc::hasAttachedCallOpBundle(CI)) {
1720  // If we've found an unannotated call that defines RetOpnd, add a
1721  // "clang.arc.attachedcall" operand bundle.
1722  Value *BundleArgs[] = {ConstantInt::get(
1723  Builder.getInt64Ty(),
1724  objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
1725  OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
1726  auto *NewCall = CallBase::addOperandBundle(
1727  CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
1728  NewCall->copyMetadata(*CI);
1729  CI->replaceAllUsesWith(NewCall);
1730  CI->eraseFromParent();
1731  InsertRetainCall = false;
1732  }
1733  }
1734 
1735  break;
1736  }
1737 
1738  if (InsertRetainCall) {
1739  // The retainRV is attached to the call and we've failed to find a
1740  // matching autoreleaseRV or an annotated call in the callee. Emit a call
1741  // to objc_retain.
1742  Builder.SetInsertPoint(RI);
1743  Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
1744  Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1745  Builder.CreateCall(IFn, BC, "");
1746  }
1747  }
1748 }
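// --- Editor's illustration of case 1 (not part of the original file) -------
// Callee return block before inlining (names invented; the autoreleaseRV
// result is unused, matching the hasNUses(0) check above):
//   call i8* @llvm.objc.autoreleaseReturnValue(i8* %v)
//   ret i8* %v
// With retainRV attached to the caller's call, the autoreleaseRV call is
// simply erased; with claimRV, it is replaced by a compensating
//   call void @llvm.objc.release(i8* %v)
// ----------------------------------------------------------------------------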
1749 
1750 /// This function inlines the called function into the basic block of the
1751 /// caller. This returns false if it is not possible to inline this call.
1752 /// The program is still in a well defined state if this occurs though.
1753 ///
1754 /// Note that this only does one level of inlining. For example, if the
1755 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1756 /// exists in the instruction stream. Similarly this will inline a recursive
1757 /// function by one level.
1758 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1759  AAResults *CalleeAAR,
1760  bool InsertLifetime,
1761  Function *ForwardVarArgsTo) {
1762  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1763 
1764  // FIXME: we don't inline callbr yet.
1765  if (isa<CallBrInst>(CB))
1766  return InlineResult::failure("We don't inline callbr yet.");
1767 
1768  // If IFI has any state in it, zap it before we fill it in.
1769  IFI.reset();
1770 
1771  Function *CalledFunc = CB.getCalledFunction();
1772  if (!CalledFunc || // Can't inline external function or indirect
1773  CalledFunc->isDeclaration()) // call!
1774  return InlineResult::failure("external or indirect");
1775 
1776  // The inliner does not know how to inline through calls with operand bundles
1777  // in general ...
1778  if (CB.hasOperandBundles()) {
1779  for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1780  uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1781  // ... but it knows how to inline through "deopt" operand bundles ...
1782  if (Tag == LLVMContext::OB_deopt)
1783  continue;
1784  // ... and "funclet" operand bundles.
1785  if (Tag == LLVMContext::OB_funclet)
1786  continue;
1787  if (Tag == LLVMContext::OB_clang_arc_attachedcall)
1788  continue;
1789 
1790  return InlineResult::failure("unsupported operand bundle");
1791  }
1792  }
1793 
1794  // If the call to the callee cannot throw, set the 'nounwind' flag on any
1795  // calls that we inline.
1796  bool MarkNoUnwind = CB.doesNotThrow();
1797 
1798  BasicBlock *OrigBB = CB.getParent();
1799  Function *Caller = OrigBB->getParent();
1800 
1801  // GC poses two hazards to inlining, which only occur when the callee has GC:
1802  // 1. If the caller has no GC, then the callee's GC must be propagated to the
1803  // caller.
1804  // 2. If the caller has a differing GC, it is invalid to inline.
1805  if (CalledFunc->hasGC()) {
1806  if (!Caller->hasGC())
1807  Caller->setGC(CalledFunc->getGC());
1808  else if (CalledFunc->getGC() != Caller->getGC())
1809  return InlineResult::failure("incompatible GC");
1810  }
1811 
1812  // Get the personality function from the callee if it contains a landing pad.
1813  Constant *CalledPersonality =
1814  CalledFunc->hasPersonalityFn()
1815  ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1816  : nullptr;
1817 
1818  // Find the personality function used by the landing pads of the caller. If it
1819  // exists, then check to see that it matches the personality function used in
1820  // the callee.
1821  Constant *CallerPersonality =
1822  Caller->hasPersonalityFn()
1823  ? Caller->getPersonalityFn()->stripPointerCasts()
1824  : nullptr;
1825  if (CalledPersonality) {
1826  if (!CallerPersonality)
1827  Caller->setPersonalityFn(CalledPersonality);
1828  // If the personality functions match, then we can perform the
1829  // inlining. Otherwise, we can't inline.
1830  // TODO: This isn't 100% true. Some personality functions are proper
1831  // supersets of others and can be used in place of the other.
1832  else if (CalledPersonality != CallerPersonality)
1833  return InlineResult::failure("incompatible personality");
1834  }
1835 
1836  // We need to figure out which funclet the callsite was in so that we may
1837  // properly nest the callee.
1838  Instruction *CallSiteEHPad = nullptr;
1839  if (CallerPersonality) {
1840  EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1841  if (isScopedEHPersonality(Personality)) {
1842  Optional<OperandBundleUse> ParentFunclet =
1843  CB.getOperandBundle(LLVMContext::OB_funclet);
1844  if (ParentFunclet)
1845  CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1846 
1847  // OK, the inlining site is legal. What about the target function?
1848 
1849  if (CallSiteEHPad) {
1850  if (Personality == EHPersonality::MSVC_CXX) {
1851  // The MSVC personality cannot tolerate catches getting inlined into
1852  // cleanup funclets.
1853  if (isa<CleanupPadInst>(CallSiteEHPad)) {
1854  // Ok, the call site is within a cleanuppad. Let's check the callee
1855  // for catchpads.
1856  for (const BasicBlock &CalledBB : *CalledFunc) {
1857  if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1858  return InlineResult::failure("catch in cleanup funclet");
1859  }
1860  }
1861  } else if (isAsynchronousEHPersonality(Personality)) {
1862  // SEH is even less tolerant; there may not be any sort of exceptional
1863  // funclet in the callee.
1864  for (const BasicBlock &CalledBB : *CalledFunc) {
1865  if (CalledBB.isEHPad())
1866  return InlineResult::failure("SEH in cleanup funclet");
1867  }
1868  }
1869  }
1870  }
1871  }
1872 
1873  // Determine if we are dealing with a call in an EHPad which does not unwind
1874  // to caller.
1875  bool EHPadForCallUnwindsLocally = false;
1876  if (CallSiteEHPad && isa<CallInst>(CB)) {
1877  UnwindDestMemoTy FuncletUnwindMap;
1878  Value *CallSiteUnwindDestToken =
1879  getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1880 
1881  EHPadForCallUnwindsLocally =
1882  CallSiteUnwindDestToken &&
1883  !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1884  }
1885 
1886  // Get an iterator to the last basic block in the function, which will have
1887  // the new function inlined after it.
1888  Function::iterator LastBlock = --Caller->end();
1889 
1890  // Make sure to capture all of the return instructions from the cloned
1891  // function.
1892  SmallVector<ReturnInst*, 8> Returns;
1893  ClonedCodeInfo InlinedFunctionInfo;
1894  Function::iterator FirstNewBlock;
1895 
1896  { // Scope to destroy VMap after cloning.
1897  ValueToValueMapTy VMap;
1898  // Keep a list of pair (dst, src) to emit byval initializations.
1899  SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1900 
1901  // When inlining a function that contains noalias scope metadata,
1902  // this metadata needs to be cloned so that the inlined blocks
1903  // have different "unique scopes" at every call site.
1904  // Track the metadata that must be cloned. Do this before other changes to
1905  // the function, so that we do not get in trouble when inlining caller ==
1906  // callee.
1907  ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
1908 
1909  auto &DL = Caller->getParent()->getDataLayout();
1910 
1911  // Calculate the vector of arguments to pass into the function cloner, which
1912  // matches up the formal to the actual argument values.
1913  auto AI = CB.arg_begin();
1914  unsigned ArgNo = 0;
1915  for (Function::arg_iterator I = CalledFunc->arg_begin(),
1916  E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1917  Value *ActualArg = *AI;
1918 
1919  // When byval arguments are actually inlined, we need to make the copy implied
1920  // by them explicit. However, we don't do this if the callee is readonly
1921  // or readnone, because the copy would be unneeded: the callee doesn't
1922  // modify the struct.
1923  if (CB.isByValArgument(ArgNo)) {
1924  ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
1925  CalledFunc->getParamAlignment(ArgNo));
1926  if (ActualArg != *AI)
1927  ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1928  }
1929 
1930  VMap[&*I] = ActualArg;
1931  }
1932 
1933  // TODO: Remove this when users have been updated to the assume bundles.
1934  // Add alignment assumptions if necessary. We do this before the inlined
1935  // instructions are actually cloned into the caller so that we can easily
1936  // check what will be known at the start of the inlined code.
1937  AddAlignmentAssumptions(CB, IFI);
1938 
1939  AssumptionCache *AC =
1940  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1941 
1942  // Preserve all attributes on the call and its parameters.
1943  salvageKnowledge(&CB, AC);
1944 
1945  // We want the inliner to prune the code as it copies. We would LOVE to
1946  // have no dead or constant instructions leftover after inlining occurs
1947  // (which can happen, e.g., because an argument was constant), but we'll be
1948  // happy with whatever the cloner can do.
1949  CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1950  /*ModuleLevelChanges=*/false, Returns, ".i",
1951  &InlinedFunctionInfo);
1952  // Remember the first block that is newly cloned over.
1953  FirstNewBlock = LastBlock; ++FirstNewBlock;
1954 
1955  // Insert retainRV/claimRV runtime calls.
1956  if (objcarc::hasAttachedCallOpBundle(&CB))
1957  inlineRetainOrClaimRVCalls(CB, Returns);
1958 
1959  // Update caller/callee profiles only when requested. For sample loader
1960  // inlining, the context-sensitive inlinee profile doesn't need to be
1961  // subtracted from callee profile, and the inlined clone also doesn't need
1962  // to be scaled based on call site count.
1963  if (IFI.UpdateProfile) {
1964  if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1965  // Update the BFI of blocks cloned into the caller.
1966  updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1967  CalledFunc->front());
1968 
1969  updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
1970  IFI.PSI, IFI.CallerBFI);
1971  }
1972 
1973  // Inject byval arguments initialization.
1974  for (std::pair<Value*, Value*> &Init : ByValInit)
1975  HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1976  &*FirstNewBlock, IFI);
1977 
1978  Optional<OperandBundleUse> ParentDeopt =
1979  CB.getOperandBundle(LLVMContext::OB_deopt);
1980  if (ParentDeopt) {
1981  SmallVector<OperandBundleDef, 2> OpDefs;
1982 
1983  for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1984  CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1985  if (!ICS)
1986  continue; // instruction was DCE'd or RAUW'ed to undef
1987 
1988  OpDefs.clear();
1989 
1990  OpDefs.reserve(ICS->getNumOperandBundles());
1991 
1992  for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1993  ++COBi) {
1994  auto ChildOB = ICS->getOperandBundleAt(COBi);
1995  if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1996  // If the inlined call has other operand bundles, let them be
1997  OpDefs.emplace_back(ChildOB);
1998  continue;
1999  }
2000 
2001  // It may be useful to separate this logic (of handling operand
2002  // bundles) out to a separate "policy" component if this gets crowded.
2003  // Prepend the parent's deoptimization continuation to the newly
2004  // inlined call's deoptimization continuation.
2005  std::vector<Value *> MergedDeoptArgs;
2006  MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2007  ChildOB.Inputs.size());
2008 
2009  llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2010  llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2011 
2012  OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2013  }
2014 
2015  Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
2016 
2017  // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2018  // this even if the call returns void.
2019  ICS->replaceAllUsesWith(NewI);
2020 
2021  VH = nullptr;
2022  ICS->eraseFromParent();
2023  }
2024  }
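// --- Editor's illustration (not part of the original file) -----------------
// Hypothetical deopt states: inlining a call site carrying
//   [ "deopt"(i32 1, i32 2) ]
// over a callee call carrying [ "deopt"(i32 3) ] rewrites the latter to
//   [ "deopt"(i32 1, i32 2, i32 3) ]
// i.e. the parent's continuation is prepended to the child's.
// ----------------------------------------------------------------------------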
2025 
2026  // Update the callgraph if requested.
2027  if (IFI.CG)
2028  UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
2029 
2030  // For 'nodebug' functions, the associated DISubprogram is always null.
2031  // Conservatively avoid propagating the callsite debug location to
2032  // instructions inlined from a function whose DISubprogram is not null.
2033  fixupLineNumbers(Caller, FirstNewBlock, &CB,
2034  CalledFunc->getSubprogram() != nullptr);
2035 
2036  // Now clone the inlined noalias scope metadata.
2037  SAMetadataCloner.clone();
2038  SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2039 
2040  // Add noalias metadata if necessary.
2041  AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2042 
2043  // Clone return attributes on the callsite into the calls within the inlined
2044  // function which feed into its return value.
2045  AddReturnAttributes(CB, VMap);
2046 
2047  // Propagate metadata on the callsite if necessary.
2048  PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2049 
2050  // Register any cloned assumptions.
2051  if (IFI.GetAssumptionCache)
2052  for (BasicBlock &NewBlock :
2053  make_range(FirstNewBlock->getIterator(), Caller->end()))
2054  for (Instruction &I : NewBlock)
2055  if (auto *II = dyn_cast<AssumeInst>(&I))
2056  IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2057  }
2058 
2059  // If there are any alloca instructions in the block that used to be the entry
2060  // block for the callee, move them to the entry block of the caller. First
2061  // calculate which instruction they should be inserted before. We insert the
2062  // instructions at the end of the current alloca list.
2063  {
2064  BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2065  for (BasicBlock::iterator I = FirstNewBlock->begin(),
2066  E = FirstNewBlock->end(); I != E; ) {
2067  AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2068  if (!AI) continue;
2069 
2070  // If the alloca is now dead, remove it. This often occurs due to code
2071  // specialization.
2072  if (AI->use_empty()) {
2073  AI->eraseFromParent();
2074  continue;
2075  }
2076 
2077  if (!allocaWouldBeStaticInEntry(AI))
2078  continue;
2079 
2080  // Keep track of the static allocas that we inline into the caller.
2081  IFI.StaticAllocas.push_back(AI);
2082 
2083  // Scan for the block of allocas that we can move over, and move them
2084  // all at once.
2085  while (isa<AllocaInst>(I) &&
2086  !cast<AllocaInst>(I)->use_empty() &&
2087  allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2088  IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2089  ++I;
2090  }
2091 
2092  // Transfer all of the allocas over in a block. Using splice means
2093  // that the instructions aren't removed from the symbol table, then
2094  // reinserted.
2095  Caller->getEntryBlock().getInstList().splice(
2096  InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
2097  }
2098  }
2099 
2100  SmallVector<Value*,4> VarArgsToForward;
2101  SmallVector<AttributeSet, 4> VarArgsAttrs;
2102  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2103  i < CB.getNumArgOperands(); i++) {
2104  VarArgsToForward.push_back(CB.getArgOperand(i));
2105  VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
2106  }
2107 
2108  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2109  if (InlinedFunctionInfo.ContainsCalls) {
2110  CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2111  if (CallInst *CI = dyn_cast<CallInst>(&CB))
2112  CallSiteTailKind = CI->getTailCallKind();
2113 
2114  // For inlining purposes, the "notail" marker is the same as no marker.
2115  if (CallSiteTailKind == CallInst::TCK_NoTail)
2116  CallSiteTailKind = CallInst::TCK_None;
2117 
2118  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2119  ++BB) {
2120  for (auto II = BB->begin(); II != BB->end();) {
2121  Instruction &I = *II++;
2122  CallInst *CI = dyn_cast<CallInst>(&I);
2123  if (!CI)
2124  continue;
2125 
2126  // Forward varargs from inlined call site to calls to the
2127  // ForwardVarArgsTo function, if requested, and to musttail calls.
2128  if (!VarArgsToForward.empty() &&
2129  ((ForwardVarArgsTo &&
2130  CI->getCalledFunction() == ForwardVarArgsTo) ||
2131  CI->isMustTailCall())) {
2132  // Collect attributes for non-vararg parameters.
2133  AttributeList Attrs = CI->getAttributes();
2134  SmallVector<AttributeSet, 8> ArgAttrs;
2135  if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2136  for (unsigned ArgNo = 0;
2137  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2138  ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2139  }
2140 
2141  // Add VarArg attributes.
2142  ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2143  Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
2144  Attrs.getRetAttributes(), ArgAttrs);
2145  // Add VarArgs to existing parameters.
2146  SmallVector<Value *, 6> Params(CI->arg_operands());
2147  Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2148  CallInst *NewCI = CallInst::Create(
2149  CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2150  NewCI->setDebugLoc(CI->getDebugLoc());
2151  NewCI->setAttributes(Attrs);
2152  NewCI->setCallingConv(CI->getCallingConv());
2153  CI->replaceAllUsesWith(NewCI);
2154  CI->eraseFromParent();
2155  CI = NewCI;
2156  }
2157 
2158  if (Function *F = CI->getCalledFunction())
2159  InlinedDeoptimizeCalls |=
2160  F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2161 
2162  // We need to reduce the strength of any inlined tail calls. For
2163  // musttail, we have to avoid introducing potential unbounded stack
2164  // growth. For example, if functions 'f' and 'g' are mutually recursive
2165  // with musttail, we can inline 'g' into 'f' so long as we preserve
2166  // musttail on the cloned call to 'f'. If either the inlined call site
2167  // or the cloned call site is *not* musttail, the program already has
2168  // one frame of stack growth, so it's safe to remove musttail. Here is
2169  // a table of example transformations:
2170  //
2171  // f -> musttail g -> musttail f ==> f -> musttail f
2172  // f -> musttail g -> tail f ==> f -> tail f
2173  // f -> g -> musttail f ==> f -> f
2174  // f -> g -> tail f ==> f -> f
2175  //
2176  // Inlined notail calls should remain notail calls.
2177  CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2178  if (ChildTCK != CallInst::TCK_NoTail)
2179  ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2180  CI->setTailCallKind(ChildTCK);
2181  InlinedMustTailCalls |= CI->isMustTailCall();
2182 
2183  // Calls inlined through a 'nounwind' call site should be marked
2184  // 'nounwind'.
2185  if (MarkNoUnwind)
2186  CI->setDoesNotThrow();
2187  }
2188  }
2189  }
2190 
2191  // Leave lifetime markers for the static allocas, scoping them to the
2192  // function we just inlined.
2193  // We need to insert lifetime intrinsics even at O0 to avoid invalid
2194  // access caused by multithreaded coroutines. The check
2195  // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2196  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2197  !IFI.StaticAllocas.empty()) {
2198  IRBuilder<> builder(&FirstNewBlock->front());
2199  for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2200  AllocaInst *AI = IFI.StaticAllocas[ai];
2201  // Don't mark swifterror allocas. They can't have bitcast uses.
2202  if (AI->isSwiftError())
2203  continue;
2204 
2205  // If the alloca is already scoped to something smaller than the whole
2206  // function then there's no need to add redundant, less accurate markers.
2207  if (hasLifetimeMarkers(AI))
2208  continue;
2209 
2210  // Try to determine the size of the allocation.
2211  ConstantInt *AllocaSize = nullptr;
2212  if (ConstantInt *AIArraySize =
2213  dyn_cast<ConstantInt>(AI->getArraySize())) {
2214  auto &DL = Caller->getParent()->getDataLayout();
2215  Type *AllocaType = AI->getAllocatedType();
2216  TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2217  uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2218 
2219  // Don't add markers for zero-sized allocas.
2220  if (AllocaArraySize == 0)
2221  continue;
2222 
2223  // Check that array size doesn't saturate uint64_t and doesn't
2224  // overflow when it's multiplied by type size.
2225  if (!AllocaTypeSize.isScalable() &&
2226  AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2227  std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2228  AllocaTypeSize.getFixedSize()) {
2229  AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2230  AllocaArraySize * AllocaTypeSize);
2231  }
2232  }
2233 
2234  builder.CreateLifetimeStart(AI, AllocaSize);
2235  for (ReturnInst *RI : Returns) {
2236  // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2237  // call and a return. The return kills all local allocas.
2238  if (InlinedMustTailCalls &&
2239  RI->getParent()->getTerminatingMustTailCall())
2240  continue;
2241  if (InlinedDeoptimizeCalls &&
2242  RI->getParent()->getTerminatingDeoptimizeCall())
2243  continue;
2244  IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2245  }
2246  }
2247  }
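// --- Editor's illustration (not part of the original file) -----------------
// For an assumed 16-byte static alloca %buf from the inlined body:
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %buf.cast) ; inlined entry
//   ...
//   call void @llvm.lifetime.end.p0i8(i64 16, i8* %buf.cast)   ; each return
// ----------------------------------------------------------------------------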
2248 
2249  // If the inlined code contained dynamic alloca instructions, wrap the inlined
2250  // code with llvm.stacksave/llvm.stackrestore intrinsics.
2251  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2252  Module *M = Caller->getParent();
2253  // Get the two intrinsics we care about.
2254  Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2255  Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
2256 
2257  // Insert the llvm.stacksave.
2258  CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2259  .CreateCall(StackSave, {}, "savedstack");
2260 
2261  // Insert a call to llvm.stackrestore before any return instructions in the
2262  // inlined function.
2263  for (ReturnInst *RI : Returns) {
2264  // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2265  // call and a return. The return will restore the stack pointer.
2266  if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2267  continue;
2268  if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2269  continue;
2270  IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2271  }
2272  }
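// --- Editor's illustration (not part of the original file) -----------------
//   %savedstack = call i8* @llvm.stacksave()        ; first inlined block
//   ...inlined body containing dynamic allocas...
//   call void @llvm.stackrestore(i8* %savedstack)   ; before each return
// ----------------------------------------------------------------------------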
2273 
2274  // If we are inlining for an invoke instruction, we must make sure to rewrite
2275  // any call instructions into invoke instructions. This is sensitive to which
2276  // funclet pads were top-level in the inlinee, so must be done before
2277  // rewriting the "parent pad" links.
2278  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2279  BasicBlock *UnwindDest = II->getUnwindDest();
2280  Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2281  if (isa<LandingPadInst>(FirstNonPHI)) {
2282  HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2283  } else {
2284  HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2285  }
2286  }
2287 
2288  // Update the lexical scopes of the new funclets and callsites.
2289  // Anything that had 'none' as its parent is now nested inside the callsite's
2290  // EHPad.
2291 
2292  if (CallSiteEHPad) {
2293  for (Function::iterator BB = FirstNewBlock->getIterator(),
2294  E = Caller->end();
2295  BB != E; ++BB) {
2296  // Add bundle operands to any top-level call sites.
2297  SmallVector<OperandBundleDef, 1> OpBundles;
2298  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2299  CallBase *I = dyn_cast<CallBase>(&*BBI++);
2300  if (!I)
2301  continue;
2302 
2303  // Skip call sites which are nounwind intrinsics.
2304  auto *CalledFn =
2305  dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
2306  if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2307  continue;
2308 
2309  // Skip call sites which already have a "funclet" bundle.
2310  if (I->getOperandBundle(LLVMContext::OB_funclet))
2311  continue;
2312 
2313  I->getOperandBundlesAsDefs(OpBundles);
2314  OpBundles.emplace_back("funclet", CallSiteEHPad);
2315 
2316  Instruction *NewInst = CallBase::Create(I, OpBundles, I);
2317  NewInst->takeName(I);
2318  I->replaceAllUsesWith(NewInst);
2319  I->eraseFromParent();
2320 
2321  OpBundles.clear();
2322  }
2323 
2324  // It is problematic if the inlinee has a cleanupret which unwinds to
2325  // caller and we inline it into a call site which doesn't unwind but into
2326  // an EH pad that does. Such an edge must be dynamically unreachable.
2327  // As such, we replace the cleanupret with unreachable.
2328  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2329  if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2330  changeToUnreachable(CleanupRet);
2331 
2332  Instruction *I = BB->getFirstNonPHI();
2333  if (!I->isEHPad())
2334  continue;
2335 
2336  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2337  if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2338  CatchSwitch->setParentPad(CallSiteEHPad);
2339  } else {
2340  auto *FPI = cast<FuncletPadInst>(I);
2341  if (isa<ConstantTokenNone>(FPI->getParentPad()))
2342  FPI->setParentPad(CallSiteEHPad);
2343  }
2344  }
2345  }
2346 
2347  if (InlinedDeoptimizeCalls) {
2348  // We need to at least remove the deoptimizing returns from the Return set,
2349  // so that the control flow from those returns does not get merged into the
2350  // caller (but terminate it instead). If the caller's return type does not
2351  // match the callee's return type, we also need to change the return type of
2352  // the intrinsic.
2353  if (Caller->getReturnType() == CB.getType()) {
2354  llvm::erase_if(Returns, [](ReturnInst *RI) {
2355  return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2356  });
2357  } else {
2358  SmallVector<ReturnInst *, 8> NormalReturns;
2359  Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2360  Caller->getParent(), Intrinsic::experimental_deoptimize,
2361  {Caller->getReturnType()});
2362 
2363  for (ReturnInst *RI : Returns) {
2364  CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2365  if (!DeoptCall) {
2366  NormalReturns.push_back(RI);
2367  continue;
2368  }
2369 
2370  // The calling convention on the deoptimize call itself may be bogus,
2371  // since the code we're inlining may have undefined behavior (and may
2372  // never actually execute at runtime); but all
2373  // @llvm.experimental.deoptimize declarations have to have the same
2374  // calling convention in a well-formed module.
2375  auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2376  NewDeoptIntrinsic->setCallingConv(CallingConv);
2377  auto *CurBB = RI->getParent();
2378  RI->eraseFromParent();
2379 
2380  SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2381 
2382  SmallVector<OperandBundleDef, 1> OpBundles;
2383  DeoptCall->getOperandBundlesAsDefs(OpBundles);
2384  auto DeoptAttributes = DeoptCall->getAttributes();
2385  DeoptCall->eraseFromParent();
2386  assert(!OpBundles.empty() &&
2387  "Expected at least the deopt operand bundle");
2388 
2389  IRBuilder<> Builder(CurBB);
2390  CallInst *NewDeoptCall =
2391  Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2392  NewDeoptCall->setCallingConv(CallingConv);
2393  NewDeoptCall->setAttributes(DeoptAttributes);
2394  if (NewDeoptCall->getType()->isVoidTy())
2395  Builder.CreateRetVoid();
2396  else
2397  Builder.CreateRet(NewDeoptCall);
2398  }
2399 
2400  // Leave behind the normal returns so we can merge control flow.
2401  std::swap(Returns, NormalReturns);
2402  }
2403  }
2404 
2405  // Handle any inlined musttail call sites. In order for a new call site to be
2406  // musttail, the source of the clone and the inlined call site must have been
2407  // musttail. Therefore it's safe to return without merging control into the
2408  // phi below.
2409  if (InlinedMustTailCalls) {
2410  // Check if we need to bitcast the result of any musttail calls.
2411  Type *NewRetTy = Caller->getReturnType();
2412  bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2413 
2414  // Handle the returns preceded by musttail calls separately.
2415  SmallVector<ReturnInst *, 8> NormalReturns;
2416  for (ReturnInst *RI : Returns) {
2417  CallInst *ReturnedMustTail =
2418  RI->getParent()->getTerminatingMustTailCall();
2419  if (!ReturnedMustTail) {
2420  NormalReturns.push_back(RI);
2421  continue;
2422  }
2423  if (!NeedBitCast)
2424  continue;
2425 
2426  // Delete the old return and any preceding bitcast.
2427  BasicBlock *CurBB = RI->getParent();
2428  auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2429  RI->eraseFromParent();
2430  if (OldCast)
2431  OldCast->eraseFromParent();
2432 
2433  // Insert a new bitcast and return with the right type.
2434  IRBuilder<> Builder(CurBB);
2435  Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2436  }
2437 
2438  // Leave behind the normal returns so we can merge control flow.
2439  std::swap(Returns, NormalReturns);
2440  }
2441 
2442  // Now that all of the transforms on the inlined code have taken place but
2443  // before we splice the inlined code into the CFG and lose track of which
2444  // blocks were actually inlined, collect the call sites. We only do this if
2445  // call graph updates weren't requested, as those provide value handle based
2446  // tracking of inlined call sites instead. Calls to intrinsics are not
2447  // collected because they are not inlineable.
2448  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2449  // Otherwise just collect the raw call sites that were inlined.
2450  for (BasicBlock &NewBB :
2451  make_range(FirstNewBlock->getIterator(), Caller->end()))
2452  for (Instruction &I : NewBB)
2453  if (auto *CB = dyn_cast<CallBase>(&I))
2454  if (!(CB->getCalledFunction() &&
2455  CB->getCalledFunction()->isIntrinsic()))
2456  IFI.InlinedCallSites.push_back(CB);
2457  }
2458 
2459  // If we cloned in _exactly one_ basic block, and if that block ends in a
2460  // return instruction, we splice the body of the inlined callee directly into
2461  // the calling basic block.
2462  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2463  // Move all of the instructions right before the call.
2464  OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2465  FirstNewBlock->begin(), FirstNewBlock->end());
2466  // Remove the cloned basic block.
2467  Caller->getBasicBlockList().pop_back();
2468 
2469  // If the call site was an invoke instruction, add a branch to the normal
2470  // destination.
2471  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2472  BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2473  NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2474  }
2475 
2476  // If the return instruction returned a value, replace uses of the call with
2477  // uses of the returned value.
2478  if (!CB.use_empty()) {
2479  ReturnInst *R = Returns[0];
2480  if (&CB == R->getReturnValue())
2481  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2482  else
2483  CB.replaceAllUsesWith(R->getReturnValue());
2484  }
2485  // Since we are now done with the Call/Invoke, we can delete it.
2486  CB.eraseFromParent();
2487 
2488  // Since we are now done with the return instruction, delete it also.
2489  Returns[0]->eraseFromParent();
2490 
2491  // We are now done with the inlining.
2492  return InlineResult::success();
2493  }
2494 
2495  // Otherwise, we have the normal case, of more than one block to inline or
2496  // multiple return sites.
2497 
2498  // We want to clone the entire callee function into the hole between the
2499  // "starter" and "ender" blocks. How we accomplish this depends on whether
2500  // this is an invoke instruction or a call instruction.
2501  BasicBlock *AfterCallBB;
2502  BranchInst *CreatedBranchToNormalDest = nullptr;
2503  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2504 
2505  // Add an unconditional branch to make this look like the CallInst case...
2506  CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2507 
2508  // Split the basic block. This guarantees that no PHI nodes will have to be
2509  // updated due to new incoming edges, and makes the invoke case more
2510  // symmetric to the call case.
2511  AfterCallBB =
2512  OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2513  CalledFunc->getName() + ".exit");
2514 
2515  } else { // It's a call
2516  // If this is a call instruction, we need to split the basic block that
2517  // the call lives in.
2518  //
2519  AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2520  CalledFunc->getName() + ".exit");
2521  }
2522 
2523  if (IFI.CallerBFI) {
2524  // Copy original BB's block frequency to AfterCallBB
2525  IFI.CallerBFI->setBlockFreq(
2526  AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2527  }
2528 
2529  // Change the branch that used to go to AfterCallBB to branch to the first
2530  // basic block of the inlined function.
2531  //
2532  Instruction *Br = OrigBB->getTerminator();
2533  assert(Br && Br->getOpcode() == Instruction::Br &&
2534  "splitBasicBlock broken!");
2535  Br->setOperand(0, &*FirstNewBlock);
2536 
2537  // Now that the function is correct, make it a little bit nicer. In
2538  // particular, move the basic blocks inserted from the end of the function
2539  // into the space made by splitting the source basic block.
2540  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2541  Caller->getBasicBlockList(), FirstNewBlock,
2542  Caller->end());
2543 
2544  // Handle all of the return instructions that we just cloned in, and eliminate
2545  // any users of the original call/invoke instruction.
2546  Type *RTy = CalledFunc->getReturnType();
2547 
2548  PHINode *PHI = nullptr;
2549  if (Returns.size() > 1) {
2550  // The PHI node should go at the front of the new basic block to merge all
2551  // possible incoming values.
2552  if (!CB.use_empty()) {
2553  PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2554  &AfterCallBB->front());
2555  // Anything that used the result of the function call should now use the
2556  // PHI node as their operand.
2557  CB.replaceAllUsesWith(PHI);
2558  }
2559 
2560  // Loop over all of the return instructions adding entries to the PHI node
2561  // as appropriate.
2562  if (PHI) {
2563  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2564  ReturnInst *RI = Returns[i];
2565  assert(RI->getReturnValue()->getType() == PHI->getType() &&
2566  "Ret value not consistent in function!");
2567  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2568  }
2569  }
2570 
2571  // Add a branch to the merge points and remove return instructions.
2572  DebugLoc Loc;
2573  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2574  ReturnInst *RI = Returns[i];
2575  BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2576  Loc = RI->getDebugLoc();
2577  BI->setDebugLoc(Loc);
2578  RI->eraseFromParent();
2579  }
2580  // We need to set the debug location to *somewhere* inside the
2581  // inlined function. The line number may be nonsensical, but the
2582  // instruction will at least be associated with the right
2583  // function.
2584  if (CreatedBranchToNormalDest)
2585  CreatedBranchToNormalDest->setDebugLoc(Loc);
2586  } else if (!Returns.empty()) {
2587  // Otherwise, if there is exactly one return value, just replace anything
2588  // using the return value of the call with the computed value.
2589  if (!CB.use_empty()) {
2590  if (&CB == Returns[0]->getReturnValue())
2591  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2592  else
2593  CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2594  }
2595 
2596  // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2597  BasicBlock *ReturnBB = Returns[0]->getParent();
2598  ReturnBB->replaceAllUsesWith(AfterCallBB);
2599 
2600  // Splice the code from the return block into the block that it will return
2601  // to, which contains the code that was after the call.
2602  AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2603  ReturnBB->getInstList());
2604 
2605  if (CreatedBranchToNormalDest)
2606  CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2607 
2608  // Delete the return instruction and the now-empty ReturnBB.
2609  Returns[0]->eraseFromParent();
2610  ReturnBB->eraseFromParent();
2611  } else if (!CB.use_empty()) {
2612  // No returns, but something is using the return value of the call. Just
2613  // nuke the result.
2614  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2615  }
2616 
2617  // Since we are now done with the Call/Invoke, we can delete it.
2618  CB.eraseFromParent();
2619 
2620  // If we inlined any musttail calls and the original return is now
2621  // unreachable, delete it. It can only contain a bitcast and ret.
2622  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
2623  AfterCallBB->eraseFromParent();
2624 
2625  // We should always be able to fold the entry block of the function into the
2626  // single predecessor of the block...
2627  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2628  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2629 
2630  // Splice the code entry block into calling block, right before the
2631  // unconditional branch.
2632  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2633  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2634 
2635  // Remove the unconditional branch.
2636  OrigBB->getInstList().erase(Br);
2637 
2638  // Now we can remove the CalleeEntry block, which is now empty.
2639  Caller->getBasicBlockList().erase(CalleeEntry);
2640 
2641  // If we inserted a phi node, check to see if it has a single value (e.g. all
2642  // the entries are the same or undef). If so, remove the PHI so it doesn't
2643  // block other optimizations.
2644  if (PHI) {
2645  AssumptionCache *AC =
2646  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2647  auto &DL = Caller->getParent()->getDataLayout();
2648  if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2649  PHI->replaceAllUsesWith(V);
2650  PHI->eraseFromParent();
2651  }
2652  }
2653 
2654  return InlineResult::success();
2655 }
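// --- Editor's usage sketch (not part of the original file) -----------------
// Minimal driver for this entry point; error handling and pass plumbing are
// elided, and CI is assumed to be a direct CallInst in a loaded Module:
//
//   InlineFunctionInfo IFI;
//   InlineResult Res = InlineFunction(*CI, IFI);
//   if (!Res.isSuccess())
//     errs() << "not inlined: " << Res.getFailureReason() << "\n";
// ----------------------------------------------------------------------------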
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
llvm::EHPersonality::MSVC_CXX
@ MSVC_CXX
llvm::AttrBuilder::addDereferenceableOrNullAttr
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
Definition: Attributes.cpp:1686
i
i
Definition: README.txt:29
llvm::InvokeInst::getNormalDest
BasicBlock * getNormalDest() const
Definition: Instructions.h:3875
llvm::BasicBlock::getTerminatingDeoptimizeCall
const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize prior to the terminating return in...
Definition: BasicBlock.cpp:185
llvm::CallBase::getNumOperandBundles
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:1896
IdentifyValidAttributes
static AttrBuilder IdentifyValidAttributes(CallBase &CB)
Definition: InlineFunction.cpp:1192
llvm::isAsynchronousEHPersonality
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
Definition: EHPersonalities.h:50
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
AssumptionCache.h
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::InlineResult::success
static InlineResult success()
Definition: InlineCost.h:164
llvm::Function::isIntrinsic
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition: Function.h:210
llvm::CatchSwitchInst::Create
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:4293
llvm::CallGraphNode::CalledFunctionsVector
std::vector< CallRecord > CalledFunctionsVector
Definition: CallGraph.h:182
llvm::AArch64CC::HI
@ HI
Definition: AArch64BaseInfo.h:263
llvm
---------------------— PointerInfo ------------------------------------—
Definition: AllocatorList.h:23
HandleInlinedEHPad
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
Definition: InlineFunction.cpp:664
llvm::objcarc::hasAttachedCallOpBundle
bool hasAttachedCallOpBundle(const CallBase *CB)
Definition: ObjCARCUtil.h:34
llvm::LandingPadInst::isCleanup
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
Definition: Instructions.h:2926
getUnwindDestToken
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
Definition: InlineFunction.cpp:395
llvm::CallBase::getOperandBundlesAsDefs
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition: Instructions.cpp:365
llvm::Function::args
iterator_range< arg_iterator > args()
Definition: Function.h:817
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
llvm::AArch64CC::AL
@ AL
Definition: AArch64BaseInfo.h:269
llvm::CallGraphNode::iterator
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:194
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
llvm::AssumptionCache::registerAssumption
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Definition: AssumptionCache.cpp:217
llvm::ReturnInst
Return a value (possibly void), from a function.
Definition: Instructions.h:2980
llvm::VAArgInst
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Definition: Instructions.h:1835
llvm::ClonedCodeInfo
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:64
llvm::CallBase::getOperandBundle
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:1976
Optional.h
ValueMapper.h
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
allocaWouldBeStaticInEntry
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
Definition: InlineFunction.cpp:1478
llvm::IRBuilderBase::CreateLifetimeEnd
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
Definition: IRBuilder.cpp:440
MayContainThrowingOrExitingCall
static bool MayContainThrowingOrExitingCall(Instruction *Begin, Instruction *End)
Definition: InlineFunction.cpp:1177
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1336
Metadata.h
llvm::Type::getInt8PtrTy
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:255
llvm::Function::end
iterator end()
Definition: Function.h:780
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:90
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
IntrinsicInst.h
DebugInfoMetadata.h
llvm::ValueMap::end
iterator end()
Definition: ValueMap.h:136
llvm::objcarc::getAttachedCallOperandBundleEnum
AttachedCallOperandBundle getAttachedCallOperandBundleEnum(bool IsRetain)
Definition: ObjCARCUtil.h:30
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:426
llvm::PointerType::getElementType
Type * getElementType() const
Definition: DerivedTypes.h:672
llvm::Function
Definition: Function.h:61
fixupLineNumbers
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to encode the location where these instructions are inlined.
Definition: InlineFunction.cpp:1494
getUnwindDestTokenHelper
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
Definition: InlineFunction.cpp:244
llvm::ReturnInst::getReturnValue
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
Definition: Instructions.h:3025
llvm::AllocaInst::getType
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:104
llvm::SmallVector< Value *, 8 >
llvm::CallInst::setTailCallKind
void setTailCallKind(TailCallKind TCK)
Definition: Instructions.h:1678
InlineAsm.h
llvm::LandingPadInst
The landingpad instruction holds all of the information necessary to generate correct exception handl...
Definition: Instructions.h:2879
CaptureTracking.h
llvm::CallGraphNode::removeCallEdgeFor
void removeCallEdgeFor(CallBase &Call)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:214
llvm::CallBase::isInlineAsm
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1458
llvm::Function::getSubprogram
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1532
ErrorHandling.h
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:733
llvm::IRBuilder<>
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:1728
ValueTracking.h
Local.h
llvm::AttributeList::get
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:1040
llvm::CallGraph
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:73
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:143
llvm::AAResults::onlyAccessesArgPointees
static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB)
Checks if functions with the specified behavior are known to read and write at most from objects poin...
Definition: AliasAnalysis.h:637
llvm::CallBase::addOperandBundle
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
Definition: Instructions.cpp:444
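For illustration, a hedged sketch of cloning a call with a "funclet" bundle attached, similar in spirit to what the inliner does when inlined call sites land inside an EH funclet (the helper name is illustrative; the real code rebuilds the full bundle list via CallBase::Create):
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;
static CallBase *withFuncletBundle(CallBase *CB, Value *ParentPad) {
  OperandBundleDef OB("funclet", ParentPad);
  // Clone CB with the extra bundle, splice the clone in before the original,
  // then retire the original call.
  CallBase *NewCB =
      CallBase::addOperandBundle(CB, LLVMContext::OB_funclet, OB, CB);
  NewCB->takeName(CB);
  CB->replaceAllUsesWith(NewCB);
  CB->eraseFromParent();
  return NewCB;
}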
llvm::DILocation
Debug location.
Definition: DebugInfoMetadata.h:1562
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:321
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
DenseMap.h
Module.h
llvm::BasicBlock::eraseFromParent
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:129
llvm::AttributeList
Definition: Attributes.h:398
llvm::getOrEnforceKnownAlignment
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1343
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1468
llvm::OperandBundleDefT
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1114
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1241
EHPersonalities.h
llvm::updateProfileCallee
void updateProfileCallee(Function *Callee, int64_t entryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding entryDelta then scaling callsite i...
Definition: InlineFunction.cpp:1619
llvm::BasicBlock::splitBasicBlock
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:381
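A minimal usage sketch (block and instruction names are illustrative): split BB before SplitPt so that everything from SplitPt onward moves into the returned block, leaving BB terminated by an unconditional branch the caller can then rewrite (e.g. into an invoke edge):
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
static BasicBlock *splitAt(BasicBlock *BB, Instruction *SplitPt) {
  return BB->splitBasicBlock(SplitPt->getIterator(), "split.tail");
}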
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:355
llvm::Optional
Definition: APInt.h:33
llvm::DenseMapBase::count
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:145
llvm::ProfileSummaryInfo::getProfileCount
Optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Definition: ProfileSummaryInfo.cpp:77
llvm::SmallPtrSet< Instruction *, 4 >
llvm::CallBase::isByValArgument
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1651
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:138
llvm::Function::ProfileCount::isSynthetic
bool isSynthetic() const
Definition: Function.h:305
llvm::CallBase::getNumArgOperands
unsigned getNumArgOperands() const
Definition: InstrTypes.h:1336
llvm::LLVMContext::OB_clang_arc_attachedcall
@ OB_clang_arc_attachedcall
Definition: LLVMContext.h:96
HandleCallsInBlockInlinedThroughInvoke
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
Definition: InlineFunction.cpp:539
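A simplified sketch of that per-block rewrite (hypothetical helper; the real one also honors funclet operand bundles and inline asm that cannot unwind): find the first call that may throw and convert it into an invoke unwinding to UnwindEdge:
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
static BasicBlock *convertFirstThrowingCall(BasicBlock *BB,
                                            BasicBlock *UnwindEdge) {
  for (Instruction &I : *BB)
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (!CI->doesNotThrow())
        return changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
  return nullptr; // no throwing call in this block
}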
STLExtras.h
llvm::CallBase::arg_begin
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1303
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::CallInst::TCK_None
@ TCK_None
Definition: Instructions.h:1653
llvm::CallBase::setDoesNotThrow
void setDoesNotThrow()
Definition: InstrTypes.h:1846
llvm::PointerMayBeCapturedBefore
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
Definition: CaptureTracking.cpp:183
llvm::uniteAccessGroups
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
Definition: VectorUtils.cpp:652
llvm::BasicBlock::rend
reverse_iterator rend()
Definition: BasicBlock.h:303
llvm::LinearPolySize::isScalable
bool isScalable() const
Returns whether the size is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:299
llvm::CallGraphNode::addCalledFunction
void addCalledFunction(CallBase *Call, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:243
llvm::MDBuilder::createAnonymousAliasScope
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
Definition: MDBuilder.h:140
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1198
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::InlineFunctionInfo::CallerBFI
BlockFrequencyInfo * CallerBFI
Definition: Cloning.h:218
llvm::Instruction::setMetadata
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1336
llvm::InlineFunctionInfo::PSI
ProfileSummaryInfo * PSI
Definition: Cloning.h:217
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
AliasAnalysis.h
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
Definition: AliasAnalysis.cpp:991
llvm::classifyEHPersonality
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Definition: EHPersonalities.cpp:21
Instruction.h
CommandLine.h
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
AddAliasScopeMetadata
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
Definition: InlineFunction.cpp:941
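A sketch of the metadata that helper manufactures, under the assumption of one fresh domain per inlined function and one anonymous scope per noalias argument (the helper name is illustrative):
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;
// Returns a one-element list suitable for an !alias.scope attachment.
static MDNode *makeNoAliasScopeList(LLVMContext &Ctx, StringRef FnName) {
  MDBuilder MDB(Ctx);
  MDNode *Domain = MDB.createAnonymousAliasScopeDomain(FnName);
  MDNode *Scope = MDB.createAnonymousAliasScope(Domain, FnName);
  return MDNode::get(Ctx, {Scope});
}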
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:160
llvm::BlockFrequencyInfo
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Definition: BlockFrequencyInfo.h:37
PropagateCallSiteMetadata
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
Definition: InlineFunction.cpp:793
llvm::CallGraphNode::end
iterator end()
Definition: CallGraph.h:201
llvm::InlineFunctionInfo::CG
CallGraph * CG
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition: Cloning.h:215
llvm::ms_demangle::CallingConv
CallingConv
Definition: MicrosoftDemangleNodes.h:59
llvm::GlobalValue::isDeclaration
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:228
Constants.h
llvm::AAResults
Definition: AliasAnalysis.h:456
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:113
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
UpdateReturnAttributes
static cl::opt< bool > UpdateReturnAttributes("update-return-attrs", cl::init(true), cl::Hidden, cl::desc("Update return attributes on calls within inlined body"))
llvm::DebugLoc::getCol
unsigned getCol() const
Definition: DebugLoc.cpp:30
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:648
llvm::InvokeInst::getLandingPadInst
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
Definition: Instructions.cpp:884
llvm::User
Definition: User.h:44
llvm::getKnownAlignment
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:223
Intrinsics.h
llvm::CleanupReturnInst::Create
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:4620
llvm::LandingPadInst::getNumClauses
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Definition: Instructions.h:2951
InstrTypes.h
llvm::CallBase::getCalledFunction
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1393
llvm::CallBase::setAttributes
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1472
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:296
UpdatePHINodes
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
Definition: BasicBlockUtils.cpp:988
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1518
llvm::MDTuple
Tuple of metadata.
Definition: Metadata.h:1139
llvm::AttributeList::getParamAttributes
AttributeSet getParamAttributes(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
Definition: Attributes.cpp:1375
AssumeBundleBuilder.h
llvm::BlockFrequencyInfo::setBlockFreq
void setBlockFreq(const BasicBlock *BB, uint64_t Freq)
Definition: BlockFrequencyInfo.cpp:229
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:109
llvm::ClonedCodeInfo::OperandBundleCallSites
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:76
llvm::Function::arg_end
arg_iterator arg_end()
Definition: Function.h:802
llvm::PHINode::getIncomingValueForBlock
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: Instructions.h:2812
llvm::AttrBuilder::empty
bool empty() const
Return true if the builder contains no target-independent attributes.
Definition: Attributes.h:1003
llvm::Instruction
Definition: Instruction.h:45
llvm::SimplifyInstruction
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
Definition: InstructionSimplify.cpp:6246
llvm::ClonedCodeInfo::isSimplified
bool isSimplified(const Value *From, const Value *To) const
Definition: Cloning.h:85
MDBuilder.h
llvm::AllocaInst::getArraySize
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:100
HandleInlinedLandingPad
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
Definition: InlineFunction.cpp:607
llvm::Function::hasPersonalityFn
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:830
ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner
ScopedAliasMetadataDeepCloner(const Function *F)
Definition: InlineFunction.cpp:855
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1784
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:899
DebugLoc.h
SmallPtrSet.h
llvm::CallGraphNode
A node in the call graph for a module.
Definition: CallGraph.h:167
llvm::ValueMap::begin
iterator begin()
Definition: ValueMap.h:135
isUsedByLifetimeMarker
static bool isUsedByLifetimeMarker(Value *V)
Definition: InlineFunction.cpp:1448
Align
uint64_t Align
Definition: ELFObjHandler.cpp:83
llvm::BasicBlock::getFirstNonPHI
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:212
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Metadata
Root of the metadata hierarchy.
Definition: Metadata.h:62
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1449
llvm::ValueMap::count
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: ValueMap.h:152
llvm::Instruction::isLifetimeStartOrEnd
bool isLifetimeStartOrEnd() const
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
Definition: Instruction.cpp:706
llvm::None
const NoneType None
Definition: None.h:23
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:345
Type.h
getParentPad
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
Definition: InlineFunction.cpp:234
llvm::CallBase::getCaller
Function * getCaller()
Helper to get the caller (the parent function).
Definition: Instructions.cpp:282
llvm::DebugLoc::appendInlinedAt
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition: DebugLoc.cpp:71
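A sketch of the per-instruction step this enables in fixupLineNumbers: re-tag an inlined instruction so that its inlined-at chain bottoms out at the call site's DILocation (the helper name is illustrative):
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;
// Cache keeps rebuilt chains shared across instructions from the same scope.
static void retagLocation(Instruction &I, DILocation *CallSiteLoc,
                          DenseMap<const MDNode *, MDNode *> &Cache) {
  if (DebugLoc DL = I.getDebugLoc())
    I.setDebugLoc(
        DebugLoc::appendInlinedAt(DL, CallSiteLoc, I.getContext(), Cache));
}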
llvm::MDBuilder::createAnonymousAliasScopeDomain
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
Definition: MDBuilder.h:133
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:282
llvm::OperandBundleUse::getTagID
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1084
CFG.h
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:148
llvm::ProfileSummaryInfo
Analysis providing profile information.
Definition: ProfileSummaryInfo.h:39
llvm::InvokeInst
Invoke instruction.
Definition: Instructions.h:3743
inlineRetainOrClaimRVCalls
static void inlineRetainOrClaimRVCalls(CallBase &CB, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
Definition: InlineFunction.cpp:1675
llvm::Function::getGC
const std::string & getGC() const
Definition: Function.cpp:644
llvm::cl::ZeroOrMore
@ ZeroOrMore
Definition: CommandLine.h:120
llvm::InlineAsm
Definition: InlineAsm.h:31
VectorUtils.h
BasicBlock.h
llvm::cl::opt< bool >
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:401
llvm::RISCVFenceField::O
@ O
Definition: RISCVBaseInfo.h:179
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:304
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::getUnderlyingObjects
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Definition: ValueTracking.cpp:4401
llvm::Instruction::eraseFromParent
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:78
HandleByValArgumentInit
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
Definition: InlineFunction.cpp:1379
llvm::Function::getReturnType
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:180
llvm::InlineAsm::canThrow
bool canThrow() const
Definition: InlineAsm.h:71
UseNoAliasIntrinsic
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
inlineDebugLoc
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
Definition: InlineFunction.cpp:1484
llvm::InlineFunctionInfo::InlinedCalls
SmallVector< WeakTrackingVH, 8 > InlinedCalls
InlineFunction fills this in with callsites that were inlined from the callee.
Definition: Cloning.h:226
uint64_t
ProfileSummaryInfo.h
llvm::Function::getCallingConv
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:238
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:572
llvm::CallInst::TailCallKind
TailCallKind
Definition: Instructions.h:1652
hasLifetimeMarkers
static bool hasLifetimeMarkers(AllocaInst *AI)
Definition: InlineFunction.cpp:1458
llvm::Function::hasGC
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:393
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2777
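For illustration, a sketch of how the inliner merges several inlined 'ret' values: one PHI at the head of the continuation block, with one incoming pair per returning block (names here are illustrative, not the inliner's own):
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
#include <utility>
using namespace llvm;
static PHINode *mergeReturns(BasicBlock *AfterCallBB, Type *RetTy,
                             ArrayRef<std::pair<Value *, BasicBlock *>> Edges) {
  PHINode *PN =
      PHINode::Create(RetTy, Edges.size(), "retval", &AfterCallBB->front());
  for (const auto &Edge : Edges)
    PN->addIncoming(Edge.first, Edge.second); // value reaching from that block
  return PN;
}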
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:57
llvm::BranchInst::Create
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:3118
llvm::DenseMap
Definition: DenseMap.h:714
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::DebugLoc::get
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:21
llvm::AttrBuilder
Definition: Attributes.h:814
Cloning.h
StringExtras.h
llvm::BlockFrequency::getFrequency
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
Definition: BlockFrequency.h:35
llvm::isScopedEHPersonality
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Definition: EHPersonalities.h:80
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
DIBuilder.h
UpdateCallGraphAfterInlining
static void UpdateCallGraphAfterInlining(CallBase &CB, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflec...
Definition: InlineFunction.cpp:1308
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:631
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:369
llvm::LandingPadInst::getClause
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
Definition: Instructions.h:2936
AddAlignmentAssumptions
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
Definition: InlineFunction.cpp:1270
ScopedAliasMetadataDeepCloner
Utility for cloning !noalias and !alias.scope metadata.
Definition: InlineFunction.cpp:837
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::ValueMapIterator::ValueTypeProxy::second
ValueT & second
Definition: ValueMap.h:346
llvm::CallBase::hasOperandBundles
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:1901
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:840
SI
StandardInstrumentations SI(Debug, VerifyEach)
iterator_range.h
llvm::Type::isVoidTy
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:138
llvm::salvageKnowledge
void salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
Definition: AssumeBundleBuilder.cpp:293
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
llvm::MDNode
Metadata node.
Definition: Metadata.h:897
llvm::CallBase::Create
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, Instruction *InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
Definition: Instructions.cpp:255
llvm::changeToInvokeAndSplitBasicBlock
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2197
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::IRBuilderBase::CreateNoAliasScopeDeclaration
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
Definition: IRBuilder.cpp:488
llvm::CallBase::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
Definition: Instructions.cpp:311
getDebugLoc
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Definition: MachineInstrBundle.cpp:109
llvm::DominatorTreeBase::recalculate
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Definition: GenericDomTree.h:778
llvm::CloneAndPruneFunctionInto
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
Definition: CloneFunction.cpp:778
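A sketch of the mapping step at the heart of inlining, assuming a direct call whose arguments line up with the callee's parameters (the helper name is illustrative): seed the value map with actual-argument bindings, then clone the reachable body into the caller:
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
static void cloneCalleeInto(Function *Caller, const Function *Callee,
                            CallBase &CB,
                            SmallVectorImpl<ReturnInst *> &Returns) {
  ValueToValueMapTy VMap;
  auto AI = CB.arg_begin();
  for (const Argument &Arg : Callee->args())
    VMap[&Arg] = *AI++; // formal parameter -> actual argument
  CloneAndPruneFunctionInto(Caller, Callee, VMap,
                            /*ModuleLevelChanges=*/false, Returns, ".i");
}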
None.h
llvm::min
Expected< ExpressionValue > min(const ExpressionValue &Lhs, const ExpressionValue &Rhs)
Definition: FileCheck.cpp:357
DataLayout.h
llvm::LLVMContext::OB_deopt
@ OB_deopt
Definition: LLVMContext.h:90
llvm::Function::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:242
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:41
llvm::CallBase::getOperandBundleAt
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:1945
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::Function::getEntryCount
ProfileCount getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Definition: Function.cpp:1870
llvm::MDNode::concatenate
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:914
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:520
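For example, this is conceptually how the inliner retires the original call site once a single return value RV has been computed for it (a sketch, not this file's exact code):
#include "llvm/IR/InstrTypes.h"
using namespace llvm;
static void retireCallSite(CallBase *TheCall, Value *RV) {
  if (!TheCall->use_empty())
    TheCall->replaceAllUsesWith(RV); // redirect consumers of the call
  TheCall->eraseFromParent();        // then drop the dead call
}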
uint32_t
AddReturnAttributes
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap)
Definition: InlineFunction.cpp:1212
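A simplified sketch of that propagation for one inner call NRI whose result is returned straight to the inlined call site CB (the real helper first checks that nothing between NRI and the return may throw or exit; this assumes the pre-rename AttributeList::addAttributes overload present in this snapshot):
#include "llvm/IR/Attributes.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;
static void mergeReturnAttrs(CallBase &CB, CallBase &NRI) {
  AttrBuilder Valid;
  if (CB.hasRetAttr(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (CB.hasRetAttr(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  if (Valid.empty())
    return;
  NRI.setAttributes(NRI.getAttributes().addAttributes(
      NRI.getContext(), AttributeList::ReturnIndex, Valid));
}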
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:1744
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:979
HandleByValArgument
static Value * HandleByValArgument(Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
Definition: InlineFunction.cpp:1396
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
PreserveAlignmentAssumptions
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
llvm::pred_empty
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:119
llvm::updateLoopMetadataDebugLocations
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
Definition: DebugInfo.cpp:374
llvm::FunctionModRefBehavior
FunctionModRefBehavior
Summary of how a function affects memory in the program.
Definition: AliasAnalysis.h:262
llvm::CallInst::isMustTailCall
bool isMustTailCall() const
Definition: Instructions.h:1674
llvm::MDTuple::getTemporary
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition: Metadata.h:1185
BlockFrequencyInfo.h
ScopedAliasMetadataDeepCloner::remap
void remap(Function::iterator FStart, Function::iterator FEnd)
Remap instructions in the given range from the original to the cloned metadata.
Definition: InlineFunction.cpp:913
llvm::MDNode::getDistinct
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1206
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:297
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:127
llvm::ValueMap< const Value *, WeakTrackingVH >
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:175
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
llvm::EHPersonality
EHPersonality
Definition: EHPersonalities.h:22
llvm::CallBase::paramHasAttr
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Definition: Instructions.cpp:343
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:148
llvm::objcarc::GetRCIdentityRoot
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
Definition: ObjCARCAnalysisUtils.h:107
llvm::Constant::stripPointerCasts
const Constant * stripPointerCasts() const
Definition: Constant.h:201
llvm::Init
Definition: Record.h:271
llvm::AAResults::getModRefBehavior
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Return the behavior of the given call site.
Definition: AliasAnalysis.cpp:424
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:726
llvm::ClonedCodeInfo::ContainsCalls
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:66
ObjCARCAnalysisUtils.h
llvm::CallBase::doesNotThrow
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1845
llvm::Function::ProfileCount::getCount
uint64_t getCount() const
Definition: Function.h:303
llvm::OperandBundleUse::Inputs
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1057
llvm::InlineFunctionInfo::InlinedCallSites
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:233
Argument.h
llvm::BasicBlock::front
const Instruction & front() const
Definition: BasicBlock.h:308
ObjCARCUtil.h
llvm::BlockFrequencyInfo::setBlockFreqAndScale
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
Definition: BlockFrequencyInfo.cpp:234
llvm::InlineFunctionInfo::reset
void reset()
Definition: Cloning.h:239
Constant.h
llvm::ResumeInst
Resume the propagation of an exception.
Definition: Instructions.h:4193
llvm::MDNode::replaceAllUsesWith
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition: Metadata.h:982
llvm::Type::getInt64Ty
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:204
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:52
llvm::ValueMapIterator
Definition: ValueMap.h:49
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:314
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2669
llvm::Function::getArg
Argument * getArg(unsigned i) const
Definition: Function.h:811
llvm::AAResults::onlyAccessesInaccessibleMem
static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB)
Checks if functions with the specified behavior are known to read and write at most from memory that ...
Definition: AliasAnalysis.h:651
ProfileCount
Function::ProfileCount ProfileCount
Definition: InlineFunction.cpp:78
llvm::isGuaranteedToTransferExecutionToSuccessor
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
Definition: ValueTracking.cpp:5266
llvm::TypeSize
Definition: TypeSize.h:417
llvm::ConstantTokenNone::get
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1441
Casting.h
Function.h
updateCallProfile
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
Definition: InlineFunction.cpp:1606
llvm::Value::hasNUses
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
llvm::InlineFunctionInfo
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition: Cloning.h:201
llvm::Function::getFunctionType
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:175
llvm::ValueMap::find
iterator find(const KeyT &Val)
Definition: ValueMap.h:156
llvm::Instruction::isEHPad
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:663
llvm::InlineResult::failure
static InlineResult failure(const char *Reason)
Definition: InlineCost.h:165
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1386
ScopedAliasMetadataDeepCloner::clone
void clone()
Create a new clone of the scoped alias metadata, which will be used by subsequent remap() calls.
Definition: InlineFunction.cpp:883
llvm::BlockFrequencyInfo::getBlockFreq
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Definition: BlockFrequencyInfo.cpp:204
llvm::Function::getPersonalityFn
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1783
llvm::Function::arg_begin
arg_iterator arg_begin()
Definition: Function.h:793
EnableNoAliasConversion
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::AttrBuilder::addDereferenceableAttr
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
Definition: Attributes.cpp:1678
llvm::MDBuilder
Definition: MDBuilder.h:35
llvm::Function::front
const BasicBlock & front() const
Definition: Function.h:785
CallGraph.h
llvm::DebugLoc::getLine
unsigned getLine() const
Definition: DebugLoc.cpp:25
llvm::AttrBuilder::addAttribute
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
Definition: Attributes.h:840
llvm::changeToUnreachable
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2113
llvm::BasicBlock::getInstList
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:363
llvm::BasicBlock::reverse_iterator
InstListType::reverse_iterator reverse_iterator
Definition: BasicBlock.h:92
llvm::Function::getParamAlignment
unsigned getParamAlignment(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:486
llvm::InlineFunctionInfo::StaticAllocas
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition: Cloning.h:222
llvm::MDNode::isTemporary
bool isTemporary() const
Definition: Metadata.h:977
Instructions.h
llvm::numbers::phi
constexpr double phi
Definition: MathExtras.h:71
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:138
SmallVector.h
llvm::ilist_iterator::getReverse
ilist_iterator< OptionsT, !IsReverse, IsConst > getReverse() const
Get a reverse iterator to the same node.
Definition: ilist_iterator.h:121
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:372
User.h
llvm::InlineFunctionInfo::UpdateProfile
bool UpdateProfile
Update profile for callee as well as cloned version.
Definition: Cloning.h:237
Dominators.h
NoAliases
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
updateCallerBFI
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
Definition: InlineFunction.cpp:1577
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1338
llvm::X86II::OB
@ OB
Definition: X86BaseInfo.h:796
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
InstructionSimplify.h
llvm::LLVMContext::OB_funclet
@ OB_funclet
Definition: LLVMContext.h:91
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:340
llvm::PHINode
Definition: Instructions.h:2627
llvm::Function::onlyReadsMemory
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:556
llvm::BasicBlock::removePredecessor
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:321
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::InlineFunction
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
Definition: InlineFunction.cpp:1758
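For illustration, a minimal sketch of driving this entry point by hand, inlining every direct call to one callee (the helper name and collect-then-mutate strategy are illustrative):
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;
static unsigned inlineAllCallsTo(Function &Caller, Function &Callee) {
  // Collect first: inlining mutates Caller's instruction list.
  SmallVector<CallBase *, 8> Calls;
  for (Instruction &I : instructions(Caller))
    if (auto *CB = dyn_cast<CallBase>(&I))
      if (CB->getCalledFunction() == &Callee)
        Calls.push_back(CB);
  unsigned NumInlined = 0;
  for (CallBase *CB : Calls) {
    InlineFunctionInfo IFI; // no call-graph or profile updates requested
    if (InlineFunction(*CB, IFI).isSuccess())
      ++NumInlined;
  }
  return NumInlined;
}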
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1161
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:401
DerivedTypes.h
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1475
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: ValueMap.h:165
LLVMContext.h
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:370
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:62
llvm::Function::ProfileCount
Class to represent profile counts.
Definition: Function.h:292
llvm::DebugLoc::getScope
MDNode * getScope() const
Definition: DebugLoc.cpp:35
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::InlineFunctionInfo::GetAssumptionCache
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
Definition: Cloning.h:216
llvm::BasicBlock::getTerminatingMustTailCall
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked 'musttail' prior to the terminating return instruction of this ba...
Definition: BasicBlock.cpp:154
llvm::cl::desc
Definition: CommandLine.h:414
Mod
Module * Mod
Definition: PassBuilderBindings.cpp:54
llvm::BranchInst
Conditional or Unconditional Branch instruction.
Definition: Instructions.h:3062
InlinerAttributeWindow
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
llvm::ClonedCodeInfo::ContainsDynamicAllocas
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition: Cloning.h:71
llvm::Function::ProfileCount::hasValue
bool hasValue() const
Definition: Function.h:302
llvm::SetVector< const MDNode * >
llvm::CallBase::arg_operands
iterator_range< User::op_iterator > arg_operands()
Definition: InstrTypes.h:1330
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition: SmallVector.h:624
llvm::CallInst::TCK_NoTail
@ TCK_NoTail
Definition: Instructions.h:1656
llvm::IRBuilderBase::CreateAlignmentAssumption
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
Definition: IRBuilder.cpp:1234
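A short usage sketch, assuming a 16-byte guarantee on Ptr; this is the kind of @llvm.assume that AddAlignmentAssumptions emits for an align(16) argument when PreserveAlignmentAssumptions is on (the helper name is illustrative):
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;
static CallInst *assumeAligned16(Value *Ptr, Instruction *InsertPt) {
  IRBuilder<> Builder(InsertPt);
  const DataLayout &DL = InsertPt->getModule()->getDataLayout();
  return Builder.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16);
}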
Value.h
llvm::InvokeInst::getUnwindDest
BasicBlock * getUnwindDest() const
Definition: Instructions.h:3878
llvm::InlineFunctionInfo::CalleeBFI
BlockFrequencyInfo * CalleeBFI
Definition: Cloning.h:218
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:521
llvm::InlineResult
InlineResult is basically true or false.
Definition: InlineCost.h:159
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:422
llvm::CallInst::getTailCallKind
TailCallKind getTailCallKind() const
Definition: Instructions.h:1665
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2391
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1319
SetVector.h
llvm::CallBase::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1453
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::Function::iterator
BasicBlockListType::iterator iterator
Definition: Function.h:66
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::CallGraphNode::begin
iterator begin()
Definition: CallGraph.h:200