//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

llvm::InlineResult llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}

llvm::InlineResult llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {

  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
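///
/// For illustration only (a sketch with invented names, not from the original
/// source): a resume such as
///   resume { i8*, i32 } %exn
/// in the inlined body becomes
///   br label %lpad.body
/// with %exn added as an incoming value of the PHI that merges the exception
/// values in the (possibly split) landing pad block.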
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
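///
/// For illustration only (a sketch with invented names): for
///   %cleanup = cleanuppad within none []
///   cleanupret from %cleanup unwind to caller
/// the unwind dest token is ConstantTokenNone, whereas
///   cleanupret from %cleanup unwind label %other.pad
/// yields the first non-PHI instruction (the EH pad) of %other.pad.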
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
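///
/// For illustration only (a sketch; labels are invented, not from the
/// original source): a throwing call in the inlined body such as
///   call void @may_throw()
/// is rewritten roughly into
///   invoke void @may_throw()
///           to label %split.cont unwind label %unwind.edge
/// where %unwind.edge is the unwind destination passed in as UnwindEdge.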
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
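///
/// For illustration only (a sketch with invented names): if the caller's
/// landing pad is
///   %lp.outer = landingpad { i8*, i32 } catch i8* @tid1
/// then an inlined landing pad such as
///   %lp.inner = landingpad { i8*, i32 } catch i8* @tid0
/// ends up with the outer clauses appended:
///   %lp.inner = landingpad { i8*, i32 } catch i8* @tid0 catch i8* @tid1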
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access or
/// llvm.access.group metadata, that metadata should be propagated to all
/// memory-accessing cloned instructions.
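///
/// For illustration only (a sketch; !0 is an invented loop-ID node): if the
/// call site carries
///   call void @f(), !llvm.mem.parallel_loop_access !0
/// then each cloned memory access is tagged the same way, e.g.
///   %v = load i32, i32* %p, !llvm.mem.parallel_loop_access !0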
static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                ValueToValueMapTy &VMap) {
  MDNode *M = CS.getInstruction()->getMetadata(
      LLVMContext::MD_mem_parallel_loop_access);
  MDNode *CallAccessGroup =
      CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
  if (!M && !CallAccessGroup)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (M) {
      if (MDNode *PM =
              NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
        M = MDNode::concatenate(PM, M);
        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
      } else if (NI->mayReadOrWriteMemory()) {
        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
      }
    }

    if (NI->mayReadOrWriteMemory()) {
      MDNode *UnitedAccGroups = uniteAccessGroups(
          NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
      NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
    }
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
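///
/// For illustration only (a sketch; the metadata numbering is invented): a
/// callee scope chain such as
///   !0 = !{!1}                          ; scope list
///   !1 = distinct !{!1, !2, !"f: %a"}   ; scope
///   !2 = distinct !{!2, !"f"}           ; domain
/// is re-created with fresh distinct nodes for each inlined body, so two
/// inlinings of the same callee do not share the scope !1.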
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
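///
/// For illustration only (a sketch; metadata numbers are invented): after
/// inlining
///   define void @f(i8* noalias %a, i8* noalias %b) { ... }
/// a cloned store through %a might be tagged roughly as
///   store i8 0, i8* %a.val, !alias.scope !1, !noalias !2
/// where !1 lists the new scope created for %a and !2 the one for %b.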
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (Arg.hasNoAliasAttr() && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
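///
/// For illustration only (a sketch; value names are invented): for an
/// argument with align 16, the materialized assumption looks roughly like
///   %ptrint = ptrtoint i8* %arg to i64
///   %maskedptr = and i64 %ptrint, 15
///   %maskcond = icmp eq i64 %maskedptr, 0
///   call void @llvm.assume(i1 %maskcond)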
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CS.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CS.getArgument(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
                              .CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(NewAsmp);
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getCaller();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, /*DstAlign*/1, Src, /*SrcAlign*/1, Size);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
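///
/// For illustration only (a sketch; names are invented): a call site such as
///   call void @f(%struct.S* byval align 8 %s)
/// gets an explicit caller-side copy when the callee may write to it:
///   %s.copy = alloca %struct.S, align 8
///   ; followed by a memcpy of %s into %s.copy via @llvm.memcpy.p0i8.p0i8.i64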
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = DL.getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(),
                                    nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Update inlined instructions' line numbers to
/// encode location where these instructions are inlined.
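///
/// For illustration only (a sketch; metadata numbers are invented): an
/// instruction at callee line 10, inlined at a call on caller line 42, keeps
/// its line but gains an inlined-at link to a distinct copy of the call
/// location:
///   !10 = !DILocation(line: 10, scope: !f, inlinedAt: !42)
///   !42 = distinct !DILocation(line: 42, scope: !caller)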
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // each other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      if (DebugLoc DL = BI->getDebugLoc()) {
        auto IA = DebugLoc::appendInlinedAt(DL, InlinedAtNode, BI->getContext(),
                                            IANodes);
        auto IDL = DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), IA);
        BI->setDebugLoc(IDL);
        continue;
      }

      if (CalleeHasDebugInfo)
        continue;

      // If the inlined instruction has no line number, make it look as if it
      // originates from the call location. This is important for
      // ((__always_inline__, __nodebug__)) functions which must use caller
      // location for all instructions in their function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }
  }
}

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// callee's entry block gets the same frequency as the callsite block and the
/// relative frequencies of all cloned blocks remain the same after cloning.
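///
/// For illustration only (with invented numbers): if the callee entry block
/// has frequency 100, a callee block has frequency 50, and the call site
/// block has frequency 10, the clone of that block ends up with frequency
/// 10 * 50 / 100 = 5.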
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto const &Entry : VMap) {
1419  if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1420  continue;
1421  auto *OrigBB = cast<BasicBlock>(Entry.first);
1422  auto *ClonedBB = cast<BasicBlock>(Entry.second);
1423  uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1424  if (!ClonedBBs.insert(ClonedBB).second) {
1425  // Multiple blocks in the callee might get mapped to one cloned block in
1426  // the caller since we prune the callee as we clone it. When that happens,
1427  // we want to use the maximum among the original blocks' frequencies.
1428  uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1429  if (NewFreq > Freq)
1430  Freq = NewFreq;
1431  }
1432  CallerBFI->setBlockFreq(ClonedBB, Freq);
1433  }
1434  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1435  CallerBFI->setBlockFreqAndScale(
1436  EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1437  ClonedBBs);
1438 }
1439 
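// Worked example (illustrative numbers): if the callsite block has frequency
// 200 and the callee entry has frequency 100, each cloned block is scaled by
// 200/100 = 2; a callee block at frequency 50 thus gets frequency 100 in the
// caller, and the cloned entry block matches the callsite block exactly.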
1440 /// Update the branch metadata for cloned call instructions.
1441 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1442  const ProfileCount &CalleeEntryCount,
1443  const Instruction *TheCall,
1444  ProfileSummaryInfo *PSI,
1445  BlockFrequencyInfo *CallerBFI) {
1446  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1447  CalleeEntryCount.getCount() < 1)
1448  return;
1449  auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1450  uint64_t CallCount =
1451  std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1452  CalleeEntryCount.getCount());
1453 
1454  for (auto const &Entry : VMap)
1455  if (isa<CallInst>(Entry.first))
1456  if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1457  CI->updateProfWeight(CallCount, CalleeEntryCount.getCount());
1458  for (BasicBlock &BB : *Callee)
1459  // No need to update the callsite if it is pruned during inlining.
1460  if (VMap.count(&BB))
1461  for (Instruction &I : BB)
1462  if (CallInst *CI = dyn_cast<CallInst>(&I))
1463  CI->updateProfWeight(CalleeEntryCount.getCount() - CallCount,
1464  CalleeEntryCount.getCount());
1465 }
1466 
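// Worked example (illustrative numbers): with a callee entry count of 100 and
// an estimated callsite count of 60, CallCount = min(60, 100) = 60, so calls
// cloned into the caller are scaled by 60/100 and the calls remaining in the
// original callee by (100 - 60)/100; together both copies still account for
// the original 100 executions.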
1467 /// Update the entry count of callee after inlining.
1468 ///
1469 /// The callsite's block count is subtracted from the callee's function entry
1470 /// count.
1471 static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB,
1472  Instruction *CallInst, Function *Callee,
1473  ProfileSummaryInfo *PSI) {
1474  // If the callee has an original count of N, and the estimated count of
1475  // callsite is M, the new callee count is set to N - M. M is estimated from
1476  // the caller's entry count, its entry block frequency and the block frequency
1477  // of the callsite.
1478  auto CalleeCount = Callee->getEntryCount();
1479  if (!CalleeCount.hasValue() || !PSI)
1480  return;
1481  auto CallCount = PSI->getProfileCount(CallInst, CallerBFI);
1482  if (!CallCount.hasValue())
1483  return;
1484  // Since CallCount is an estimate, it could exceed the original callee
1485  // count, in which case the new callee count is clamped to 0.
1486  if (CallCount.getValue() > CalleeCount.getCount())
1487  CalleeCount.setCount(0);
1488  else
1489  CalleeCount.setCount(CalleeCount.getCount() - CallCount.getValue());
1490  Callee->setEntryCount(CalleeCount);
1491 }
1492 
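// Worked example (illustrative numbers): a callee entry count of N = 100 and
// an estimated callsite count of M = 30 leave the callee with 100 - 30 = 70;
// if the estimate were M = 120 instead, the count would be clamped to 0
// rather than going negative.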
1493 /// This function inlines the called function into the basic block of the
1494 /// caller. This returns false if it is not possible to inline this call.
1495 /// The program is still in a well defined state if this occurs though.
1496 ///
1497 /// Note that this only does one level of inlining. For example, if the
1498 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1499 /// exists in the instruction stream. Similarly this will inline a recursive
1500 /// function by one level.
1501 llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1502  AAResults *CalleeAAR,
1503  bool InsertLifetime,
1504  Function *ForwardVarArgsTo) {
1505  Instruction *TheCall = CS.getInstruction();
1506  assert(TheCall->getParent() && TheCall->getFunction()
1507  && "Instruction not in function!");
1508 
1509  // If IFI has any state in it, zap it before we fill it in.
1510  IFI.reset();
1511 
1512  Function *CalledFunc = CS.getCalledFunction();
1513  if (!CalledFunc || // Can't inline external function or indirect
1514  CalledFunc->isDeclaration()) // call!
1515  return "external or indirect";
1516 
1517  // The inliner does not know how to inline through calls with operand bundles
1518  // in general ...
1519  if (CS.hasOperandBundles()) {
1520  for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1521  uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1522  // ... but it knows how to inline through "deopt" operand bundles ...
1523  if (Tag == LLVMContext::OB_deopt)
1524  continue;
1525  // ... and "funclet" operand bundles.
1526  if (Tag == LLVMContext::OB_funclet)
1527  continue;
1528 
1529  return "unsupported operand bundle";
1530  }
1531  }
1532 
1533  // If the call to the callee cannot throw, set the 'nounwind' flag on any
1534  // calls that we inline.
1535  bool MarkNoUnwind = CS.doesNotThrow();
1536 
1537  BasicBlock *OrigBB = TheCall->getParent();
1538  Function *Caller = OrigBB->getParent();
1539 
1540  // GC poses two hazards to inlining, which only occur when the callee has GC:
1541  // 1. If the caller has no GC, then the callee's GC must be propagated to the
1542  // caller.
1543  // 2. If the caller has a differing GC, it is invalid to inline.
1544  if (CalledFunc->hasGC()) {
1545  if (!Caller->hasGC())
1546  Caller->setGC(CalledFunc->getGC());
1547  else if (CalledFunc->getGC() != Caller->getGC())
1548  return "incompatible GC";
1549  }
1550 
1551  // Get the personality function from the callee if it contains a landing pad.
1552  Constant *CalledPersonality =
1553  CalledFunc->hasPersonalityFn()
1554  ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1555  : nullptr;
1556 
1557  // Find the personality function used by the landing pads of the caller. If it
1558  // exists, then check to see that it matches the personality function used in
1559  // the callee.
1560  Constant *CallerPersonality =
1561  Caller->hasPersonalityFn()
1562  ? Caller->getPersonalityFn()->stripPointerCasts()
1563  : nullptr;
1564  if (CalledPersonality) {
1565  if (!CallerPersonality)
1566  Caller->setPersonalityFn(CalledPersonality);
1567  // If the personality functions match, then we can perform the
1568  // inlining. Otherwise, we can't inline.
1569  // TODO: This isn't 100% true. Some personality functions are proper
1570  // supersets of others and can be used in place of the other.
1571  else if (CalledPersonality != CallerPersonality)
1572  return "incompatible personality";
1573  }
1574 
1575  // We need to figure out which funclet the callsite was in so that we may
1576  // properly nest the callee.
1577  Instruction *CallSiteEHPad = nullptr;
1578  if (CallerPersonality) {
1579  EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1580  if (isScopedEHPersonality(Personality)) {
1581  Optional<OperandBundleUse> ParentFunclet =
1582  CS.getOperandBundle(LLVMContext::OB_funclet);
1583  if (ParentFunclet)
1584  CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1585 
1586  // OK, the inlining site is legal. What about the target function?
1587 
1588  if (CallSiteEHPad) {
1589  if (Personality == EHPersonality::MSVC_CXX) {
1590  // The MSVC personality cannot tolerate catches getting inlined into
1591  // cleanup funclets.
1592  if (isa<CleanupPadInst>(CallSiteEHPad)) {
1593  // Ok, the call site is within a cleanuppad. Let's check the callee
1594  // for catchpads.
1595  for (const BasicBlock &CalledBB : *CalledFunc) {
1596  if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1597  return "catch in cleanup funclet";
1598  }
1599  }
1600  } else if (isAsynchronousEHPersonality(Personality)) {
1601  // SEH is even less tolerant; there may not be any sort of exceptional
1602  // funclet in the callee.
1603  for (const BasicBlock &CalledBB : *CalledFunc) {
1604  if (CalledBB.isEHPad())
1605  return "SEH in cleanup funclet";
1606  }
1607  }
1608  }
1609  }
1610  }
1611 
1612  // Determine if we are dealing with a call in an EHPad which does not unwind
1613  // to caller.
1614  bool EHPadForCallUnwindsLocally = false;
1615  if (CallSiteEHPad && CS.isCall()) {
1616  UnwindDestMemoTy FuncletUnwindMap;
1617  Value *CallSiteUnwindDestToken =
1618  getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1619 
1620  EHPadForCallUnwindsLocally =
1621  CallSiteUnwindDestToken &&
1622  !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1623  }
1624 
1625  // Get an iterator to the last basic block in the function, which will have
1626  // the new function inlined after it.
1627  Function::iterator LastBlock = --Caller->end();
1628 
1629  // Make sure to capture all of the return instructions from the cloned
1630  // function.
1631  SmallVector<ReturnInst*, 8> Returns;
1632  ClonedCodeInfo InlinedFunctionInfo;
1633  Function::iterator FirstNewBlock;
1634 
1635  { // Scope to destroy VMap after cloning.
1636  ValueToValueMapTy VMap;
1637  // Keep a list of pair (dst, src) to emit byval initializations.
1638  SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1639 
1640  auto &DL = Caller->getParent()->getDataLayout();
1641 
1642  // Calculate the vector of arguments to pass into the function cloner, which
1643  // matches up the formal to the actual argument values.
1644  CallSite::arg_iterator AI = CS.arg_begin();
1645  unsigned ArgNo = 0;
1646  for (Function::arg_iterator I = CalledFunc->arg_begin(),
1647  E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1648  Value *ActualArg = *AI;
1649 
1650  // When byval arguments are actually inlined, we need to make the copy implied
1651  // by them explicit. However, we don't do this if the callee is readonly
1652  // or readnone, because the copy would be unneeded: the callee doesn't
1653  // modify the struct.
1654  if (CS.isByValArgument(ArgNo)) {
1655  ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1656  CalledFunc->getParamAlignment(ArgNo));
1657  if (ActualArg != *AI)
1658  ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1659  }
1660 
1661  VMap[&*I] = ActualArg;
1662  }
1663 
1664  // Add alignment assumptions if necessary. We do this before the inlined
1665  // instructions are actually cloned into the caller so that we can easily
1666  // check what will be known at the start of the inlined code.
1667  AddAlignmentAssumptions(CS, IFI);
1668 
1669  // We want the inliner to prune the code as it copies. We would LOVE to
1670  // have no dead or constant instructions leftover after inlining occurs
1671  // (which can happen, e.g., because an argument was constant), but we'll be
1672  // happy with whatever the cloner can do.
1673  CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1674  /*ModuleLevelChanges=*/false, Returns, ".i",
1675  &InlinedFunctionInfo, TheCall);
1676  // Remember the first block that is newly cloned over.
1677  FirstNewBlock = LastBlock; ++FirstNewBlock;
1678 
1679  if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1680  // Update the BFI of blocks cloned into the caller.
1681  updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1682  CalledFunc->front());
1683 
1684  updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1685  IFI.PSI, IFI.CallerBFI);
1686  // Update the profile count of callee.
1687  updateCalleeCount(IFI.CallerBFI, OrigBB, TheCall, CalledFunc, IFI.PSI);
1688 
1689  // Inject byval arguments initialization.
1690  for (std::pair<Value*, Value*> &Init : ByValInit)
1691  HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1692  &*FirstNewBlock, IFI);
1693 
1694  Optional<OperandBundleUse> ParentDeopt =
1695  CS.getOperandBundle(LLVMContext::OB_deopt);
1696  if (ParentDeopt) {
1697  SmallVector<OperandBundleDef, 2> OpDefs;
1698 
1699  for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1700  Instruction *I = dyn_cast_or_null<Instruction>(VH);
1701  if (!I) continue; // instruction was DCE'd or RAUW'ed to undef
1702 
1703  OpDefs.clear();
1704 
1705  CallSite ICS(I);
1706  OpDefs.reserve(ICS.getNumOperandBundles());
1707 
1708  for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1709  auto ChildOB = ICS.getOperandBundleAt(i);
1710  if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1711  // If the inlined call has other operand bundles, let them be
1712  OpDefs.emplace_back(ChildOB);
1713  continue;
1714  }
1715 
1716  // It may be useful to separate this logic (of handling operand
1717  // bundles) out to a separate "policy" component if this gets crowded.
1718  // Prepend the parent's deoptimization continuation to the newly
1719  // inlined call's deoptimization continuation.
1720  std::vector<Value *> MergedDeoptArgs;
1721  MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1722  ChildOB.Inputs.size());
1723 
1724  MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1725  ParentDeopt->Inputs.begin(),
1726  ParentDeopt->Inputs.end());
1727  MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1728  ChildOB.Inputs.end());
1729 
1730  OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1731  }
1732 
1733  Instruction *NewI = nullptr;
1734  if (isa<CallInst>(I))
1735  NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1736  else
1737  NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1738 
1739  // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1740  // this even if the call returns void.
1741  I->replaceAllUsesWith(NewI);
1742 
1743  VH = nullptr;
1744  I->eraseFromParent();
1745  }
1746  }
1747 
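// Illustrative example (hypothetical IR) of the merging above: inlining
//   call void @f() [ "deopt"(i32 1, i32 2) ]
// where the body of @f contains
//   call void @g() [ "deopt"(i32 3) ]
// rewrites the inner call so the parent's continuation comes first:
//   call void @g() [ "deopt"(i32 1, i32 2, i32 3) ]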
1748  // Update the callgraph if requested.
1749  if (IFI.CG)
1750  UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1751 
1752  // For 'nodebug' functions, the associated DISubprogram is always null.
1753  // Conservatively avoid propagating the callsite debug location to
1754  // instructions inlined from a function whose DISubprogram is not null.
1755  fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1756  CalledFunc->getSubprogram() != nullptr);
1757 
1758  // Clone existing noalias metadata if necessary.
1759  CloneAliasScopeMetadata(CS, VMap);
1760 
1761  // Add noalias metadata if necessary.
1762  AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1763 
1764  // Propagate llvm.mem.parallel_loop_access if necessary.
1765  PropagateParallelLoopAccessMetadata(CS, VMap);
1766 
1767  // Register any cloned assumptions.
1768  if (IFI.GetAssumptionCache)
1769  for (BasicBlock &NewBlock :
1770  make_range(FirstNewBlock->getIterator(), Caller->end()))
1771  for (Instruction &I : NewBlock) {
1772  if (auto *II = dyn_cast<IntrinsicInst>(&I))
1773  if (II->getIntrinsicID() == Intrinsic::assume)
1774  (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1775  }
1776  }
1777 
1778  // If there are any alloca instructions in the block that used to be the entry
1779  // block for the callee, move them to the entry block of the caller. First
1780  // calculate which instruction they should be inserted before. We insert the
1781  // instructions at the end of the current alloca list.
1782  {
1783  BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1784  for (BasicBlock::iterator I = FirstNewBlock->begin(),
1785  E = FirstNewBlock->end(); I != E; ) {
1786  AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1787  if (!AI) continue;
1788 
1789  // If the alloca is now dead, remove it. This often occurs due to code
1790  // specialization.
1791  if (AI->use_empty()) {
1792  AI->eraseFromParent();
1793  continue;
1794  }
1795 
1796  if (!allocaWouldBeStaticInEntry(AI))
1797  continue;
1798 
1799  // Keep track of the static allocas that we inline into the caller.
1800  IFI.StaticAllocas.push_back(AI);
1801 
1802  // Scan for the block of allocas that we can move over, and move them
1803  // all at once.
1804  while (isa<AllocaInst>(I) &&
1805  allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1806  IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1807  ++I;
1808  }
1809 
1810  // Transfer all of the allocas over in a block. Using splice means
1811  // that the instructions aren't removed from the symbol table, then
1812  // reinserted.
1813  Caller->getEntryBlock().getInstList().splice(
1814  InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1815  }
1816  // Move any dbg.declares describing the allocas into the entry basic block.
1817  DIBuilder DIB(*Caller->getParent());
1818  for (auto &AI : IFI.StaticAllocas)
1819  replaceDbgDeclareForAlloca(AI, AI, DIB, DIExpression::NoDeref, 0,
1820  DIExpression::NoDeref);
1821  }
1822 
1823  SmallVector<Value*,4> VarArgsToForward;
1824  SmallVector<AttributeSet, 4> VarArgsAttrs;
1825  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1826  i < CS.getNumArgOperands(); i++) {
1827  VarArgsToForward.push_back(CS.getArgOperand(i));
1828  VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i));
1829  }
1830 
1831  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1832  if (InlinedFunctionInfo.ContainsCalls) {
1833  CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1834  if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1835  CallSiteTailKind = CI->getTailCallKind();
1836 
1837  // For inlining purposes, the "notail" marker is the same as no marker.
1838  if (CallSiteTailKind == CallInst::TCK_NoTail)
1839  CallSiteTailKind = CallInst::TCK_None;
1840 
1841  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1842  ++BB) {
1843  for (auto II = BB->begin(); II != BB->end();) {
1844  Instruction &I = *II++;
1845  CallInst *CI = dyn_cast<CallInst>(&I);
1846  if (!CI)
1847  continue;
1848 
1849  // Forward varargs from inlined call site to calls to the
1850  // ForwardVarArgsTo function, if requested, and to musttail calls.
1851  if (!VarArgsToForward.empty() &&
1852  ((ForwardVarArgsTo &&
1853  CI->getCalledFunction() == ForwardVarArgsTo) ||
1854  CI->isMustTailCall())) {
1855  // Collect attributes for non-vararg parameters.
1856  AttributeList Attrs = CI->getAttributes();
1857  SmallVector<AttributeSet, 8> ArgAttrs;
1858  if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1859  for (unsigned ArgNo = 0;
1860  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1861  ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1862  }
1863 
1864  // Add VarArg attributes.
1865  ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
1866  Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
1867  Attrs.getRetAttributes(), ArgAttrs);
1868  // Add VarArgs to existing parameters.
1869  SmallVector<Value *, 6> Params(CI->arg_operands());
1870  Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
1871  CallInst *NewCI =
1872  CallInst::Create(CI->getCalledFunction() ? CI->getCalledFunction()
1873  : CI->getCalledValue(),
1874  Params, "", CI);
1875  NewCI->setDebugLoc(CI->getDebugLoc());
1876  NewCI->setAttributes(Attrs);
1877  NewCI->setCallingConv(CI->getCallingConv());
1878  CI->replaceAllUsesWith(NewCI);
1879  CI->eraseFromParent();
1880  CI = NewCI;
1881  }
1882 
1883  if (Function *F = CI->getCalledFunction())
1884  InlinedDeoptimizeCalls |=
1885  F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1886 
1887  // We need to reduce the strength of any inlined tail calls. For
1888  // musttail, we have to avoid introducing potential unbounded stack
1889  // growth. For example, if functions 'f' and 'g' are mutually recursive
1890  // with musttail, we can inline 'g' into 'f' so long as we preserve
1891  // musttail on the cloned call to 'f'. If either the inlined call site
1892  // or the cloned call site is *not* musttail, the program already has
1893  // one frame of stack growth, so it's safe to remove musttail. Here is
1894  // a table of example transformations:
1895  //
1896  // f -> musttail g -> musttail f ==> f -> musttail f
1897  // f -> musttail g -> tail f ==> f -> tail f
1898  // f -> g -> musttail f ==> f -> f
1899  // f -> g -> tail f ==> f -> f
1900  //
1901  // Inlined notail calls should remain notail calls.
1902  CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1903  if (ChildTCK != CallInst::TCK_NoTail)
1904  ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1905  CI->setTailCallKind(ChildTCK);
1906  InlinedMustTailCalls |= CI->isMustTailCall();
1907 
1908  // Calls inlined through a 'nounwind' call site should be marked
1909  // 'nounwind'.
1910  if (MarkNoUnwind)
1911  CI->setDoesNotThrow();
1912  }
1913  }
1914  }
1915 
1916  // Leave lifetime markers for the static allocas, scoping them to the
1917  // function we just inlined.
1918  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1919  IRBuilder<> builder(&FirstNewBlock->front());
1920  for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1921  AllocaInst *AI = IFI.StaticAllocas[ai];
1922  // Don't mark swifterror allocas. They can't have bitcast uses.
1923  if (AI->isSwiftError())
1924  continue;
1925 
1926  // If the alloca is already scoped to something smaller than the whole
1927  // function then there's no need to add redundant, less accurate markers.
1928  if (hasLifetimeMarkers(AI))
1929  continue;
1930 
1931  // Try to determine the size of the allocation.
1932  ConstantInt *AllocaSize = nullptr;
1933  if (ConstantInt *AIArraySize =
1934  dyn_cast<ConstantInt>(AI->getArraySize())) {
1935  auto &DL = Caller->getParent()->getDataLayout();
1936  Type *AllocaType = AI->getAllocatedType();
1937  uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1938  uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1939 
1940  // Don't add markers for zero-sized allocas.
1941  if (AllocaArraySize == 0)
1942  continue;
1943 
1944  // Check that array size doesn't saturate uint64_t and doesn't
1945  // overflow when it's multiplied by type size.
1946  if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
1947  std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
1948  AllocaTypeSize) {
1949  AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1950  AllocaArraySize * AllocaTypeSize);
1951  }
1952  }
1953 
1954  builder.CreateLifetimeStart(AI, AllocaSize);
1955  for (ReturnInst *RI : Returns) {
1956  // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1957  // call and a return. The return kills all local allocas.
1958  if (InlinedMustTailCalls &&
1959  RI->getParent()->getTerminatingMustTailCall())
1960  continue;
1961  if (InlinedDeoptimizeCalls &&
1962  RI->getParent()->getTerminatingDeoptimizeCall())
1963  continue;
1964  IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1965  }
1966  }
1967  }
1968 
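// Illustrative example (hypothetical IR) of the markers inserted above for a
// 16-byte static alloca:
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %buf.i8)
//   ...inlined code using the alloca...
//   call void @llvm.lifetime.end.p0i8(i64 16, i8* %buf.i8)
// where %buf.i8 stands for the i8* bitcast of the alloca; when the size is
// unknown, the builder encodes it as -1.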
1969  // If the inlined code contained dynamic alloca instructions, wrap the inlined
1970  // code with llvm.stacksave/llvm.stackrestore intrinsics.
1971  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1972  Module *M = Caller->getParent();
1973  // Get the two intrinsics we care about.
1974  Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1975  Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
1976 
1977  // Insert the llvm.stacksave.
1978  CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1979  .CreateCall(StackSave, {}, "savedstack");
1980 
1981  // Insert a call to llvm.stackrestore before any return instructions in the
1982  // inlined function.
1983  for (ReturnInst *RI : Returns) {
1984  // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1985  // call and a return. The return will restore the stack pointer.
1986  if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1987  continue;
1988  if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1989  continue;
1990  IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1991  }
1992  }
1993 
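// Illustrative example (hypothetical IR) of the wrapping above:
//   %savedstack = call i8* @llvm.stacksave()
//   ...inlined code containing the dynamic allocas...
//   call void @llvm.stackrestore(i8* %savedstack)
// so stack memory allocated by the inlinee is reclaimed on every return path
// out of the inlined code.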
1994  // If we are inlining for an invoke instruction, we must make sure to rewrite
1995  // any call instructions into invoke instructions. This is sensitive to which
1996  // funclet pads were top-level in the inlinee, so must be done before
1997  // rewriting the "parent pad" links.
1998  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1999  BasicBlock *UnwindDest = II->getUnwindDest();
2000  Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2001  if (isa<LandingPadInst>(FirstNonPHI)) {
2002  HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2003  } else {
2004  HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2005  }
2006  }
2007 
2008  // Update the lexical scopes of the new funclets and callsites.
2009  // Anything that had 'none' as its parent is now nested inside the callsite's
2010  // EHPad.
2011 
2012  if (CallSiteEHPad) {
2013  for (Function::iterator BB = FirstNewBlock->getIterator(),
2014  E = Caller->end();
2015  BB != E; ++BB) {
2016  // Add bundle operands to any top-level call sites.
2017  SmallVector<OperandBundleDef, 1> OpBundles;
2018  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2019  Instruction *I = &*BBI++;
2020  CallSite CS(I);
2021  if (!CS)
2022  continue;
2023 
2024  // Skip call sites which are nounwind intrinsics.
2025  auto *CalledFn =
2026  dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2027  if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
2028  continue;
2029 
2030  // Skip call sites which already have a "funclet" bundle.
2031  if (CS.getOperandBundle(LLVMContext::OB_funclet))
2032  continue;
2033 
2034  CS.getOperandBundlesAsDefs(OpBundles);
2035  OpBundles.emplace_back("funclet", CallSiteEHPad);
2036 
2037  Instruction *NewInst;
2038  if (CS.isCall())
2039  NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
2040  else
2041  NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2042  NewInst->takeName(I);
2043  I->replaceAllUsesWith(NewInst);
2044  I->eraseFromParent();
2045 
2046  OpBundles.clear();
2047  }
2048 
2049  // It is problematic if the inlinee has a cleanupret which unwinds to
2050  // caller and we inline it into a call site which doesn't unwind but into
2051  // an EH pad that does. Such an edge must be dynamically unreachable.
2052  // As such, we replace the cleanupret with unreachable.
2053  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2054  if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2055  changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2056 
2057  Instruction *I = BB->getFirstNonPHI();
2058  if (!I->isEHPad())
2059  continue;
2060 
2061  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2062  if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2063  CatchSwitch->setParentPad(CallSiteEHPad);
2064  } else {
2065  auto *FPI = cast<FuncletPadInst>(I);
2066  if (isa<ConstantTokenNone>(FPI->getParentPad()))
2067  FPI->setParentPad(CallSiteEHPad);
2068  }
2069  }
2070  }
2071 
2072  if (InlinedDeoptimizeCalls) {
2073  // We need to at least remove the deoptimizing returns from the Return set,
2074  // so that the control flow from those returns does not get merged into the
2075  // caller (but terminate it instead). If the caller's return type does not
2076  // match the callee's return type, we also need to change the return type of
2077  // the intrinsic.
2078  if (Caller->getReturnType() == TheCall->getType()) {
2079  auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2080  return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2081  });
2082  Returns.erase(NewEnd, Returns.end());
2083  } else {
2084  SmallVector<ReturnInst *, 8> NormalReturns;
2085  Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2086  Caller->getParent(), Intrinsic::experimental_deoptimize,
2087  {Caller->getReturnType()});
2088 
2089  for (ReturnInst *RI : Returns) {
2090  CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2091  if (!DeoptCall) {
2092  NormalReturns.push_back(RI);
2093  continue;
2094  }
2095 
2096  // The calling convention on the deoptimize call itself may be bogus,
2097  // since the code we're inlining may have undefined behavior (and may
2098  // never actually execute at runtime); but all
2099  // @llvm.experimental.deoptimize declarations have to have the same
2100  // calling convention in a well-formed module.
2101  auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2102  NewDeoptIntrinsic->setCallingConv(CallingConv);
2103  auto *CurBB = RI->getParent();
2104  RI->eraseFromParent();
2105 
2106  SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2107  DeoptCall->arg_end());
2108 
2109  SmallVector<OperandBundleDef, 1> OpBundles;
2110  DeoptCall->getOperandBundlesAsDefs(OpBundles);
2111  DeoptCall->eraseFromParent();
2112  assert(!OpBundles.empty() &&
2113  "Expected at least the deopt operand bundle");
2114 
2115  IRBuilder<> Builder(CurBB);
2116  CallInst *NewDeoptCall =
2117  Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2118  NewDeoptCall->setCallingConv(CallingConv);
2119  if (NewDeoptCall->getType()->isVoidTy())
2120  Builder.CreateRetVoid();
2121  else
2122  Builder.CreateRet(NewDeoptCall);
2123  }
2124 
2125  // Leave behind the normal returns so we can merge control flow.
2126  std::swap(Returns, NormalReturns);
2127  }
2128  }
2129 
2130  // Handle any inlined musttail call sites. In order for a new call site to be
2131  // musttail, the source of the clone and the inlined call site must have been
2132  // musttail. Therefore it's safe to return without merging control into the
2133  // phi below.
2134  if (InlinedMustTailCalls) {
2135  // Check if we need to bitcast the result of any musttail calls.
2136  Type *NewRetTy = Caller->getReturnType();
2137  bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2138 
2139  // Handle the returns preceded by musttail calls separately.
2140  SmallVector<ReturnInst *, 8> NormalReturns;
2141  for (ReturnInst *RI : Returns) {
2142  CallInst *ReturnedMustTail =
2143  RI->getParent()->getTerminatingMustTailCall();
2144  if (!ReturnedMustTail) {
2145  NormalReturns.push_back(RI);
2146  continue;
2147  }
2148  if (!NeedBitCast)
2149  continue;
2150 
2151  // Delete the old return and any preceding bitcast.
2152  BasicBlock *CurBB = RI->getParent();
2153  auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2154  RI->eraseFromParent();
2155  if (OldCast)
2156  OldCast->eraseFromParent();
2157 
2158  // Insert a new bitcast and return with the right type.
2159  IRBuilder<> Builder(CurBB);
2160  Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2161  }
2162 
2163  // Leave behind the normal returns so we can merge control flow.
2164  std::swap(Returns, NormalReturns);
2165  }
2166 
2167  // Now that all of the transforms on the inlined code have taken place but
2168  // before we splice the inlined code into the CFG and lose track of which
2169  // blocks were actually inlined, collect the call sites. We only do this if
2170  // call graph updates weren't requested, as those provide value handle based
2171  // tracking of inlined call sites instead.
2172  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2173  // Otherwise just collect the raw call sites that were inlined.
2174  for (BasicBlock &NewBB :
2175  make_range(FirstNewBlock->getIterator(), Caller->end()))
2176  for (Instruction &I : NewBB)
2177  if (auto CS = CallSite(&I))
2178  IFI.InlinedCallSites.push_back(CS);
2179  }
2180 
2181  // If we cloned in _exactly one_ basic block, and if that block ends in a
2182  // return instruction, we splice the body of the inlined callee directly into
2183  // the calling basic block.
2184  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2185  // Move all of the instructions right before the call.
2186  OrigBB->getInstList().splice(TheCall->getIterator(),
2187  FirstNewBlock->getInstList(),
2188  FirstNewBlock->begin(), FirstNewBlock->end());
2189  // Remove the cloned basic block.
2190  Caller->getBasicBlockList().pop_back();
2191 
2192  // If the call site was an invoke instruction, add a branch to the normal
2193  // destination.
2194  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2195  BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2196  NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2197  }
2198 
2199  // If the return instruction returned a value, replace uses of the call with
2200  // uses of the returned value.
2201  if (!TheCall->use_empty()) {
2202  ReturnInst *R = Returns[0];
2203  if (TheCall == R->getReturnValue())
2204  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2205  else
2206  TheCall->replaceAllUsesWith(R->getReturnValue());
2207  }
2208  // Since we are now done with the Call/Invoke, we can delete it.
2209  TheCall->eraseFromParent();
2210 
2211  // Since we are now done with the return instruction, delete it also.
2212  Returns[0]->eraseFromParent();
2213 
2214  // We are now done with the inlining.
2215  return true;
2216  }
2217 
2218  // Otherwise, we have the normal case of more than one block to inline or
2219  // multiple return sites.
2220 
2221  // We want to clone the entire callee function into the hole between the
2222  // "starter" and "ender" blocks. How we accomplish this depends on whether
2223  // this is an invoke instruction or a call instruction.
2224  BasicBlock *AfterCallBB;
2225  BranchInst *CreatedBranchToNormalDest = nullptr;
2226  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2227 
2228  // Add an unconditional branch to make this look like the CallInst case...
2229  CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2230 
2231  // Split the basic block. This guarantees that no PHI nodes will have to be
2232  // updated due to new incoming edges, and makes the invoke case more
2233  // symmetric to the call case.
2234  AfterCallBB =
2235  OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2236  CalledFunc->getName() + ".exit");
2237 
2238  } else { // It's a call
2239  // If this is a call instruction, we need to split the basic block that
2240  // the call lives in.
2241  //
2242  AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2243  CalledFunc->getName() + ".exit");
2244  }
2245 
2246  if (IFI.CallerBFI) {
2247  // Copy original BB's block frequency to AfterCallBB
2248  IFI.CallerBFI->setBlockFreq(
2249  AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2250  }
2251 
2252  // Change the branch that used to go to AfterCallBB to branch to the first
2253  // basic block of the inlined function.
2254  //
2255  Instruction *Br = OrigBB->getTerminator();
2256  assert(Br && Br->getOpcode() == Instruction::Br &&
2257  "splitBasicBlock broken!");
2258  Br->setOperand(0, &*FirstNewBlock);
2259 
2260  // Now that the function is correct, make it a little bit nicer. In
2261  // particular, move the basic blocks inserted from the end of the function
2262  // into the space made by splitting the source basic block.
2263  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2264  Caller->getBasicBlockList(), FirstNewBlock,
2265  Caller->end());
2266 
2267  // Handle all of the return instructions that we just cloned in, and eliminate
2268  // any users of the original call/invoke instruction.
2269  Type *RTy = CalledFunc->getReturnType();
2270 
2271  PHINode *PHI = nullptr;
2272  if (Returns.size() > 1) {
2273  // The PHI node should go at the front of the new basic block to merge all
2274  // possible incoming values.
2275  if (!TheCall->use_empty()) {
2276  PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2277  &AfterCallBB->front());
2278  // Anything that used the result of the function call should now use the
2279  // PHI node as their operand.
2280  TheCall->replaceAllUsesWith(PHI);
2281  }
2282 
2283  // Loop over all of the return instructions adding entries to the PHI node
2284  // as appropriate.
2285  if (PHI) {
2286  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2287  ReturnInst *RI = Returns[i];
2288  assert(RI->getReturnValue()->getType() == PHI->getType() &&
2289  "Ret value not consistent in function!");
2290  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2291  }
2292  }
2293 
2294  // Add a branch to the merge points and remove return instructions.
2295  DebugLoc Loc;
2296  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2297  ReturnInst *RI = Returns[i];
2298  BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2299  Loc = RI->getDebugLoc();
2300  BI->setDebugLoc(Loc);
2301  RI->eraseFromParent();
2302  }
2303  // We need to set the debug location to *somewhere* inside the
2304  // inlined function. The line number may be nonsensical, but the
2305  // instruction will at least be associated with the right
2306  // function.
2307  if (CreatedBranchToNormalDest)
2308  CreatedBranchToNormalDest->setDebugLoc(Loc);
2309  } else if (!Returns.empty()) {
2310  // Otherwise, if there is exactly one return value, just replace anything
2311  // using the return value of the call with the computed value.
2312  if (!TheCall->use_empty()) {
2313  if (TheCall == Returns[0]->getReturnValue())
2314  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2315  else
2316  TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2317  }
2318 
2319  // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2320  BasicBlock *ReturnBB = Returns[0]->getParent();
2321  ReturnBB->replaceAllUsesWith(AfterCallBB);
2322 
2323  // Splice the code from the return block into the block that it will return
2324  // to, which contains the code that was after the call.
2325  AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2326  ReturnBB->getInstList());
2327 
2328  if (CreatedBranchToNormalDest)
2329  CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2330 
2331  // Delete the return instruction and the now-empty ReturnBB.
2332  Returns[0]->eraseFromParent();
2333  ReturnBB->eraseFromParent();
2334  } else if (!TheCall->use_empty()) {
2335  // No returns, but something is using the return value of the call. Just
2336  // nuke the result.
2337  TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2338  }
2339 
2340  // Since we are now done with the Call/Invoke, we can delete it.
2341  TheCall->eraseFromParent();
2342 
2343  // If we inlined any musttail calls and the original return is now
2344  // unreachable, delete it. It can only contain a bitcast and ret.
2345  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2346  AfterCallBB->eraseFromParent();
2347 
2348  // We should always be able to fold the entry block of the function into the
2349  // single predecessor of the block...
2350  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2351  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2352 
2353  // Splice the code entry block into calling block, right before the
2354  // unconditional branch.
2355  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2356  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2357 
2358  // Remove the unconditional branch.
2359  OrigBB->getInstList().erase(Br);
2360 
2361  // Now we can remove the CalleeEntry block, which is now empty.
2362  Caller->getBasicBlockList().erase(CalleeEntry);
2363 
2364  // If we inserted a phi node, check to see if it has a single value (e.g. all
2365  // the entries are the same or undef). If so, remove the PHI so it doesn't
2366  // block other optimizations.
2367  if (PHI) {
2368  AssumptionCache *AC =
2369  IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2370  auto &DL = Caller->getParent()->getDataLayout();
2371  if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2372  PHI->replaceAllUsesWith(V);
2373  PHI->eraseFromParent();
2374  }
2375  }
2376 
2377  return true;
2378 }
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:467
Return a value (possibly void), from a function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function&#39;s name starts with "llvm.".
Definition: Function.h:198
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks &#39;this&#39; from the containing basic block and deletes it.
Definition: Instruction.cpp:67
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs=false)
Notify the BasicBlock that the predecessor Pred is no longer able to reach it.
Definition: BasicBlock.cpp:301
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:21
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1183
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const Instruction *TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress, DIBuilder &Builder, bool DerefBefore, int Offset, bool DerefAfter)
Replaces llvm.dbg.declare instruction when the alloca it describes is replaced with a new value...
Definition: Local.cpp:1539
iterator erase(iterator where)
Definition: ilist.h:266
This class represents lattice values for constants.
Definition: AllocatorList.h:23
CallGraph * CG
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition: Cloning.h:186
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:64
iterator end()
Definition: Function.h:657
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:528
static DebugLoc appendInlinedAt(DebugLoc DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode *> &Cache, bool ReplaceLast=false)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition: DebugLoc.cpp:82
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
std::function< AssumptionCache &(Function &)> * GetAssumptionCache
Definition: Cloning.h:187
void push_back(const T &Elt)
Definition: SmallVector.h:217
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
This provides a very simple, boring adaptor for a begin and end iterator into a range type...
unsigned getParamAlignment(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:427
Analysis providing profile information.
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked &#39;musttail&#39; prior to the terminating return instruction of this ba...
Definition: BasicBlock.cpp:143
This class represents a function call, abstracting a target machine&#39;s calling convention.
void setGC(std::string Str)
Definition: Function.cpp:469
This file contains the declarations for metadata subclasses.
static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB, Instruction *CallInst, Function *Callee, ProfileSummaryInfo *PSI)
Update the entry count of callee after inlining.
A cache of @llvm.assume calls within a function.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:135
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:704
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add .assume-based alignment assumptions t...
Optional< uint64_t > getProfileCount(const Instruction *CallInst, BlockFrequencyInfo *BFI)
Returns the profile count for CallInst.
arg_iterator arg_end()
Definition: Function.h:679
A debug info location.
Definition: DebugLoc.h:33
Metadata node.
Definition: Metadata.h:863
F(f)
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1105
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1068
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:502
An instruction for reading from memory.
Definition: Instructions.h:167
InlineResult InlineFunction(CallInst *C, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true)
This function inlines the called function into the basic block of the caller.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument...
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:176
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:691
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:137
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void reserve(size_type N)
Definition: SmallVector.h:374
bool isMustTailCall() const
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition: Cloning.h:173
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:92
A node in the call graph for a module.
Definition: CallGraph.h:164
Tuple of metadata.
Definition: Metadata.h:1105
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions&#39; line numbers to to encode location where these instructions are inlined...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:268
Function::ProfileCount ProfileCount
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:221
void addCalledFunction(CallSite CS, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:232
unsigned getAllocaAddrSpace() const
Definition: DataLayout.h:257
amdgpu Simplify well known AMD library false Value Value const Twine & Name
const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize prior to the terminating return in...
Definition: BasicBlock.cpp:174
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:1895
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:370
ProfileCount getEntryCount() const
Get the entry count for this function.
Definition: Function.cpp:1380
void setEntryCount(ProfileCount Count, const DenseSet< GlobalValue::GUID > *Imports=nullptr)
Set the entry count for this function.
Definition: Function.cpp:1362
iterator end()
Definition: CallGraph.h:190
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:96
TailCallKind getTailCallKind() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:55
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:941
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
ReturnInst * CreateRet(Value *V)
Create a &#39;ret <val>&#39; instruction.
Definition: IRBuilder.h:828
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:742
AttributeSet getRetAttributes() const
The attributes for the ret value are returned.
InstrTy * getInstruction() const
Definition: CallSite.h:91
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a function that contains noalias scope metadata, this metadata needs to be cloned so th...
The only memory references in this function (if it has any) are non-volatile loads from objects point...
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq, SmallPtrSetImpl< BasicBlock *> &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
std::vector< CallRecord > CalledFunctionsVector
Definition: CallGraph.h:171
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Return the behavior of the given call site.
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction...
ValTy * getCalledValue() const
Return the pointer to function that is being called.
Definition: CallSite.h:99
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1554
static void UpdateCallGraphAfterInlining(CallSite CS, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflec...
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst *> &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, Instruction *TheCall=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1731
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock *> Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:244
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:77
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:141
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
ValTy * getArgOperand(unsigned i) const
Definition: CallSite.h:296
LLVMContext & getContext() const
Definition: Metadata.h:923
FunctionModRefBehavior
Summary of how a function affects memory in the program.
AttributeSet getParamAttributes(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:124
iterator find(const KeyT &Val)
Definition: ValueMap.h:161
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:82
BlockFrequencyInfo * CalleeBFI
Definition: Cloning.h:189
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:220
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
iterator_range< User::op_iterator > arg_operands()
Definition: InstrTypes.h:1126
const std::string & getGC() const
Definition: Function.cpp:464
An instruction for storing to memory.
Definition: Instructions.h:320
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:701
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:428
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, OrderedBasicBlock *OBB=nullptr, unsigned MaxUsesToExplore=DefaultMaxUsesToExplore)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
Debug location.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:290
iterator begin()
Definition: Function.h:655
InlineResult is basically true or false.
Definition: InlineCost.h:135
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
amdgpu Simplify well known AMD library false Value * Callee
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1019
Value * getOperand(unsigned i) const
Definition: User.h:169
Class to represent pointers.
Definition: DerivedTypes.h:466
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:1679
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
bool isCall() const
Return true if a CallInst is enclosed.
Definition: CallSite.h:86
static bool isUsedByLifetimeMarker(Value *V)
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:176
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(true), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Definition: CallSite.h:554
bool isVoidTy() const
Return true if this is &#39;void&#39;.
Definition: Type.h:140
const BasicBlock & getEntryBlock() const
Definition: Function.h:639
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Return a temporary node.
Definition: Metadata.h:1152
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Definition: Metadata.h:1165
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:216
SmallVector< CallSite, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:204
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:422
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:168
The landingpad instruction holds all of the information necessary to generate correct exception handl...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:189
bool hasNUses(unsigned N) const
Return true if this Value has exactly N users.
Definition: Value.cpp:130
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:307
uint64_t getCount() const
Definition: Function.h:271
Value * getCalledValue() const
Definition: InstrTypes.h:1173
bool hasName() const
Definition: Value.h:250
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1507
Conditional or Unconditional Branch instruction.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
ArrayRef< Use > Inputs
Definition: InstrTypes.h:914
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:128
Resume the propagation of an exception.
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata *> MDs)
Definition: Metadata.h:1173
Value * getIncomingValueForBlock(const BasicBlock *BB) const
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values.
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:223
const Instruction & front() const
Definition: BasicBlock.h:280
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1050
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:138
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
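The insert()/.second pair is the standard visited-set idiom in worklist traversals such as this file's unwind-destination search. A generic sketch over a CFG (Entry is a hypothetical starting block):

    SmallPtrSet<BasicBlock *, 8> Visited;
    SmallVector<BasicBlock *, 8> Worklist;
    Worklist.push_back(Entry);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (!Visited.insert(BB).second)
        continue; // already processed this block
      for (BasicBlock *Succ : successors(BB))
        Worklist.push_back(Succ);
    }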
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the pred_* functions.
Definition: Interval.h:112
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Definition: DataLayout.cpp:739
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
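A sketch of the classify-then-test pattern (F is a hypothetical Function; a null personality classifies as Unknown):

    EHPersonality Pers = classifyEHPersonality(
        F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr);
    if (isAsynchronousEHPersonality(Pers)) {
      // e.g. SEH: cleanups may run for faults, not just explicit throws.
    }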
bool hasOperandBundles() const
Definition: CallSite.h:534
void splice(iterator where, iplist_impl &L2)
Definition: ilist.h:328
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
ProfileSummaryInfo * PSI
Definition: Cloning.h:188
static bool hasLifetimeMarkers(AllocaInst *AI)
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1228
The only memory references in this function (if it has any) are non-volatile loads and stores from objects pointed to by its pointer-typed arguments, with arbitrary offsets.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
arg_iterator arg_begin()
Definition: Function.h:670
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:381
self_iterator getIterator()
Definition: ilist_node.h:81
void setTailCallKind(TailCallKind TCK)
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:59
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range))
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1225
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:192
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1414
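Typically used to detach any remaining uses before deleting an instruction (I is hypothetical):

    // Leftover uses now see undef; the instruction can then be removed.
    I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();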
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
const Constant * stripPointerCasts() const
Definition: Constant.h:173
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:528
This class represents the va_arg llvm instruction, which returns an argument of the specified type given a va_list and increments it.
iterator erase(const_iterator CI)
Definition: SmallVector.h:443
unsigned getNumArgOperands() const
Definition: CallSite.h:292
Class to represent profile counts.
Definition: Function.h:260
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:92
size_t size() const
Definition: SmallVector.h:52
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:219
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1225
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:105
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
iterator end()
Definition: ValueMap.h:141
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:333
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:297
Iterator for intrusive lists based on ilist_node.
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
ValTy * getArgument(unsigned ArgNo) const
Definition: CallSite.h:185
OperandBundleUse getOperandBundleAt(unsigned Index) const
Definition: CallSite.h:550
iterator end()
Definition: BasicBlock.h:270
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
Definition: Function.h:212
IterTy arg_begin() const
Definition: CallSite.h:570
Module.h This file contains the declarations for the Module class.
void setBlockFreq(const BasicBlock *BB, uint64_t Freq)
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:379
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition: InstrTypes.h:1714
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:621
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have.
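A sketch of merging one value per predecessor at a non-empty join block (BB and V are hypothetical; a real merge would pick a per-predecessor value):

    PHINode *PN =
        PHINode::Create(V->getType(), pred_size(BB), "merge", &BB->front());
    for (BasicBlock *Pred : predecessors(BB))
      PN->addIncoming(V, Pred);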
std::string utostr(uint64_t X, bool isNeg=false)
Definition: StringExtras.h:223
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1243
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:163
bool isCleanup() const
Return true if this landingpad instruction is a cleanup.
iterator_range< user_iterator > users()
Definition: Value.h:399
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:392
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:67
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1099
This file provides various utilities for inspecting and working with the control flow graph in LLVM IR.
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition: Cloning.h:193
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:73
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:132
FunTy * getCaller() const
Return the caller function for this call site.
Definition: CallSite.h:266
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generation.
Definition: Function.h:348
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:310
unsigned getNumOperandBundles() const
Definition: CallSite.h:530
void registerAssumption(CallInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
void emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:650
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
SmallVector< WeakTrackingVH, 8 > InlinedCalls
InlineFunction fills this in with callsites that were inlined from the callee.
Definition: Cloning.h:197
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Definition: CallSite.h:581
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value *> Args, const Twine &NameStr, Instruction *InsertBefore=nullptr)
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1224
StringRef getName() const
Return a constant reference to the value&#39;s name.
Definition: Value.cpp:213
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1180
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:106
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:895
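For example, merging the !noalias lists of two instructions onto one of them (I1 and I2 are hypothetical; concatenate tolerates null operands):

    MDNode *Merged =
        MDNode::concatenate(I1->getMetadata(LLVMContext::MD_noalias),
                            I2->getMetadata(LLVMContext::MD_noalias));
    I1->setMetadata(LLVMContext::MD_noalias, Merged);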
static void PropagateParallelLoopAccessMetadata(CallSite CS, ValueToValueMapTy &VMap)
When inlining a call site that has !llvm.mem.parallel_loop_access or llvm.access.group metadata, that metadata should be propagated to all memory-accessing cloned instructions.
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:114
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:72
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: CallSite.h:504
static Value * HandleByValArgument(Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by adding it.
iterator end()
Definition: DenseMap.h:108
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:65
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1247
unsigned getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:267
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:322
const BasicBlockListType & getBasicBlockList() const
Get the underlying elements of the Function.
Definition: Function.h:632
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1129
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:407
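A sketch of splitting at a call so the remainder becomes a separate successor block (CI is a hypothetical CallInst*):

    BasicBlock *BB = CI->getParent();
    // Everything from CI onward moves to the new ".cont" block; BB is
    // terminated with an unconditional branch to it.
    BasicBlock *Cont =
        BB->splitBasicBlock(CI->getIterator(), BB->getName() + ".cont");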
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:183
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:171
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: CallSite.h:597
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit.
Definition: Globals.cpp:205
FunTy * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect call).
Definition: CallSite.h:106
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value *> &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instructions.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
BlockFrequencyInfo * CallerBFI
Definition: Cloning.h:189
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition: Cloning.h:72
const BasicBlock & front() const
Definition: Function.h:662
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
BasicBlock * getUnwindDest() const
Module * getParent()
Get the module that contains this global value.
Definition: GlobalValue.h:565
LLVM Value Representation.
Definition: Value.h:72
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1298
uint64_t getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: DataLayout.h:418
A vector that has set insertion semantics.
Definition: SetVector.h:40
void removeCallEdgeFor(CallSite CS)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:187
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
AttributeSet getFnAttributes() const
The function attributes are returned.
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:1961
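A sketch of driving such a conversion (CI is a call that may throw; UnwindEdge is an existing EH block):

    // CI is replaced by an invoke; the returned block is the normal
    // destination holding the instructions that followed the call.
    BasicBlock *NormalDest = changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);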
Invoke instruction.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:572
iterator begin()
Definition: CallGraph.h:189
void setPersonalityFn(Constant *Fn)
Definition: Function.cpp:1303
AttributeList getAttributes() const
Get the parameter attributes of the call.
Definition: CallSite.h:328
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invokes.
void pop_back()
Definition: ilist.h:317
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1074
bool isEmpty() const
Return true if there are no attributes.
Definition: Attributes.h:655
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
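The canonical use, e.g. collapsing a PHI that turned out to have a single value (PN is a hypothetical PHINode*):

    const DataLayout &DL = PN->getModule()->getDataLayout();
    if (Value *V = SimplifyInstruction(PN, {DL})) {
      PN->replaceAllUsesWith(V);
      PN->eraseFromParent();
    }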
Root of the metadata hierarchy.
Definition: Metadata.h:57
void setDoesNotThrow()
Definition: InstrTypes.h:1555
bool use_empty() const
Definition: Value.h:322
iterator begin()
Definition: ValueMap.h:140
Type * getElementType() const
Definition: DerivedTypes.h:485
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:872
iterator_range< arg_iterator > args()
Definition: Function.h:688
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:43
const BasicBlock * getParent() const
Definition: Instruction.h:66
An instruction to allocate memory on the stack.
Definition: Instructions.h:59
bool mayReadOrWriteMemory() const
Return true if this instruction may read or write memory.
Definition: Instruction.h:521