1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CFG.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DIBuilder.h"
41 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/IntrinsicInst.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/MDBuilder.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/User.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/Support/Casting.h"
67 #include <algorithm>
68 #include <cassert>
69 #include <cstdint>
70 #include <iterator>
71 #include <limits>
72 #include <string>
73 #include <utility>
74 #include <vector>
75 
76 using namespace llvm;
78 
79 static cl::opt<bool>
80 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
81  cl::Hidden,
82  cl::desc("Convert noalias attributes to metadata during inlining."));
83 
84 static cl::opt<bool>
85  UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
86  cl::ZeroOrMore, cl::init(true),
87  cl::desc("Use the llvm.experimental.noalias.scope.decl "
88  "intrinsic during inlining."));
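// Illustrative usage (the flag names come from the declarations above; the
// exact pass pipeline spelling is only an example): as hidden cl::opt flags,
// these can be toggled from the opt command line, e.g.
//
//   opt -passes=inline -enable-noalias-to-md-conversion=false \
//       -use-noalias-intrinsic-during-inlining=false input.ll -S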
89 
90 // Disabled by default, because the added alignment assumptions may increase
91 // compile-time and block optimizations. This option is not suitable for use
92 // with frontends that emit comprehensive parameter alignment annotations.
93 static cl::opt<bool>
94 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
95  cl::init(false), cl::Hidden,
96  cl::desc("Convert align attributes to assumptions during inlining."));
97 
98 static cl::opt<bool> UpdateReturnAttributes(
99  "update-return-attrs", cl::init(true), cl::Hidden,
100  cl::desc("Update return attributes on calls within inlined body"));
101 
102 static cl::opt<unsigned> InlinerAttributeWindow(
103  "max-inst-checked-for-throw-during-inlining", cl::Hidden,
104  cl::desc("the maximum number of instructions analyzed for may throw during "
105  "attribute inference in inlined body"),
106  cl::init(4));
107 
108 namespace {
109 
110  /// A class for recording information about inlining a landing pad.
111  class LandingPadInliningInfo {
112  /// Destination of the invoke's unwind.
113  BasicBlock *OuterResumeDest;
114 
115  /// Destination for the callee's resume.
116  BasicBlock *InnerResumeDest = nullptr;
117 
118  /// LandingPadInst associated with the invoke.
119  LandingPadInst *CallerLPad = nullptr;
120 
121  /// PHI for EH values from landingpad insts.
122  PHINode *InnerEHValuesPHI = nullptr;
123 
124  SmallVector<Value*, 8> UnwindDestPHIValues;
125 
126  public:
127  LandingPadInliningInfo(InvokeInst *II)
128  : OuterResumeDest(II->getUnwindDest()) {
129  // If there are PHI nodes in the unwind destination block, we need to keep
130  // track of which values came into them from the invoke before removing
131  // the edge from this block.
132  BasicBlock *InvokeBB = II->getParent();
133  BasicBlock::iterator I = OuterResumeDest->begin();
134  for (; isa<PHINode>(I); ++I) {
135  // Save the value to use for this edge.
136  PHINode *PHI = cast<PHINode>(I);
137  UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
138  }
139 
140  CallerLPad = cast<LandingPadInst>(I);
141  }
142 
143  /// The outer unwind destination is the target of
144  /// unwind edges introduced for calls within the inlined function.
145  BasicBlock *getOuterResumeDest() const {
146  return OuterResumeDest;
147  }
148 
149  BasicBlock *getInnerResumeDest();
150 
151  LandingPadInst *getLandingPadInst() const { return CallerLPad; }
152 
153  /// Forward the 'resume' instruction to the caller's landing pad block.
154  /// When the landing pad block has only one predecessor, this is
155  /// a simple branch. When there is more than one predecessor, we need to
156  /// split the landing pad block after the landingpad instruction and jump
157  /// to there.
158  void forwardResume(ResumeInst *RI,
159  SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
160 
161  /// Add incoming-PHI values to the unwind destination block for the given
162  /// basic block, using the values for the original invoke's source block.
163  void addIncomingPHIValuesFor(BasicBlock *BB) const {
164  addIncomingPHIValuesForInto(BB, OuterResumeDest);
165  }
166 
167  void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
168  BasicBlock::iterator I = dest->begin();
169  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
170  PHINode *phi = cast<PHINode>(I);
171  phi->addIncoming(UnwindDestPHIValues[i], src);
172  }
173  }
174  };
175 
176 } // end anonymous namespace
177 
178 /// Get or create a target for the branch from ResumeInsts.
179 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
180  if (InnerResumeDest) return InnerResumeDest;
181 
182  // Split the landing pad.
183  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
184  InnerResumeDest =
185  OuterResumeDest->splitBasicBlock(SplitPoint,
186  OuterResumeDest->getName() + ".body");
187 
188  // The number of incoming edges we expect to the inner landing pad.
189  const unsigned PHICapacity = 2;
190 
191  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
192  Instruction *InsertPoint = &InnerResumeDest->front();
193  BasicBlock::iterator I = OuterResumeDest->begin();
194  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
195  PHINode *OuterPHI = cast<PHINode>(I);
196  PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
197  OuterPHI->getName() + ".lpad-body",
198  InsertPoint);
199  OuterPHI->replaceAllUsesWith(InnerPHI);
200  InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
201  }
202 
203  // Create a PHI for the exception values.
204  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
205  "eh.lpad-body", InsertPoint);
206  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
207  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
208 
209  // All done.
210  return InnerResumeDest;
211 }
212 
213 /// Forward the 'resume' instruction to the caller's landing pad block.
214 /// When the landing pad block has only one predecessor, this is a simple
215 /// branch. When there is more than one predecessor, we need to split the
216 /// landing pad block after the landingpad instruction and jump to there.
217 void LandingPadInliningInfo::forwardResume(
218  ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
219  BasicBlock *Dest = getInnerResumeDest();
220  BasicBlock *Src = RI->getParent();
221 
222  BranchInst::Create(Dest, Src);
223 
224  // Update the PHIs in the destination. They were inserted in an order which
225  // makes this work.
226  addIncomingPHIValuesForInto(Src, Dest);
227 
228  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
229  RI->eraseFromParent();
230 }
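// Illustrative example of the rewrite performed by forwardResume (block and
// value names are placeholders):
//
//   before, in the inlined body:        after:
//     resume { i8*, i32 } %lp             br label %lpad.body
//
// where %lpad.body is the block produced by getInnerResumeDest() and %lp is
// merged into InnerEHValuesPHI so the caller's landing pad code observes it.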
231 
232 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
233 static Value *getParentPad(Value *EHPad) {
234  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
235  return FPI->getParentPad();
236  return cast<CatchSwitchInst>(EHPad)->getParentPad();
237 }
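// Illustrative parent-pad chain (names are placeholders):
//
//   %outer = cleanuppad within none []
//   %cs    = catchswitch within %outer [label %handler] unwind to caller
//   ; in %handler:  %inner = catchpad within %cs [...]
//
// getParentPad(%inner) yields %cs, getParentPad(%cs) yields %outer, and
// getParentPad(%outer) yields the 'none' token.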
238 
239 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
240 
241 /// Helper for getUnwindDestToken that does the descendant-ward part of
242 /// the search.
243 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
244  UnwindDestMemoTy &MemoMap) {
245  SmallVector<Instruction *, 8> Worklist(1, EHPad);
246 
247  while (!Worklist.empty()) {
248  Instruction *CurrentPad = Worklist.pop_back_val();
249  // We only put pads on the worklist that aren't in the MemoMap. When
250  // we find an unwind dest for a pad we may update its ancestors, but
251  // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
252  // so they should never get updated while queued on the worklist.
253  assert(!MemoMap.count(CurrentPad));
254  Value *UnwindDestToken = nullptr;
255  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
256  if (CatchSwitch->hasUnwindDest()) {
257  UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
258  } else {
259  // Catchswitch doesn't have a 'nounwind' variant, and one might be
260  // annotated as "unwinds to caller" when really it's nounwind (see
261  // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
262  // parent's unwind dest from this. We can check its catchpads'
263  // descendants, since they might include a cleanuppad with an
264  // "unwinds to caller" cleanupret, which can be trusted.
265  for (auto HI = CatchSwitch->handler_begin(),
266  HE = CatchSwitch->handler_end();
267  HI != HE && !UnwindDestToken; ++HI) {
268  BasicBlock *HandlerBlock = *HI;
269  auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
270  for (User *Child : CatchPad->users()) {
271  // Intentionally ignore invokes here -- since the catchswitch is
272  // marked "unwind to caller", it would be a verifier error if it
273  // contained an invoke which unwinds out of it, so any invoke we'd
274  // encounter must unwind to some child of the catch.
275  if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
276  continue;
277 
278  Instruction *ChildPad = cast<Instruction>(Child);
279  auto Memo = MemoMap.find(ChildPad);
280  if (Memo == MemoMap.end()) {
281  // Haven't figured out this child pad yet; queue it.
282  Worklist.push_back(ChildPad);
283  continue;
284  }
285  // We've already checked this child, but might have found that
286  // it offers no proof either way.
287  Value *ChildUnwindDestToken = Memo->second;
288  if (!ChildUnwindDestToken)
289  continue;
290  // We already know the child's unwind dest, which can either
291  // be ConstantTokenNone to indicate unwind to caller, or can
292  // be another child of the catchpad. Only the former indicates
293  // the unwind dest of the catchswitch.
294  if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
295  UnwindDestToken = ChildUnwindDestToken;
296  break;
297  }
298  assert(getParentPad(ChildUnwindDestToken) == CatchPad);
299  }
300  }
301  }
302  } else {
303  auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
304  for (User *U : CleanupPad->users()) {
305  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
306  if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
307  UnwindDestToken = RetUnwindDest->getFirstNonPHI();
308  else
309  UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
310  break;
311  }
312  Value *ChildUnwindDestToken;
313  if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
314  ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
315  } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
316  Instruction *ChildPad = cast<Instruction>(U);
317  auto Memo = MemoMap.find(ChildPad);
318  if (Memo == MemoMap.end()) {
319  // Haven't resolved this child yet; queue it and keep searching.
320  Worklist.push_back(ChildPad);
321  continue;
322  }
323  // We've checked this child, but still need to ignore it if it
324  // had no proof either way.
325  ChildUnwindDestToken = Memo->second;
326  if (!ChildUnwindDestToken)
327  continue;
328  } else {
329  // Not a relevant user of the cleanuppad
330  continue;
331  }
332  // In a well-formed program, the child/invoke must either unwind to
333  // an(other) child of the cleanup, or exit the cleanup. In the
334  // first case, continue searching.
335  if (isa<Instruction>(ChildUnwindDestToken) &&
336  getParentPad(ChildUnwindDestToken) == CleanupPad)
337  continue;
338  UnwindDestToken = ChildUnwindDestToken;
339  break;
340  }
341  }
342  // If we haven't found an unwind dest for CurrentPad, we may have queued its
343  // children, so move on to the next in the worklist.
344  if (!UnwindDestToken)
345  continue;
346 
347  // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
348  // any ancestors of CurrentPad up to but not including UnwindDestToken's
349  // parent pad. Record this in the memo map, and check to see if the
350  // original EHPad being queried is one of the ones exited.
351  Value *UnwindParent;
352  if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
353  UnwindParent = getParentPad(UnwindPad);
354  else
355  UnwindParent = nullptr;
356  bool ExitedOriginalPad = false;
357  for (Instruction *ExitedPad = CurrentPad;
358  ExitedPad && ExitedPad != UnwindParent;
359  ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
360  // Skip over catchpads since they just follow their catchswitches.
361  if (isa<CatchPadInst>(ExitedPad))
362  continue;
363  MemoMap[ExitedPad] = UnwindDestToken;
364  ExitedOriginalPad |= (ExitedPad == EHPad);
365  }
366 
367  if (ExitedOriginalPad)
368  return UnwindDestToken;
369 
370  // Continue the search.
371  }
372 
373  // No definitive information is contained within this funclet.
374  return nullptr;
375 }
376 
377 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
378 /// return that pad instruction. If it unwinds to caller, return
379 /// ConstantTokenNone. If it does not have a definitive unwind destination,
380 /// return nullptr.
381 ///
382 /// This routine gets invoked for calls in funclets in inlinees when inlining
383 /// an invoke. Since many funclets don't have calls inside them, it's queried
384 /// on-demand rather than building a map of pads to unwind dests up front.
385 /// Determining a funclet's unwind dest may require recursively searching its
386 /// descendants, and also ancestors and cousins if the descendants don't provide
387 /// an answer. Since most funclets will have their unwind dest immediately
388 /// available as the unwind dest of a catchswitch or cleanupret, this routine
389 /// searches top-down from the given pad and then up. To avoid worst-case
390 /// quadratic run-time given that approach, it uses a memo map to avoid
391 /// re-processing funclet trees. The callers that rewrite the IR as they go
392 /// take advantage of this, for correctness, by checking/forcing rewritten
393 /// pads' entries to match the original callee view.
394 static Value *getUnwindDestToken(Instruction *EHPad,
395  UnwindDestMemoTy &MemoMap) {
396  // Catchpads unwind to the same place as their catchswitch;
397  // redirect any queries on catchpads so the code below can
398  // deal with just catchswitches and cleanuppads.
399  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
400  EHPad = CPI->getCatchSwitch();
401 
402  // Check if we've already determined the unwind dest for this pad.
403  auto Memo = MemoMap.find(EHPad);
404  if (Memo != MemoMap.end())
405  return Memo->second;
406 
407  // Search EHPad and, if necessary, its descendants.
408  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
409  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
410  if (UnwindDestToken)
411  return UnwindDestToken;
412 
413  // No information is available for this EHPad from itself or any of its
414  // descendants. An unwind all the way out to a pad in the caller would
415  // need also to agree with the unwind dest of the parent funclet, so
416  // search up the chain to try to find a funclet with information. Put
417  // null entries in the memo map to avoid re-processing as we go up.
418  MemoMap[EHPad] = nullptr;
419 #ifndef NDEBUG
420  SmallPtrSet<Instruction *, 4> TempMemos;
421  TempMemos.insert(EHPad);
422 #endif
423  Instruction *LastUselessPad = EHPad;
424  Value *AncestorToken;
425  for (AncestorToken = getParentPad(EHPad);
426  auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
427  AncestorToken = getParentPad(AncestorToken)) {
428  // Skip over catchpads since they just follow their catchswitches.
429  if (isa<CatchPadInst>(AncestorPad))
430  continue;
431  // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
432  // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
433  // call to getUnwindDestToken, that would mean that AncestorPad had no
434  // information in itself, its descendants, or its ancestors. If that
435  // were the case, then we should also have recorded the lack of information
436  // for the descendant that we're coming from. So assert that we don't
437  // find a null entry in the MemoMap for AncestorPad.
438  assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
439  auto AncestorMemo = MemoMap.find(AncestorPad);
440  if (AncestorMemo == MemoMap.end()) {
441  UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
442  } else {
443  UnwindDestToken = AncestorMemo->second;
444  }
445  if (UnwindDestToken)
446  break;
447  LastUselessPad = AncestorPad;
448  MemoMap[LastUselessPad] = nullptr;
449 #ifndef NDEBUG
450  TempMemos.insert(LastUselessPad);
451 #endif
452  }
453 
454  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
455  // returned nullptr (and likewise for EHPad and any of its ancestors up to
456  // LastUselessPad), so LastUselessPad has no information from below. Since
457  // getUnwindDestTokenHelper must investigate all downward paths through
458  // no-information nodes to prove that a node has no information like this,
459  // and since any time it finds information it records it in the MemoMap for
460  // not just the immediately-containing funclet but also any ancestors also
461  // exited, it must be the case that, walking downward from LastUselessPad,
462  // visiting just those nodes which have not been mapped to an unwind dest
463  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
464  // they are just used to keep getUnwindDestTokenHelper from repeating work),
465  // any node visited must have been exhaustively searched with no information
466  // for it found.
467  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
468  while (!Worklist.empty()) {
469  Instruction *UselessPad = Worklist.pop_back_val();
470  auto Memo = MemoMap.find(UselessPad);
471  if (Memo != MemoMap.end() && Memo->second) {
472  // Here the name 'UselessPad' is a bit of a misnomer, because we've found
473  // that it is a funclet that does have information about unwinding to
474  // a particular destination; its parent was a useless pad.
475  // Since its parent has no information, the unwind edge must not escape
476  // the parent, and must target a sibling of this pad. This local unwind
477  // gives us no information about EHPad. Leave it and the subtree rooted
478  // at it alone.
479  assert(getParentPad(Memo->second) == getParentPad(UselessPad));
480  continue;
481  }
482  // We know we don't have information for UselessPad. If it has an entry in
483  // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
484  // added on this invocation of getUnwindDestToken; if a previous invocation
485  // recorded nullptr, it would have had to prove that the ancestors of
486  // UselessPad, which include LastUselessPad, had no information, and that
487  // in turn would have required proving that the descendants of
488  // LastUselessPad, which include EHPad, have no information about
489  // LastUselessPad, which would imply that EHPad was mapped to nullptr in
490  // the MemoMap on that invocation, which isn't the case if we got here.
491  assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
492  // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
493  // information that we'd be contradicting by making a map entry for it
494  // (which is something that getUnwindDestTokenHelper must have proved for
495  // us to get here). Just assert on its direct users here; the checks in
496  // this downward walk at its descendants will verify that they don't have
497  // any unwind edges that exit 'UselessPad' either (i.e. they either have no
498  // unwind edges or unwind to a sibling).
499  MemoMap[UselessPad] = UnwindDestToken;
500  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
501  assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
502  for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
503  auto *CatchPad = HandlerBlock->getFirstNonPHI();
504  for (User *U : CatchPad->users()) {
505  assert(
506  (!isa<InvokeInst>(U) ||
507  (getParentPad(
508  cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
509  CatchPad)) &&
510  "Expected useless pad");
511  if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
512  Worklist.push_back(cast<Instruction>(U));
513  }
514  }
515  } else {
516  assert(isa<CleanupPadInst>(UselessPad));
517  for (User *U : UselessPad->users()) {
518  assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
519  assert((!isa<InvokeInst>(U) ||
520  (getParentPad(
521  cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
522  UselessPad)) &&
523  "Expected useless pad");
524  if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
525  Worklist.push_back(cast<Instruction>(U));
526  }
527  }
528  }
529 
530  return UnwindDestToken;
531 }
532 
533 /// When we inline a basic block into an invoke,
534 /// we have to turn all of the calls that can throw into invokes.
535 /// This function analyzes BB to see if there are any calls, and if so,
536 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
537 /// nodes in that block with the values specified in InvokeDestPHIValues.
538 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
539  BasicBlock *BB, BasicBlock *UnwindEdge,
540  UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
541  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
542  Instruction *I = &*BBI++;
543 
544  // We only need to check for function calls: inlined invoke
545  // instructions require no special handling.
546  CallInst *CI = dyn_cast<CallInst>(I);
547 
548  if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
549  continue;
550 
551  // We do not need to (and in fact, cannot) convert possibly throwing calls
552  // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
553  // invokes. The caller's "segment" of the deoptimization continuation
554  // attached to the newly inlined @llvm.experimental.deoptimize
555  // (resp. @llvm.experimental.guard) call should contain the exception
556  // handling logic, if any.
557  if (auto *F = CI->getCalledFunction())
558  if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
559  F->getIntrinsicID() == Intrinsic::experimental_guard)
560  continue;
561 
562  if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
563  // This call is nested inside a funclet. If that funclet has an unwind
564  // destination within the inlinee, then unwinding out of this call would
565  // be UB. Rewriting this call to an invoke which targets the inlined
566  // invoke's unwind dest would give the call's parent funclet multiple
567  // unwind destinations, which is something that subsequent EH table
568  // generation can't handle and that the verifier rejects. So when we
569  // see such a call, leave it as a call.
570  auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
571  Value *UnwindDestToken =
572  getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
573  if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
574  continue;
575 #ifndef NDEBUG
576  Instruction *MemoKey;
577  if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
578  MemoKey = CatchPad->getCatchSwitch();
579  else
580  MemoKey = FuncletPad;
581  assert(FuncletUnwindMap->count(MemoKey) &&
582  (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
583  "must get memoized to avoid confusing later searches");
584 #endif // NDEBUG
585  }
586 
587  changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
588  return BB;
589  }
590  return nullptr;
591 }
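// Illustrative example of the rewrite done here (function and block names are
// placeholders):
//
//   before:  %r = call i32 @may_throw()
//   after:   %r = invoke i32 @may_throw()
//                     to label %split unwind label %unwind.edge
//
// changeToInvokeAndSplitBasicBlock splits the block after the call, %split is
// the new continuation block, and %unwind.edge is the UnwindEdge passed in.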
592 
593 /// If we inlined an invoke site, we need to convert calls
594 /// in the body of the inlined function into invokes.
595 ///
596 /// II is the invoke instruction being inlined. FirstNewBlock is the first
597 /// block of the inlined code (the last block is the end of the function),
598 /// and InlineCodeInfo is information about the code that got inlined.
599 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
600  ClonedCodeInfo &InlinedCodeInfo) {
601  BasicBlock *InvokeDest = II->getUnwindDest();
602 
603  Function *Caller = FirstNewBlock->getParent();
604 
605  // The inlined code is currently at the end of the function, scan from the
606  // start of the inlined code to its end, checking for stuff we need to
607  // rewrite.
608  LandingPadInliningInfo Invoke(II);
609 
610  // Get all of the inlined landing pad instructions.
611  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
612  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
613  I != E; ++I)
614  if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
615  InlinedLPads.insert(II->getLandingPadInst());
616 
617  // Append the clauses from the outer landing pad instruction into the inlined
618  // landing pad instructions.
619  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
620  for (LandingPadInst *InlinedLPad : InlinedLPads) {
621  unsigned OuterNum = OuterLPad->getNumClauses();
622  InlinedLPad->reserveClauses(OuterNum);
623  for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
624  InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
625  if (OuterLPad->isCleanup())
626  InlinedLPad->setCleanup(true);
627  }
628 
629  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
630  BB != E; ++BB) {
631  if (InlinedCodeInfo.ContainsCalls)
632  if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
633  &*BB, Invoke.getOuterResumeDest()))
634  // Update any PHI nodes in the exceptional block to indicate that there
635  // is now a new entry in them.
636  Invoke.addIncomingPHIValuesFor(NewBB);
637 
638  // Forward any resumes that are remaining here.
639  if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
640  Invoke.forwardResume(RI, InlinedLPads);
641  }
642 
643  // Now that everything is happy, we have one final detail. The PHI nodes in
644  // the exception destination block still have entries due to the original
645  // invoke instruction. Eliminate these entries (which might even delete the
646  // PHI node) now.
647  InvokeDest->removePredecessor(II->getParent());
648 }
649 
650 /// If we inlined an invoke site, we need to convert calls
651 /// in the body of the inlined function into invokes.
652 ///
653 /// II is the invoke instruction being inlined. FirstNewBlock is the first
654 /// block of the inlined code (the last block is the end of the function),
655 /// and InlineCodeInfo is information about the code that got inlined.
656 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
657  ClonedCodeInfo &InlinedCodeInfo) {
658  BasicBlock *UnwindDest = II->getUnwindDest();
659  Function *Caller = FirstNewBlock->getParent();
660 
661  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
662 
663  // If there are PHI nodes in the unwind destination block, we need to keep
664  // track of which values came into them from the invoke before removing the
665  // edge from this block.
666  SmallVector<Value *, 8> UnwindDestPHIValues;
667  BasicBlock *InvokeBB = II->getParent();
668  for (Instruction &I : *UnwindDest) {
669  // Save the value to use for this edge.
670  PHINode *PHI = dyn_cast<PHINode>(&I);
671  if (!PHI)
672  break;
673  UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
674  }
675 
676  // Add incoming-PHI values to the unwind destination block for the given basic
677  // block, using the values for the original invoke's source block.
678  auto UpdatePHINodes = [&](BasicBlock *Src) {
679  BasicBlock::iterator I = UnwindDest->begin();
680  for (Value *V : UnwindDestPHIValues) {
681  PHINode *PHI = cast<PHINode>(I);
682  PHI->addIncoming(V, Src);
683  ++I;
684  }
685  };
686 
687  // This connects all the instructions which 'unwind to caller' to the invoke
688  // destination.
689  UnwindDestMemoTy FuncletUnwindMap;
690  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
691  BB != E; ++BB) {
692  if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
693  if (CRI->unwindsToCaller()) {
694  auto *CleanupPad = CRI->getCleanupPad();
695  CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
696  CRI->eraseFromParent();
697  UpdatePHINodes(&*BB);
698  // Finding a cleanupret with an unwind destination would confuse
699  // subsequent calls to getUnwindDestToken, so map the cleanuppad
700  // to short-circuit any such calls and recognize this as an "unwind
701  // to caller" cleanup.
702  assert(!FuncletUnwindMap.count(CleanupPad) ||
703  isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
704  FuncletUnwindMap[CleanupPad] =
705  ConstantTokenNone::get(Caller->getContext());
706  }
707  }
708 
709  Instruction *I = BB->getFirstNonPHI();
710  if (!I->isEHPad())
711  continue;
712 
713  Instruction *Replacement = nullptr;
714  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
715  if (CatchSwitch->unwindsToCaller()) {
716  Value *UnwindDestToken;
717  if (auto *ParentPad =
718  dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
719  // This catchswitch is nested inside another funclet. If that
720  // funclet has an unwind destination within the inlinee, then
721  // unwinding out of this catchswitch would be UB. Rewriting this
722  // catchswitch to unwind to the inlined invoke's unwind dest would
723  // give the parent funclet multiple unwind destinations, which is
724  // something that subsequent EH table generation can't handle and
725  // that the verifier rejects. So when we see such a call, leave it
726  // as "unwind to caller".
727  UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
728  if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
729  continue;
730  } else {
731  // This catchswitch has no parent to inherit constraints from, and
732  // none of its descendants can have an unwind edge that exits it and
733  // targets another funclet in the inlinee. It may or may not have a
734  // descendant that definitively has an unwind to caller. In either
735  // case, we'll have to assume that any unwinds out of it may need to
736  // be routed to the caller, so treat it as though it has a definitive
737  // unwind to caller.
738  UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
739  }
740  auto *NewCatchSwitch = CatchSwitchInst::Create(
741  CatchSwitch->getParentPad(), UnwindDest,
742  CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
743  CatchSwitch);
744  for (BasicBlock *PadBB : CatchSwitch->handlers())
745  NewCatchSwitch->addHandler(PadBB);
746  // Propagate info for the old catchswitch over to the new one in
747  // the unwind map. This also serves to short-circuit any subsequent
748  // checks for the unwind dest of this catchswitch, which would get
749  // confused if they found the outer handler in the callee.
750  FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
751  Replacement = NewCatchSwitch;
752  }
753  } else if (!isa<FuncletPadInst>(I)) {
754  llvm_unreachable("unexpected EHPad!");
755  }
756 
757  if (Replacement) {
758  Replacement->takeName(I);
759  I->replaceAllUsesWith(Replacement);
760  I->eraseFromParent();
761  UpdatePHINodes(&*BB);
762  }
763  }
764 
765  if (InlinedCodeInfo.ContainsCalls)
766  for (Function::iterator BB = FirstNewBlock->getIterator(),
767  E = Caller->end();
768  BB != E; ++BB)
769  if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
770  &*BB, UnwindDest, &FuncletUnwindMap))
771  // Update any PHI nodes in the exceptional block to indicate that there
772  // is now a new entry in them.
773  UpdatePHINodes(NewBB);
774 
775  // Now that everything is happy, we have one final detail. The PHI nodes in
776  // the exception destination block still have entries due to the original
777  // invoke instruction. Eliminate these entries (which might even delete the
778  // PHI node) now.
779  UnwindDest->removePredecessor(InvokeBB);
780 }
781 
782 /// When inlining a call site that has !llvm.mem.parallel_loop_access,
783 /// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
784 /// be propagated to all memory-accessing cloned instructions.
785 static void PropagateCallSiteMetadata(CallBase &CB, ValueToValueMapTy &VMap) {
786  MDNode *MemParallelLoopAccess =
787  CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
788  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
789  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
790  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
791  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
792  return;
793 
794  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
795  VMI != VMIE; ++VMI) {
796  // Check that key is an instruction, to skip the Argument mapping, which
797  // points to an instruction in the original function, not the inlined one.
798  if (!VMI->second || !isa<Instruction>(VMI->first))
799  continue;
800 
801  Instruction *NI = dyn_cast<Instruction>(VMI->second);
802  if (!NI)
803  continue;
804 
805  // This metadata is only relevant for instructions that access memory.
806  if (!NI->mayReadOrWriteMemory())
807  continue;
808 
809  if (MemParallelLoopAccess) {
810  // TODO: This probably should not overwrite MemParallelLoopAccess.
811  MemParallelLoopAccess = MDNode::concatenate(
812  NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access),
813  MemParallelLoopAccess);
814  NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
815  MemParallelLoopAccess);
816  }
817 
818  if (AccessGroup)
819  NI->setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
820  NI->getMetadata(LLVMContext::MD_access_group), AccessGroup));
821 
822  if (AliasScope)
823  NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
824  NI->getMetadata(LLVMContext::MD_alias_scope), AliasScope));
825 
826  if (NoAlias)
827  NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
828  NI->getMetadata(LLVMContext::MD_noalias), NoAlias));
829  }
830 }
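// Illustrative example: if the call site being inlined carries
//
//   call void @callee(), !noalias !5, !alias.scope !7
//
// then each memory-accessing instruction cloned from @callee gets !5 and !7
// concatenated onto whatever !noalias / !alias.scope metadata it already had
// (the metadata numbers and @callee are placeholders).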
831 
832 /// Utility for cloning !noalias and !alias.scope metadata. When a code region
833 /// using scoped alias metadata is inlined, the aliasing relationships may not
834 /// hold between the two versions. It is necessary to create a deep clone of the
835 /// metadata, putting the two versions in separate scope domains.
836 class ScopedAliasMetadataDeepCloner {
837  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
838  SetVector<const MDNode *> MD;
839  MetadataMap MDMap;
840  void addRecursiveMetadataUses();
841 
842 public:
843  ScopedAliasMetadataDeepCloner(const Function *F);
844 
845  /// Create a new clone of the scoped alias metadata, which will be used by
846  /// subsequent remap() calls.
847  void clone();
848 
849  /// Remap instructions in the given VMap from the original to the cloned
850  /// metadata.
851  void remap(ValueToValueMapTy &VMap);
852 };
853 
854 ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
855  const Function *F) {
856  for (const BasicBlock &BB : *F) {
857  for (const Instruction &I : BB) {
858  if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
859  MD.insert(M);
860  if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
861  MD.insert(M);
862 
863  // We also need to clone the metadata in noalias intrinsics.
864  if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
865  MD.insert(Decl->getScopeList());
866  }
867  }
868  addRecursiveMetadataUses();
869 }
870 
871 void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
872  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
873  while (!Queue.empty()) {
874  const MDNode *M = cast<MDNode>(Queue.pop_back_val());
875  for (const Metadata *Op : M->operands())
876  if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
877  if (MD.insert(OpMD))
878  Queue.push_back(OpMD);
879  }
880 }
881 
882 void ScopedAliasMetadataDeepCloner::clone() {
883  assert(MDMap.empty() && "clone() already called ?");
884 
885  SmallVector<TempMDTuple, 16> DummyNodes;
886  for (const MDNode *I : MD) {
887  DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
888  MDMap[I].reset(DummyNodes.back().get());
889  }
890 
891  // Create new metadata nodes to replace the dummy nodes, replacing old
892  // metadata references with either a dummy node or an already-created new
893  // node.
894  SmallVector<Metadata *, 4> NewOps;
895  for (const MDNode *I : MD) {
896  for (const Metadata *Op : I->operands()) {
897  if (const MDNode *M = dyn_cast<MDNode>(Op))
898  NewOps.push_back(MDMap[M]);
899  else
900  NewOps.push_back(const_cast<Metadata *>(Op));
901  }
902 
903  MDNode *NewM = MDNode::get(I->getContext(), NewOps);
904  MDTuple *TempM = cast<MDTuple>(MDMap[I]);
905  assert(TempM->isTemporary() && "Expected temporary node");
906 
907  TempM->replaceAllUsesWith(NewM);
908  NewOps.clear();
909  }
910 }
911 
912 void ScopedAliasMetadataDeepCloner::remap(ValueToValueMapTy &VMap) {
913  if (MDMap.empty())
914  return; // Nothing to do.
915 
916  for (auto Entry : VMap) {
917  // Check that key is an instruction, to skip the Argument mapping, which
918  // points to an instruction in the original function, not the inlined one.
919  if (!Entry->second || !isa<Instruction>(Entry->first))
920  continue;
921 
922  Instruction *I = dyn_cast<Instruction>(Entry->second);
923  if (!I)
924  continue;
925 
926  // Only update scopes when we find them in the map. If they are not, it is
927  // because we already handled that instruction before. This is faster than
928  // tracking which instructions we already updated.
929  if (MDNode *M = I->getMetadata(LLVMContext::MD_alias_scope))
930  if (MDNode *MNew = MDMap.lookup(M))
931  I->setMetadata(LLVMContext::MD_alias_scope, MNew);
932 
933  if (MDNode *M = I->getMetadata(LLVMContext::MD_noalias))
934  if (MDNode *MNew = MDMap.lookup(M))
935  I->setMetadata(LLVMContext::MD_noalias, MNew);
936 
937  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
938  if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
939  Decl->setScopeList(MNew);
940  }
941 }
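// Minimal usage sketch of this helper, mirroring how the inliner drives it
// (variable names are placeholders):
//
//   ScopedAliasMetadataDeepCloner SAMetadataCloner(CalledFunc);
//   SAMetadataCloner.clone();        // snapshot and deep-clone the scopes
//   // ... clone the callee body, filling in VMap ...
//   SAMetadataCloner.remap(VMap);    // retarget cloned instructions at the
//                                    // cloned scope metadata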
942 
943 /// If the inlined function has noalias arguments,
944 /// then add new alias scopes for each noalias argument, tag the mapped noalias
945 /// parameters with noalias metadata specifying the new scope, and tag all
946 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
947 static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
948  const DataLayout &DL, AAResults *CalleeAAR) {
949  if (!EnableNoAliasConversion)
950  return;
951 
952  const Function *CalledFunc = CB.getCalledFunction();
953  SmallVector<const Argument *, 4> NoAliasArgs;
954 
955  for (const Argument &Arg : CalledFunc->args())
956  if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
957  NoAliasArgs.push_back(&Arg);
958 
959  if (NoAliasArgs.empty())
960  return;
961 
962  // To do a good job, if a noalias variable is captured, we need to know if
963  // the capture point dominates the particular use we're considering.
964  DominatorTree DT;
965  DT.recalculate(const_cast<Function&>(*CalledFunc));
966 
967  // noalias indicates that pointer values based on the argument do not alias
968  // pointer values which are not based on it. So we add a new "scope" for each
969  // noalias function argument. Accesses using pointers based on that argument
970  // become part of that alias scope, accesses using pointers not based on that
971  // argument are tagged as noalias with that scope.
972 
973  DenseMap<const Argument *, MDNode *> NewScopes;
974  MDBuilder MDB(CalledFunc->getContext());
975 
976  // Create a new scope domain for this function.
977  MDNode *NewDomain =
978  MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
979  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
980  const Argument *A = NoAliasArgs[i];
981 
982  std::string Name = std::string(CalledFunc->getName());
983  if (A->hasName()) {
984  Name += ": %";
985  Name += A->getName();
986  } else {
987  Name += ": argument ";
988  Name += utostr(i);
989  }
990 
991  // Note: We always create a new anonymous root here. This is true regardless
992  // of the linkage of the callee because the aliasing "scope" is not just a
993  // property of the callee, but also all control dependencies in the caller.
994  MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
995  NewScopes.insert(std::make_pair(A, NewScope));
996 
997  if (UseNoAliasIntrinsic) {
998  // Introduce a llvm.experimental.noalias.scope.decl for the noalias
999  // argument.
1000  MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
1001  auto *NoAliasDecl =
1002  IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
1003  // Ignore the result for now. The result will be used when the
1004  // llvm.noalias intrinsic is introduced.
1005  (void)NoAliasDecl;
1006  }
1007  }
1008 
1009  // Iterate over all new instructions in the map; for all memory-access
1010  // instructions, add the alias scope metadata.
1011  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
1012  VMI != VMIE; ++VMI) {
1013  if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
1014  if (!VMI->second)
1015  continue;
1016 
1017  Instruction *NI = dyn_cast<Instruction>(VMI->second);
1018  if (!NI)
1019  continue;
1020 
1021  bool IsArgMemOnlyCall = false, IsFuncCall = false;
1022  SmallVector<const Value *, 2> PtrArgs;
1023 
1024  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1025  PtrArgs.push_back(LI->getPointerOperand());
1026  else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1027  PtrArgs.push_back(SI->getPointerOperand());
1028  else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1029  PtrArgs.push_back(VAAI->getPointerOperand());
1030  else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1031  PtrArgs.push_back(CXI->getPointerOperand());
1032  else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1033  PtrArgs.push_back(RMWI->getPointerOperand());
1034  else if (const auto *Call = dyn_cast<CallBase>(I)) {
1035  // If we know that the call does not access memory, then we'll still
1036  // know that about the inlined clone of this call site, and we don't
1037  // need to add metadata.
1038  if (Call->doesNotAccessMemory())
1039  continue;
1040 
1041  IsFuncCall = true;
1042  if (CalleeAAR) {
1043  FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1044  if (AAResults::onlyAccessesArgPointees(MRB))
1045  IsArgMemOnlyCall = true;
1046  }
1047 
1048  for (Value *Arg : Call->args()) {
1049  // We need to check the underlying objects of all arguments, not just
1050  // the pointer arguments, because we might be passing pointers as
1051  // integers, etc.
1052  // However, if we know that the call only accesses pointer arguments,
1053  // then we only need to check the pointer arguments.
1054  if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1055  continue;
1056 
1057  PtrArgs.push_back(Arg);
1058  }
1059  }
1060 
1061  // If we found no pointers, then this instruction is not suitable for
1062  // pairing with an instruction to receive aliasing metadata.
1063  // However, if this is a call, we might just alias with none of the
1064  // noalias arguments.
1065  if (PtrArgs.empty() && !IsFuncCall)
1066  continue;
1067 
1068  // It is possible that there is only one underlying object, but you
1069  // need to go through several PHIs to see it, and thus could be
1070  // repeated in the Objects list.
1071  SmallPtrSet<const Value *, 4> ObjSet;
1072  SmallVector<Metadata *, 4> Scopes, NoAliases;
1073 
1075  for (const Value *V : PtrArgs) {
1076  SmallVector<const Value *, 4> Objects;
1077  getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
1078 
1079  for (const Value *O : Objects)
1080  ObjSet.insert(O);
1081  }
1082 
1083  // Figure out if we're derived from anything that is not a noalias
1084  // argument.
1085  bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1086  for (const Value *V : ObjSet) {
1087  // Is this value a constant that cannot be derived from any pointer
1088  // value (we need to exclude constant expressions, for example, that
1089  // are formed from arithmetic on global symbols).
1090  bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1091  isa<ConstantPointerNull>(V) ||
1092  isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1093  if (IsNonPtrConst)
1094  continue;
1095 
1096  // If this is anything other than a noalias argument, then we cannot
1097  // completely describe the aliasing properties using alias.scope
1098  // metadata (and, thus, won't add any).
1099  if (const Argument *A = dyn_cast<Argument>(V)) {
1100  if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1101  UsesAliasingPtr = true;
1102  } else {
1103  UsesAliasingPtr = true;
1104  }
1105 
1106  // If this is not some identified function-local object (which cannot
1107  // directly alias a noalias argument), or some other argument (which,
1108  // by definition, also cannot alias a noalias argument), then we could
1109  // alias a noalias argument that has been captured.
1110  if (!isa<Argument>(V) &&
1111  !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1112  CanDeriveViaCapture = true;
1113  }
1114 
1115  // A function call can always get captured noalias pointers (via other
1116  // parameters, globals, etc.).
1117  if (IsFuncCall && !IsArgMemOnlyCall)
1118  CanDeriveViaCapture = true;
1119 
1120  // First, we want to figure out all of the sets with which we definitely
1121  // don't alias. Iterate over all noalias set, and add those for which:
1122  // 1. The noalias argument is not in the set of objects from which we
1123  // definitely derive.
1124  // 2. The noalias argument has not yet been captured.
1125  // An arbitrary function that might load pointers could see captured
1126  // noalias arguments via other noalias arguments or globals, and so we
1127  // must always check for prior capture.
1128  for (const Argument *A : NoAliasArgs) {
1129  if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1130  // It might be tempting to skip the
1131  // PointerMayBeCapturedBefore check if
1132  // A->hasNoCaptureAttr() is true, but this is
1133  // incorrect because nocapture only guarantees
1134  // that no copies outlive the function, not
1135  // that the value cannot be locally captured.
1136  !PointerMayBeCapturedBefore(A,
1137  /* ReturnCaptures */ false,
1138  /* StoreCaptures */ false, I, &DT)))
1139  NoAliases.push_back(NewScopes[A]);
1140  }
1141 
1142  if (!NoAliases.empty())
1143  NI->setMetadata(LLVMContext::MD_noalias,
1144  MDNode::concatenate(
1145  NI->getMetadata(LLVMContext::MD_noalias),
1146  MDNode::get(CalledFunc->getContext(), NoAliases)));
1147 
1148  // Next, we want to figure out all of the sets to which we might belong.
1149  // We might belong to a set if the noalias argument is in the set of
1150  // underlying objects. If there is some non-noalias argument in our list
1151  // of underlying objects, then we cannot add a scope because the fact
1152  // that some access does not alias with any set of our noalias arguments
1153  // cannot itself guarantee that it does not alias with this access
1154  // (because there is some pointer of unknown origin involved and the
1155  // other access might also depend on this pointer). We also cannot add
1156  // scopes to arbitrary functions unless we know they don't access any
1157  // non-parameter pointer-values.
1158  bool CanAddScopes = !UsesAliasingPtr;
1159  if (CanAddScopes && IsFuncCall)
1160  CanAddScopes = IsArgMemOnlyCall;
1161 
1162  if (CanAddScopes)
1163  for (const Argument *A : NoAliasArgs) {
1164  if (ObjSet.count(A))
1165  Scopes.push_back(NewScopes[A]);
1166  }
1167 
1168  if (!Scopes.empty())
1169  NI->setMetadata(
1170  LLVMContext::MD_alias_scope,
1171  MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1172  MDNode::get(CalledFunc->getContext(), Scopes)));
1173  }
1174  }
1175 }
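// Illustrative example (names are placeholders): for a callee such as
//
//   define void @f(i32* noalias %p, i32* %q) { ... }
//
// a fresh scope domain plus a scope for %p are created; after inlining, a
// llvm.experimental.noalias.scope.decl for that scope is emitted (when
// UseNoAliasIntrinsic is set), accesses based on %p are tagged !alias.scope
// with the new scope, and accesses provably not based on %p are tagged
// !noalias with it.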
1176 
1177 static bool MayContainThrowingOrExitingCall(Instruction *Begin,
1178  Instruction *End) {
1179 
1180  assert(Begin->getParent() == End->getParent() &&
1181  "Expected to be in same basic block!");
1182  unsigned NumInstChecked = 0;
1183  // Check that all instructions in the range [Begin, End) are guaranteed to
1184  // transfer execution to successor.
1185  for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
1186  if (NumInstChecked++ > InlinerAttributeWindow ||
1187  !isGuaranteedToTransferExecutionToSuccessor(&I))
1188  return true;
1189  return false;
1190 }
1191 
1192 static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
1193 
1194  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
1195  if (AB.empty())
1196  return AB;
1197  AttrBuilder Valid;
1198  // Only allow these white listed attributes to be propagated back to the
1199  // callee. This is because other attributes may only be valid on the call
1200  // itself, i.e. attributes such as signext and zeroext.
1201  if (auto DerefBytes = AB.getDereferenceableBytes())
1202  Valid.addDereferenceableAttr(DerefBytes);
1203  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
1204  Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1205  if (AB.contains(Attribute::NoAlias))
1206  Valid.addAttribute(Attribute::NoAlias);
1207  if (AB.contains(Attribute::NonNull))
1208  Valid.addAttribute(Attribute::NonNull);
1209  return Valid;
1210 }
1211 
1212 static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
1213  if (!UpdateReturnAttributes)
1214  return;
1215 
1216  AttrBuilder Valid = IdentifyValidAttributes(CB);
1217  if (Valid.empty())
1218  return;
1219  auto *CalledFunction = CB.getCalledFunction();
1220  auto &Context = CalledFunction->getContext();
1221 
1222  for (auto &BB : *CalledFunction) {
1223  auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1224  if (!RI || !isa<CallBase>(RI->getOperand(0)))
1225  continue;
1226  auto *RetVal = cast<CallBase>(RI->getOperand(0));
1227  // Sanity check that the cloned RetVal exists and is a call, otherwise we
1228  // cannot add the attributes on the cloned RetVal.
1229  // Simplification during inlining could have transformed the cloned
1230  // instruction.
1231  auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1232  if (!NewRetVal)
1233  continue;
1234  // Backward propagation of attributes to the returned value may be incorrect
1235  // if it is control flow dependent.
1236  // Consider:
1237  // @callee {
1238  // %rv = call @foo()
1239  // %rv2 = call @bar()
1240  // if (%rv2 != null)
1241  // return %rv2
1242  // if (%rv == null)
1243  // exit()
1244  // return %rv
1245  // }
1246  // caller() {
1247  // %val = call nonnull @callee()
1248  // }
1249  // Here we cannot add the nonnull attribute on either foo or bar. So, we
1250  // limit the check to both RetVal and RI are in the same basic block and
1251  // there are no throwing/exiting instructions between these instructions.
1252  if (RI->getParent() != RetVal->getParent() ||
1253  MayContainThrowingOrExitingCall(RetVal, RI))
1254  continue;
1255  // Add to the existing attributes of NewRetVal, i.e. the cloned call
1256  // instruction.
1257  // NB! When we have the same attribute already existing on NewRetVal, but
1258  // with a differing value, the AttributeList's merge API honours the already
1259  // existing attribute value (i.e. attributes such as dereferenceable,
1260  // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1261  AttributeList AL = NewRetVal->getAttributes();
1262  AttributeList NewAL =
1263  AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
1264  NewRetVal->setAttributes(NewAL);
1265  }
1266 }
1267 
1268 /// If the inlined function has non-byval align arguments, then
1269 /// add @llvm.assume-based alignment assumptions to preserve this information.
1270 static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1271  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1272  return;
1273 
1274  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
1275  auto &DL = CB.getCaller()->getParent()->getDataLayout();
1276 
1277  // To avoid inserting redundant assumptions, we should check for assumptions
1278  // already in the caller. To do this, we might need a DT of the caller.
1279  DominatorTree DT;
1280  bool DTCalculated = false;
1281 
1282  Function *CalledFunc = CB.getCalledFunction();
1283  for (Argument &Arg : CalledFunc->args()) {
1284  unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1285  if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
1286  if (!DTCalculated) {
1287  DT.recalculate(*CB.getCaller());
1288  DTCalculated = true;
1289  }
1290 
1291  // If we can already prove the asserted alignment in the context of the
1292  // caller, then don't bother inserting the assumption.
1293  Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1294  if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
1295  continue;
1296 
1297  CallInst *NewAsmp =
1298  IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
1299  AC->registerAssumption(cast<AssumeInst>(NewAsmp));
1300  }
1301  }
1302 }
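// Illustrative example (names are placeholders): with the option enabled, a
// callee parameter declared 'i32* align 16 %p' whose alignment is not already
// provable at the call site produces, in the caller, roughly
//
//   call void @llvm.assume(i1 true) [ "align"(i32* %arg, i64 16) ]
//
// which later passes can use in place of the parameter attribute that is lost
// once the call is inlined away.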
1303 
1304 /// Once we have cloned code over from a callee into the caller,
1305 /// update the specified callgraph to reflect the changes we made.
1306 /// Note that it's possible that not all code was copied over, so only
1307 /// some edges of the callgraph may remain.
1308 static void UpdateCallGraphAfterInlining(CallBase &CB,
1309  Function::iterator FirstNewBlock,
1310  ValueToValueMapTy &VMap,
1311  InlineFunctionInfo &IFI) {
1312  CallGraph &CG = *IFI.CG;
1313  const Function *Caller = CB.getCaller();
1314  const Function *Callee = CB.getCalledFunction();
1315  CallGraphNode *CalleeNode = CG[Callee];
1316  CallGraphNode *CallerNode = CG[Caller];
1317 
1318  // Since we inlined some uninlined call sites in the callee into the caller,
1319  // add edges from the caller to all of the callees of the callee.
1320  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1321 
1322  // Consider the case where CalleeNode == CallerNode.
1323  CallGraphNode::CalledFunctionsVector CallCache;
1324  if (CalleeNode == CallerNode) {
1325  CallCache.assign(I, E);
1326  I = CallCache.begin();
1327  E = CallCache.end();
1328  }
1329 
1330  for (; I != E; ++I) {
1331  // Skip 'reference' call records.
1332  if (!I->first)
1333  continue;
1334 
1335  const Value *OrigCall = *I->first;
1336 
1337  ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1338  // Only copy the edge if the call was inlined!
1339  if (VMI == VMap.end() || VMI->second == nullptr)
1340  continue;
1341 
1342  // If the call was inlined, but then constant folded, there is no edge to
1343  // add. Check for this case.
1344  auto *NewCall = dyn_cast<CallBase>(VMI->second);
1345  if (!NewCall)
1346  continue;
1347 
1348  // We do not treat intrinsic calls like real function calls because we
1349  // expect them to become inline code; do not add an edge for an intrinsic.
1350  if (NewCall->getCalledFunction() &&
1351  NewCall->getCalledFunction()->isIntrinsic())
1352  continue;
1353 
1354  // Remember that this call site got inlined for the client of
1355  // InlineFunction.
1356  IFI.InlinedCalls.push_back(NewCall);
1357 
1358  // It's possible that inlining the callsite will cause it to go from an
1359  // indirect to a direct call by resolving a function pointer. If this
1360  // happens, set the callee of the new call site to a more precise
1361  // destination. This can also happen if the call graph node of the caller
1362  // was just unnecessarily imprecise.
1363  if (!I->second->getFunction())
1364  if (Function *F = NewCall->getCalledFunction()) {
1365  // Indirect call site resolved to direct call.
1366  CallerNode->addCalledFunction(NewCall, CG[F]);
1367 
1368  continue;
1369  }
1370 
1371  CallerNode->addCalledFunction(NewCall, I->second);
1372  }
1373 
1374  // Update the call graph by deleting the edge from Callee to Caller. We must
1375  // do this after the loop above in case Caller and Callee are the same.
1376  CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
1377 }
1378 
1379 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1380  BasicBlock *InsertBlock,
1381  InlineFunctionInfo &IFI) {
1382  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1383  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1384 
1385  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1386 
1387  // Always generate a memcpy of alignment 1 here because we don't know
1388  // the alignment of the src pointer. Other optimizations can infer
1389  // better alignment.
1390  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1391  /*SrcAlign*/ Align(1), Size);
1392 }
1393 
1394 /// When inlining a call site that has a byval argument,
1395 /// we have to make the implicit memcpy explicit by adding it.
1396 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1397  const Function *CalledFunc,
1398  InlineFunctionInfo &IFI,
1399  unsigned ByValAlignment) {
1400  PointerType *ArgTy = cast<PointerType>(Arg->getType());
1401  Type *AggTy = ArgTy->getElementType();
1402 
1403  Function *Caller = TheCall->getFunction();
1404  const DataLayout &DL = Caller->getParent()->getDataLayout();
1405 
1406  // If the called function is readonly, then it could not mutate the caller's
1407  // copy of the byval'd memory. In this case, it is safe to elide the copy and
1408  // temporary.
1409  if (CalledFunc->onlyReadsMemory()) {
1410  // If the byval argument has a specified alignment that is greater than the
1411  // passed in pointer, then we either have to round up the input pointer or
1412  // give up on this transformation.
1413  if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1414  return Arg;
1415 
1416  AssumptionCache *AC =
1417  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1418 
1419  // If the pointer is already known to be sufficiently aligned, or if we can
1420  // round it up to a larger alignment, then we don't need a temporary.
1421  if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1422  AC) >= ByValAlignment)
1423  return Arg;
1424 
1425  // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1426  // for code quality, but rarely happens and is required for correctness.
1427  }
1428 
1429  // Create the alloca. If we have DataLayout, use nice alignment.
1430  Align Alignment(DL.getPrefTypeAlignment(AggTy));
1431 
1432  // If the byval had an alignment specified, we *must* use at least that
1433  // alignment, as it is required by the byval argument (and uses of the
1434  // pointer inside the callee).
1435  Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1436 
1437  Value *NewAlloca =
1438  new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1439  Arg->getName(), &*Caller->begin()->begin());
1440  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1441 
1442  // Uses of the argument in the function should use our new alloca
1443  // instead.
1444  return NewAlloca;
1445 }
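
// [Editor's note] Illustrative example, not part of the original source. For a
// call site such as
//
//   call void @callee(%struct.S* byval(%struct.S) align 8 %p)
//
// where @callee is not readonly, HandleByValArgument returns a fresh alloca of
// %struct.S (aligned to at least the byval alignment) placed in the caller's
// entry block, and HandleByValArgumentInit above then emits a memcpy from %p
// into that alloca, so the inlined body works on its own copy. The IR names are
// invented for illustration only.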
1446 
1447 // Check whether this Value is used by a lifetime intrinsic.
1448 static bool isUsedByLifetimeMarker(Value *V) {
1449  for (User *U : V->users())
1450  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1451  if (II->isLifetimeStartOrEnd())
1452  return true;
1453  return false;
1454 }
1455 
1456 // Check whether the given alloca already has
1457 // lifetime.start or lifetime.end intrinsics.
1458 static bool hasLifetimeMarkers(AllocaInst *AI) {
1459  Type *Ty = AI->getType();
1460  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1461  Ty->getPointerAddressSpace());
1462  if (Ty == Int8PtrTy)
1463  return isUsedByLifetimeMarker(AI);
1464 
1465  // Do a scan to find all the casts to i8*.
1466  for (User *U : AI->users()) {
1467  if (U->getType() != Int8PtrTy) continue;
1468  if (U->stripPointerCasts() != AI) continue;
1469  if (isUsedByLifetimeMarker(U))
1470  return true;
1471  }
1472  return false;
1473 }
1474 
1475 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1476 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1477 /// cannot be static.
1478 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1479  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1480 }
1481 
1482 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1483 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1484 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1485  LLVMContext &Ctx,
1486  DenseMap<const MDNode *, MDNode *> &IANodes) {
1487  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1488  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1489  OrigDL.getScope(), IA);
1490 }
1491 
1492 /// Update inlined instructions' line numbers to encode the location where
1493 /// these instructions are inlined.
1494 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1495  Instruction *TheCall, bool CalleeHasDebugInfo) {
1496  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1497  if (!TheCallDL)
1498  return;
1499 
1500  auto &Ctx = Fn->getContext();
1501  DILocation *InlinedAtNode = TheCallDL;
1502 
1503  // Create a unique call site, not to be confused with any other call from the
1504  // same location.
1505  InlinedAtNode = DILocation::getDistinct(
1506  Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1507  InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1508 
1509  // Cache the inlined-at nodes as they're built so they are reused; without
1510  // this, every instruction's inlined-at chain would become distinct from each
1511  // other.
1512  DenseMap<const MDNode *, MDNode *> IANodes;
1513 
1514  // Check if we are not generating inline line tables and want to use
1515  // the call site location instead.
1516  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1517 
1518  for (; FI != Fn->end(); ++FI) {
1519  for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1520  BI != BE; ++BI) {
1521  // Loop metadata needs to be updated so that the start and end locs
1522  // reference inlined-at locations.
1523  auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1524  const DILocation &Loc) -> DILocation * {
1525  return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1526  };
1527  updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1528 
1529  if (!NoInlineLineTables)
1530  if (DebugLoc DL = BI->getDebugLoc()) {
1531  DebugLoc IDL =
1532  inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1533  BI->setDebugLoc(IDL);
1534  continue;
1535  }
1536 
1537  if (CalleeHasDebugInfo && !NoInlineLineTables)
1538  continue;
1539 
1540  // If the inlined instruction has no line number, or if inline info
1541  // is not being generated, make it look as if it originates from the call
1542  // location. This is important for ((__always_inline, __nodebug__))
1543  // functions which must use caller location for all instructions in their
1544  // function body.
1545 
1546  // Don't update static allocas, as they may get moved later.
1547  if (auto *AI = dyn_cast<AllocaInst>(BI))
1548  if (allocaWouldBeStaticInEntry(AI))
1549  continue;
1550 
1551  BI->setDebugLoc(TheCallDL);
1552  }
1553 
1554  // Remove debug info intrinsics if we're not keeping inline info.
1555  if (NoInlineLineTables) {
1556  BasicBlock::iterator BI = FI->begin();
1557  while (BI != FI->end()) {
1558  if (isa<DbgInfoIntrinsic>(BI)) {
1559  BI = BI->eraseFromParent();
1560  continue;
1561  }
1562  ++BI;
1563  }
1564  }
1565 
1566  }
1567 }
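
// [Editor's note] Illustrative example, not part of the original source: if the
// caller invokes the callee at a.c:10 and an inlined instruction originally
// carried the location b.c:3, fixupLineNumbers keeps b.c:3 as the instruction's
// line and scope but gives it an inlined-at chain pointing at a distinct clone
// of the a.c:10 call-site location, so debuggers can reconstruct the "virtual"
// call stack. The file names and line numbers here are invented for
// illustration only.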
1568 
1569 /// Update the block frequencies of the caller after a callee has been inlined.
1570 ///
1571 /// Each block cloned into the caller has its block frequency scaled by the
1572 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1573 /// callee's entry block gets the same frequency as the callsite block and the
1574 /// relative frequencies of all cloned blocks remain the same after cloning.
1575 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1576  const ValueToValueMapTy &VMap,
1577  BlockFrequencyInfo *CallerBFI,
1578  BlockFrequencyInfo *CalleeBFI,
1579  const BasicBlock &CalleeEntryBlock) {
1580  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1581  for (auto Entry : VMap) {
1582  if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1583  continue;
1584  auto *OrigBB = cast<BasicBlock>(Entry.first);
1585  auto *ClonedBB = cast<BasicBlock>(Entry.second);
1586  uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1587  if (!ClonedBBs.insert(ClonedBB).second) {
1588  // Multiple blocks in the callee might get mapped to one cloned block in
1589  // the caller since we prune the callee as we clone it. When that happens,
1590  // we want to use the maximum among the original blocks' frequencies.
1591  uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1592  if (NewFreq > Freq)
1593  Freq = NewFreq;
1594  }
1595  CallerBFI->setBlockFreq(ClonedBB, Freq);
1596  }
1597  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1598  CallerBFI->setBlockFreqAndScale(
1599  EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1600  ClonedBBs);
1601 }
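
// [Editor's note] Worked example, not part of the original source: suppose the
// callee's entry block has frequency 8, another callee block has frequency 24,
// and the call site block has frequency 2 in the caller. After updateCallerBFI,
// the cloned entry block gets frequency 2 and setBlockFreqAndScale scales the
// other cloned block by 2/8 to frequency 6, preserving the callee's 3:1 ratio
// as described in the comment above. The concrete numbers are illustrative.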
1602 
1603 /// Update the branch metadata for cloned call instructions.
1604 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1605  const ProfileCount &CalleeEntryCount,
1606  const CallBase &TheCall, ProfileSummaryInfo *PSI,
1607  BlockFrequencyInfo *CallerBFI) {
1608  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1609  CalleeEntryCount.getCount() < 1)
1610  return;
1611  auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1612  int64_t CallCount =
1613  std::min(CallSiteCount.getValueOr(0), CalleeEntryCount.getCount());
1614  updateProfileCallee(Callee, -CallCount, &VMap);
1615 }
1616 
1617 void llvm::updateProfileCallee(
1618  Function *Callee, int64_t entryDelta,
1619  const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1620  auto CalleeCount = Callee->getEntryCount();
1621  if (!CalleeCount.hasValue())
1622  return;
1623 
1624  uint64_t priorEntryCount = CalleeCount.getCount();
1625  uint64_t newEntryCount;
1626 
1627  // Since CallSiteCount is an estimate, it could exceed the original callee
1628  // count; in that case it has to be clamped to 0, so guard against underflow.
1629  if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1630  newEntryCount = 0;
1631  else
1632  newEntryCount = priorEntryCount + entryDelta;
1633 
1634  // Are we in the middle of inlining (i.e. was a VMap provided)?
1635  if (VMap) {
1636  uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1637  for (auto Entry : *VMap)
1638  if (isa<CallInst>(Entry.first))
1639  if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1640  CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1641  }
1642 
1643  if (entryDelta) {
1644  Callee->setEntryCount(newEntryCount);
1645 
1646  for (BasicBlock &BB : *Callee)
1647  // No need to update the callsite if it is pruned during inlining.
1648  if (!VMap || VMap->count(&BB))
1649  for (Instruction &I : BB)
1650  if (CallInst *CI = dyn_cast<CallInst>(&I))
1651  CI->updateProfWeight(newEntryCount, priorEntryCount);
1652  }
1653 }
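
// [Editor's note] Worked example, not part of the original source: if the
// callee's entry count is 1000 and the inlined call site is estimated to
// account for 300 of those calls, updateCallProfile ends up invoking
// updateProfileCallee(Callee, -300, &VMap). The out-of-line callee keeps an
// entry count of 700, call sites cloned into the caller have their profile
// weights scaled by 300/1000, and the callee's own call sites (in blocks that
// were actually cloned) are scaled by 700/1000. The numbers are illustrative.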
1654 
1655 /// An operand bundle "clang.arc.attachedcall" on a call indicates the call
1656 /// result is implicitly consumed by a call to retainRV or claimRV immediately
1657 /// after the call. This function inlines the retainRV/claimRV calls.
1658 ///
1659 /// There are three cases to consider:
1660 ///
1661 /// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
1662 /// object in the callee return block, the autoreleaseRV call and the
1663 /// retainRV/claimRV call in the caller cancel out. If the call in the caller
1664 /// is a claimRV call, a call to objc_release is emitted.
1665 ///
1666 /// 2. If there is a call in the callee return block that doesn't have operand
1667 /// bundle "clang.arc.attachedcall", the operand bundle on the original call
1668 /// is transferred to the call in the callee.
1669 ///
1670 /// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
1671 /// a retainRV call.
1672 static void
1673 inlineRetainOrClaimRVCalls(CallBase &CB,
1674  const SmallVectorImpl<ReturnInst *> &Returns) {
1675  Module *Mod = CB.getModule();
1676  bool IsRetainRV = objcarc::hasAttachedCallOpBundle(&CB, true),
1677  IsClaimRV = !IsRetainRV;
1678 
1679  for (auto *RI : Returns) {
1680  Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
1681  BasicBlock::reverse_iterator I = ++(RI->getIterator().getReverse());
1682  BasicBlock::reverse_iterator EI = RI->getParent()->rend();
1683  bool InsertRetainCall = IsRetainRV;
1684  IRBuilder<> Builder(RI->getContext());
1685 
1686  // Walk backwards through the basic block looking for either a matching
1687  // autoreleaseRV call or an unannotated call.
1688  for (; I != EI;) {
1689  auto CurI = I++;
1690 
1691  // Ignore casts.
1692  if (isa<CastInst>(*CurI))
1693  continue;
1694 
1695  if (auto *II = dyn_cast<IntrinsicInst>(&*CurI)) {
1696  if (II->getIntrinsicID() == Intrinsic::objc_autoreleaseReturnValue &&
1697  II->hasNUses(0) &&
1698  objcarc::GetRCIdentityRoot(II->getOperand(0)) == RetOpnd) {
1699  // If we've found a matching autoreleaseRV call:
1700  // - If claimRV is attached to the call, insert a call to objc_release
1701  // and erase the autoreleaseRV call.
1702  // - If retainRV is attached to the call, just erase the autoreleaseRV
1703  // call.
1704  if (IsClaimRV) {
1705  Builder.SetInsertPoint(II);
1706  Function *IFn =
1707  Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
1708  Value *BC =
1709  Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1710  Builder.CreateCall(IFn, BC, "");
1711  }
1712  II->eraseFromParent();
1713  InsertRetainCall = false;
1714  }
1715  } else if (auto *CI = dyn_cast<CallInst>(&*CurI)) {
1716  if (objcarc::GetRCIdentityRoot(CI) == RetOpnd &&
1717  !objcarc::hasAttachedCallOpBundle(CI)) {
1718  // If we've found an unannotated call that defines RetOpnd, add a
1719  // "clang.arc.attachedcall" operand bundle.
1720  Value *BundleArgs[] = {ConstantInt::get(
1721  Builder.getInt64Ty(),
1722  objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
1723  OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
1724  auto *NewCall = CallBase::addOperandBundle(
1725  CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
1726  NewCall->copyMetadata(*CI);
1727  CI->replaceAllUsesWith(NewCall);
1728  CI->eraseFromParent();
1729  InsertRetainCall = false;
1730  }
1731  }
1732 
1733  break;
1734  }
1735 
1736  if (InsertRetainCall) {
1737  // The retainRV is attached to the call and we've failed to find a
1738  // matching autoreleaseRV or an annotated call in the callee. Emit a call
1739  // to objc_retain.
1740  Builder.SetInsertPoint(RI);
1741  Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
1742  Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1743  Builder.CreateCall(IFn, BC, "");
1744  }
1745  }
1746 }
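
// [Editor's note] Illustrative example, not part of the original source. For
// case 1 above, if the callee's return block ends in
//
//   %v = call i8* @foo()
//   call i8* @llvm.objc.autoreleaseReturnValue(i8* %v)
//   ret i8* %v
//
// and the call being inlined carries a retainRV "clang.arc.attachedcall"
// bundle, the unused autoreleaseRV call is simply erased; if the bundle is a
// claimRV instead, a call to @llvm.objc.release on %v is emitted in its place.
// The value names %v and @foo are invented for illustration only.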
1747 
1748 /// This function inlines the called function into the basic block of the
1749 /// caller. This returns false if it is not possible to inline this call.
1750 /// The program is still in a well defined state if this occurs though.
1751 ///
1752 /// Note that this only does one level of inlining. For example, if the
1753 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1754 /// exists in the instruction stream. Similarly this will inline a recursive
1755 /// function by one level.
1756 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1757  AAResults *CalleeAAR,
1758  bool InsertLifetime,
1759  Function *ForwardVarArgsTo) {
1760  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1761 
1762  // FIXME: we don't inline callbr yet.
1763  if (isa<CallBrInst>(CB))
1764  return InlineResult::failure("We don't inline callbr yet.");
1765 
1766  // If IFI has any state in it, zap it before we fill it in.
1767  IFI.reset();
1768 
1769  Function *CalledFunc = CB.getCalledFunction();
1770  if (!CalledFunc || // Can't inline external function or indirect
1771  CalledFunc->isDeclaration()) // call!
1772  return InlineResult::failure("external or indirect");
1773 
1774  // The inliner does not know how to inline through calls with operand bundles
1775  // in general ...
1776  if (CB.hasOperandBundles()) {
1777  for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1778  uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1779  // ... but it knows how to inline through "deopt" operand bundles ...
1780  if (Tag == LLVMContext::OB_deopt)
1781  continue;
1782  // ... and "funclet" operand bundles.
1783  if (Tag == LLVMContext::OB_funclet)
1784  continue;
1785  if (Tag == LLVMContext::OB_clang_arc_attachedcall)
1786  continue;
1787 
1788  return InlineResult::failure("unsupported operand bundle");
1789  }
1790  }
1791 
1792  // If the call to the callee cannot throw, set the 'nounwind' flag on any
1793  // calls that we inline.
1794  bool MarkNoUnwind = CB.doesNotThrow();
1795 
1796  BasicBlock *OrigBB = CB.getParent();
1797  Function *Caller = OrigBB->getParent();
1798 
1799  // GC poses two hazards to inlining, which only occur when the callee has GC:
1800  // 1. If the caller has no GC, then the callee's GC must be propagated to the
1801  // caller.
1802  // 2. If the caller has a differing GC, it is invalid to inline.
1803  if (CalledFunc->hasGC()) {
1804  if (!Caller->hasGC())
1805  Caller->setGC(CalledFunc->getGC());
1806  else if (CalledFunc->getGC() != Caller->getGC())
1807  return InlineResult::failure("incompatible GC");
1808  }
1809 
1810  // Get the personality function from the callee if it contains a landing pad.
1811  Constant *CalledPersonality =
1812  CalledFunc->hasPersonalityFn()
1813  ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1814  : nullptr;
1815 
1816  // Find the personality function used by the landing pads of the caller. If it
1817  // exists, then check to see that it matches the personality function used in
1818  // the callee.
1819  Constant *CallerPersonality =
1820  Caller->hasPersonalityFn()
1821  ? Caller->getPersonalityFn()->stripPointerCasts()
1822  : nullptr;
1823  if (CalledPersonality) {
1824  if (!CallerPersonality)
1825  Caller->setPersonalityFn(CalledPersonality);
1826  // If the personality functions match, then we can perform the
1827  // inlining. Otherwise, we can't inline.
1828  // TODO: This isn't 100% true. Some personality functions are proper
1829  // supersets of others and can be used in place of the other.
1830  else if (CalledPersonality != CallerPersonality)
1831  return InlineResult::failure("incompatible personality");
1832  }
1833 
1834  // We need to figure out which funclet the callsite was in so that we may
1835  // properly nest the callee.
1836  Instruction *CallSiteEHPad = nullptr;
1837  if (CallerPersonality) {
1838  EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1839  if (isScopedEHPersonality(Personality)) {
1840  Optional<OperandBundleUse> ParentFunclet =
1841  CB.getOperandBundle(LLVMContext::OB_funclet);
1842  if (ParentFunclet)
1843  CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1844 
1845  // OK, the inlining site is legal. What about the target function?
1846 
1847  if (CallSiteEHPad) {
1848  if (Personality == EHPersonality::MSVC_CXX) {
1849  // The MSVC personality cannot tolerate catches getting inlined into
1850  // cleanup funclets.
1851  if (isa<CleanupPadInst>(CallSiteEHPad)) {
1852  // Ok, the call site is within a cleanuppad. Let's check the callee
1853  // for catchpads.
1854  for (const BasicBlock &CalledBB : *CalledFunc) {
1855  if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1856  return InlineResult::failure("catch in cleanup funclet");
1857  }
1858  }
1859  } else if (isAsynchronousEHPersonality(Personality)) {
1860  // SEH is even less tolerant, there may not be any sort of exceptional
1861  // funclet in the callee.
1862  for (const BasicBlock &CalledBB : *CalledFunc) {
1863  if (CalledBB.isEHPad())
1864  return InlineResult::failure("SEH in cleanup funclet");
1865  }
1866  }
1867  }
1868  }
1869  }
1870 
1871  // Determine if we are dealing with a call in an EHPad which does not unwind
1872  // to caller.
1873  bool EHPadForCallUnwindsLocally = false;
1874  if (CallSiteEHPad && isa<CallInst>(CB)) {
1875  UnwindDestMemoTy FuncletUnwindMap;
1876  Value *CallSiteUnwindDestToken =
1877  getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1878 
1879  EHPadForCallUnwindsLocally =
1880  CallSiteUnwindDestToken &&
1881  !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1882  }
1883 
1884  // Get an iterator to the last basic block in the function, which will have
1885  // the new function inlined after it.
1886  Function::iterator LastBlock = --Caller->end();
1887 
1888  // Make sure to capture all of the return instructions from the cloned
1889  // function.
1890  SmallVector<ReturnInst*, 8> Returns;
1891  ClonedCodeInfo InlinedFunctionInfo;
1892  Function::iterator FirstNewBlock;
1893 
1894  { // Scope to destroy VMap after cloning.
1895  ValueToValueMapTy VMap;
1896  // Keep a list of pair (dst, src) to emit byval initializations.
1897  SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1898 
1899  // When inlining a function that contains noalias scope metadata,
1900  // this metadata needs to be cloned so that the inlined blocks
1901  // have different "unique scopes" at every call site.
1902  // Track the metadata that must be cloned. Do this before other changes to
1903  // the function, so that we do not get in trouble when inlining caller ==
1904  // callee.
1905  ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
1906 
1907  auto &DL = Caller->getParent()->getDataLayout();
1908 
1909  // Calculate the vector of arguments to pass into the function cloner, which
1910  // matches up the formal to the actual argument values.
1911  auto AI = CB.arg_begin();
1912  unsigned ArgNo = 0;
1913  for (Function::arg_iterator I = CalledFunc->arg_begin(),
1914  E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1915  Value *ActualArg = *AI;
1916 
1917  // When byval arguments are actually inlined, we need to make the copy implied
1918  // by them explicit. However, we don't do this if the callee is readonly
1919  // or readnone, because the copy would be unneeded: the callee doesn't
1920  // modify the struct.
1921  if (CB.isByValArgument(ArgNo)) {
1922  ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
1923  CalledFunc->getParamAlignment(ArgNo));
1924  if (ActualArg != *AI)
1925  ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1926  }
1927 
1928  VMap[&*I] = ActualArg;
1929  }
1930 
1931  // TODO: Remove this when users have been updated to the assume bundles.
1932  // Add alignment assumptions if necessary. We do this before the inlined
1933  // instructions are actually cloned into the caller so that we can easily
1934  // check what will be known at the start of the inlined code.
1935  AddAlignmentAssumptions(CB, IFI);
1936 
1937  AssumptionCache *AC =
1938  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1939 
1940  /// Preserve all attributes of the call and its parameters.
1941  salvageKnowledge(&CB, AC);
1942 
1943  // We want the inliner to prune the code as it copies. We would LOVE to
1944  // have no dead or constant instructions leftover after inlining occurs
1945  // (which can happen, e.g., because an argument was constant), but we'll be
1946  // happy with whatever the cloner can do.
1947  CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1948  /*ModuleLevelChanges=*/false, Returns, ".i",
1949  &InlinedFunctionInfo, &CB);
1950  // Remember the first block that is newly cloned over.
1951  FirstNewBlock = LastBlock; ++FirstNewBlock;
1952 
1953  // Insert retainRV/claimRV runtime calls.
1954  if (objcarc::hasAttachedCallOpBundle(&CB))
1955  inlineRetainOrClaimRVCalls(CB, Returns);
1956 
1957  // Update caller/callee profiles only when requested. For sample loader
1958  // inlining, the context-sensitive inlinee profile doesn't need to be
1959  // subtracted from callee profile, and the inlined clone also doesn't need
1960  // to be scaled based on call site count.
1961  if (IFI.UpdateProfile) {
1962  if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1963  // Update the BFI of blocks cloned into the caller.
1964  updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1965  CalledFunc->front());
1966 
1967  updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
1968  IFI.PSI, IFI.CallerBFI);
1969  }
1970 
1971  // Inject byval arguments initialization.
1972  for (std::pair<Value*, Value*> &Init : ByValInit)
1973  HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1974  &*FirstNewBlock, IFI);
1975 
1976  Optional<OperandBundleUse> ParentDeopt =
1977  CB.getOperandBundle(LLVMContext::OB_deopt);
1978  if (ParentDeopt) {
1979  SmallVector<OperandBundleDef, 2> OpDefs;
1980 
1981  for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1982  CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1983  if (!ICS)
1984  continue; // instruction was DCE'd or RAUW'ed to undef
1985 
1986  OpDefs.clear();
1987 
1988  OpDefs.reserve(ICS->getNumOperandBundles());
1989 
1990  for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1991  ++COBi) {
1992  auto ChildOB = ICS->getOperandBundleAt(COBi);
1993  if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1994  // If the inlined call has other operand bundles, let them be
1995  OpDefs.emplace_back(ChildOB);
1996  continue;
1997  }
1998 
1999  // It may be useful to separate this logic (of handling operand
2000  // bundles) out to a separate "policy" component if this gets crowded.
2001  // Prepend the parent's deoptimization continuation to the newly
2002  // inlined call's deoptimization continuation.
2003  std::vector<Value *> MergedDeoptArgs;
2004  MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2005  ChildOB.Inputs.size());
2006 
2007  llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2008  llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2009 
2010  OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2011  }
2012 
2013  Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
2014 
2015  // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2016  // this even if the call returns void.
2017  ICS->replaceAllUsesWith(NewI);
2018 
2019  VH = nullptr;
2020  ICS->eraseFromParent();
2021  }
2022  }
2023 
2024  // Update the callgraph if requested.
2025  if (IFI.CG)
2026  UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
2027 
2028  // For 'nodebug' functions, the associated DISubprogram is always null.
2029  // Conservatively avoid propagating the callsite debug location to
2030  // instructions inlined from a function whose DISubprogram is not null.
2031  fixupLineNumbers(Caller, FirstNewBlock, &CB,
2032  CalledFunc->getSubprogram() != nullptr);
2033 
2034  // Now clone the inlined noalias scope metadata.
2035  SAMetadataCloner.clone();
2036  SAMetadataCloner.remap(VMap);
2037 
2038  // Add noalias metadata if necessary.
2039  AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR);
2040 
2041  // Clone return attributes on the callsite into the calls within the inlined
2042  // function which feed into its return value.
2043  AddReturnAttributes(CB, VMap);
2044 
2045  // Propagate metadata on the callsite if necessary.
2046  PropagateCallSiteMetadata(CB, VMap);
2047 
2048  // Register any cloned assumptions.
2049  if (IFI.GetAssumptionCache)
2050  for (BasicBlock &NewBlock :
2051  make_range(FirstNewBlock->getIterator(), Caller->end()))
2052  for (Instruction &I : NewBlock)
2053  if (auto *II = dyn_cast<AssumeInst>(&I))
2054  IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2055  }
2056 
2057  // If there are any alloca instructions in the block that used to be the entry
2058  // block for the callee, move them to the entry block of the caller. First
2059  // calculate which instruction they should be inserted before. We insert the
2060  // instructions at the end of the current alloca list.
2061  {
2062  BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2063  for (BasicBlock::iterator I = FirstNewBlock->begin(),
2064  E = FirstNewBlock->end(); I != E; ) {
2065  AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2066  if (!AI) continue;
2067 
2068  // If the alloca is now dead, remove it. This often occurs due to code
2069  // specialization.
2070  if (AI->use_empty()) {
2071  AI->eraseFromParent();
2072  continue;
2073  }
2074 
2075  if (!allocaWouldBeStaticInEntry(AI))
2076  continue;
2077 
2078  // Keep track of the static allocas that we inline into the caller.
2079  IFI.StaticAllocas.push_back(AI);
2080 
2081  // Scan for the block of allocas that we can move over, and move them
2082  // all at once.
2083  while (isa<AllocaInst>(I) &&
2084  !cast<AllocaInst>(I)->use_empty() &&
2085  allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2086  IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2087  ++I;
2088  }
2089 
2090  // Transfer all of the allocas over in a block. Using splice means
2091  // that the instructions aren't removed from the symbol table, then
2092  // reinserted.
2093  Caller->getEntryBlock().getInstList().splice(
2094  InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
2095  }
2096  }
2097 
2098  SmallVector<Value*,4> VarArgsToForward;
2099  SmallVector<AttributeSet, 4> VarArgsAttrs;
2100  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2101  i < CB.getNumArgOperands(); i++) {
2102  VarArgsToForward.push_back(CB.getArgOperand(i));
2103  VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
2104  }
2105 
2106  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2107  if (InlinedFunctionInfo.ContainsCalls) {
2108  CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2109  if (CallInst *CI = dyn_cast<CallInst>(&CB))
2110  CallSiteTailKind = CI->getTailCallKind();
2111 
2112  // For inlining purposes, the "notail" marker is the same as no marker.
2113  if (CallSiteTailKind == CallInst::TCK_NoTail)
2114  CallSiteTailKind = CallInst::TCK_None;
2115 
2116  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2117  ++BB) {
2118  for (auto II = BB->begin(); II != BB->end();) {
2119  Instruction &I = *II++;
2120  CallInst *CI = dyn_cast<CallInst>(&I);
2121  if (!CI)
2122  continue;
2123 
2124  // Forward varargs from inlined call site to calls to the
2125  // ForwardVarArgsTo function, if requested, and to musttail calls.
2126  if (!VarArgsToForward.empty() &&
2127  ((ForwardVarArgsTo &&
2128  CI->getCalledFunction() == ForwardVarArgsTo) ||
2129  CI->isMustTailCall())) {
2130  // Collect attributes for non-vararg parameters.
2131  AttributeList Attrs = CI->getAttributes();
2132  SmallVector<AttributeSet, 8> ArgAttrs;
2133  if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2134  for (unsigned ArgNo = 0;
2135  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2136  ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2137  }
2138 
2139  // Add VarArg attributes.
2140  ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2141  Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
2142  Attrs.getRetAttributes(), ArgAttrs);
2143  // Add VarArgs to existing parameters.
2144  SmallVector<Value *, 6> Params(CI->arg_operands());
2145  Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2146  CallInst *NewCI = CallInst::Create(
2147  CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2148  NewCI->setDebugLoc(CI->getDebugLoc());
2149  NewCI->setAttributes(Attrs);
2150  NewCI->setCallingConv(CI->getCallingConv());
2151  CI->replaceAllUsesWith(NewCI);
2152  CI->eraseFromParent();
2153  CI = NewCI;
2154  }
2155 
2156  if (Function *F = CI->getCalledFunction())
2157  InlinedDeoptimizeCalls |=
2158  F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2159 
2160  // We need to reduce the strength of any inlined tail calls. For
2161  // musttail, we have to avoid introducing potential unbounded stack
2162  // growth. For example, if functions 'f' and 'g' are mutually recursive
2163  // with musttail, we can inline 'g' into 'f' so long as we preserve
2164  // musttail on the cloned call to 'f'. If either the inlined call site
2165  // or the cloned call site is *not* musttail, the program already has
2166  // one frame of stack growth, so it's safe to remove musttail. Here is
2167  // a table of example transformations:
2168  //
2169  // f -> musttail g -> musttail f ==> f -> musttail f
2170  // f -> musttail g -> tail f ==> f -> tail f
2171  // f -> g -> musttail f ==> f -> f
2172  // f -> g -> tail f ==> f -> f
2173  //
2174  // Inlined notail calls should remain notail calls.
2175  CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2176  if (ChildTCK != CallInst::TCK_NoTail)
2177  ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2178  CI->setTailCallKind(ChildTCK);
2179  InlinedMustTailCalls |= CI->isMustTailCall();
2180 
2181  // Calls inlined through a 'nounwind' call site should be marked
2182  // 'nounwind'.
2183  if (MarkNoUnwind)
2184  CI->setDoesNotThrow();
2185  }
2186  }
2187  }
2188 
2189  // Leave lifetime markers for the static allocas, scoping them to the
2190  // function we just inlined.
2191  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
2192  IRBuilder<> builder(&FirstNewBlock->front());
2193  for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2194  AllocaInst *AI = IFI.StaticAllocas[ai];
2195  // Don't mark swifterror allocas. They can't have bitcast uses.
2196  if (AI->isSwiftError())
2197  continue;
2198 
2199  // If the alloca is already scoped to something smaller than the whole
2200  // function then there's no need to add redundant, less accurate markers.
2201  if (hasLifetimeMarkers(AI))
2202  continue;
2203 
2204  // Try to determine the size of the allocation.
2205  ConstantInt *AllocaSize = nullptr;
2206  if (ConstantInt *AIArraySize =
2207  dyn_cast<ConstantInt>(AI->getArraySize())) {
2208  auto &DL = Caller->getParent()->getDataLayout();
2209  Type *AllocaType = AI->getAllocatedType();
2210  TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2211  uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2212 
2213  // Don't add markers for zero-sized allocas.
2214  if (AllocaArraySize == 0)
2215  continue;
2216 
2217  // Check that array size doesn't saturate uint64_t and doesn't
2218  // overflow when it's multiplied by type size.
2219  if (!AllocaTypeSize.isScalable() &&
2220  AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2221  std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2222  AllocaTypeSize.getFixedSize()) {
2223  AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2224  AllocaArraySize * AllocaTypeSize);
2225  }
2226  }
2227 
2228  builder.CreateLifetimeStart(AI, AllocaSize);
2229  for (ReturnInst *RI : Returns) {
2230  // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2231  // call and a return. The return kills all local allocas.
2232  if (InlinedMustTailCalls &&
2233  RI->getParent()->getTerminatingMustTailCall())
2234  continue;
2235  if (InlinedDeoptimizeCalls &&
2236  RI->getParent()->getTerminatingDeoptimizeCall())
2237  continue;
2238  IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2239  }
2240  }
2241  }
2242 
2243  // If the inlined code contained dynamic alloca instructions, wrap the inlined
2244  // code with llvm.stacksave/llvm.stackrestore intrinsics.
2245  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2246  Module *M = Caller->getParent();
2247  // Get the two intrinsics we care about.
2248  Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2249  Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
2250 
2251  // Insert the llvm.stacksave.
2252  CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2253  .CreateCall(StackSave, {}, "savedstack");
2254 
2255  // Insert a call to llvm.stackrestore before any return instructions in the
2256  // inlined function.
2257  for (ReturnInst *RI : Returns) {
2258  // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2259  // call and a return. The return will restore the stack pointer.
2260  if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2261  continue;
2262  if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2263  continue;
2264  IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2265  }
2266  }
2267 
2268  // If we are inlining for an invoke instruction, we must make sure to rewrite
2269  // any call instructions into invoke instructions. This is sensitive to which
2270  // funclet pads were top-level in the inlinee, so must be done before
2271  // rewriting the "parent pad" links.
2272  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2273  BasicBlock *UnwindDest = II->getUnwindDest();
2274  Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2275  if (isa<LandingPadInst>(FirstNonPHI)) {
2276  HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2277  } else {
2278  HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2279  }
2280  }
2281 
2282  // Update the lexical scopes of the new funclets and callsites.
2283  // Anything that had 'none' as its parent is now nested inside the callsite's
2284  // EHPad.
2285 
2286  if (CallSiteEHPad) {
2287  for (Function::iterator BB = FirstNewBlock->getIterator(),
2288  E = Caller->end();
2289  BB != E; ++BB) {
2290  // Add bundle operands to any top-level call sites.
2291  SmallVector<OperandBundleDef, 1> OpBundles;
2292  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2293  CallBase *I = dyn_cast<CallBase>(&*BBI++);
2294  if (!I)
2295  continue;
2296 
2297  // Skip call sites which are nounwind intrinsics.
2298  auto *CalledFn =
2299  dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
2300  if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2301  continue;
2302 
2303  // Skip call sites which already have a "funclet" bundle.
2304  if (I->getOperandBundle(LLVMContext::OB_funclet))
2305  continue;
2306 
2307  I->getOperandBundlesAsDefs(OpBundles);
2308  OpBundles.emplace_back("funclet", CallSiteEHPad);
2309 
2310  Instruction *NewInst = CallBase::Create(I, OpBundles, I);
2311  NewInst->takeName(I);
2312  I->replaceAllUsesWith(NewInst);
2313  I->eraseFromParent();
2314 
2315  OpBundles.clear();
2316  }
2317 
2318  // It is problematic if the inlinee has a cleanupret which unwinds to
2319  // caller and we inline it into a call site which doesn't unwind but into
2320  // an EH pad that does. Such an edge must be dynamically unreachable.
2321  // As such, we replace the cleanupret with unreachable.
2322  if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2323  if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2324  changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2325 
2326  Instruction *I = BB->getFirstNonPHI();
2327  if (!I->isEHPad())
2328  continue;
2329 
2330  if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2331  if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2332  CatchSwitch->setParentPad(CallSiteEHPad);
2333  } else {
2334  auto *FPI = cast<FuncletPadInst>(I);
2335  if (isa<ConstantTokenNone>(FPI->getParentPad()))
2336  FPI->setParentPad(CallSiteEHPad);
2337  }
2338  }
2339  }
2340 
2341  if (InlinedDeoptimizeCalls) {
2342  // We need to at least remove the deoptimizing returns from the Return set,
2343  // so that the control flow from those returns does not get merged into the
2344  // caller (but terminate it instead). If the caller's return type does not
2345  // match the callee's return type, we also need to change the return type of
2346  // the intrinsic.
2347  if (Caller->getReturnType() == CB.getType()) {
2348  llvm::erase_if(Returns, [](ReturnInst *RI) {
2349  return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2350  });
2351  } else {
2352  SmallVector<ReturnInst *, 8> NormalReturns;
2353  Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2354  Caller->getParent(), Intrinsic::experimental_deoptimize,
2355  {Caller->getReturnType()});
2356 
2357  for (ReturnInst *RI : Returns) {
2358  CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2359  if (!DeoptCall) {
2360  NormalReturns.push_back(RI);
2361  continue;
2362  }
2363 
2364  // The calling convention on the deoptimize call itself may be bogus,
2365  // since the code we're inlining may have undefined behavior (and may
2366  // never actually execute at runtime); but all
2367  // @llvm.experimental.deoptimize declarations have to have the same
2368  // calling convention in a well-formed module.
2369  auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2370  NewDeoptIntrinsic->setCallingConv(CallingConv);
2371  auto *CurBB = RI->getParent();
2372  RI->eraseFromParent();
2373 
2374  SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2375 
2376  SmallVector<OperandBundleDef, 1> OpBundles;
2377  DeoptCall->getOperandBundlesAsDefs(OpBundles);
2378  DeoptCall->eraseFromParent();
2379  assert(!OpBundles.empty() &&
2380  "Expected at least the deopt operand bundle");
2381 
2382  IRBuilder<> Builder(CurBB);
2383  CallInst *NewDeoptCall =
2384  Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2385  NewDeoptCall->setCallingConv(CallingConv);
2386  if (NewDeoptCall->getType()->isVoidTy())
2387  Builder.CreateRetVoid();
2388  else
2389  Builder.CreateRet(NewDeoptCall);
2390  }
2391 
2392  // Leave behind the normal returns so we can merge control flow.
2393  std::swap(Returns, NormalReturns);
2394  }
2395  }
2396 
2397  // Handle any inlined musttail call sites. In order for a new call site to be
2398  // musttail, the source of the clone and the inlined call site must have been
2399  // musttail. Therefore it's safe to return without merging control into the
2400  // phi below.
2401  if (InlinedMustTailCalls) {
2402  // Check if we need to bitcast the result of any musttail calls.
2403  Type *NewRetTy = Caller->getReturnType();
2404  bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2405 
2406  // Handle the returns preceded by musttail calls separately.
2407  SmallVector<ReturnInst *, 8> NormalReturns;
2408  for (ReturnInst *RI : Returns) {
2409  CallInst *ReturnedMustTail =
2410  RI->getParent()->getTerminatingMustTailCall();
2411  if (!ReturnedMustTail) {
2412  NormalReturns.push_back(RI);
2413  continue;
2414  }
2415  if (!NeedBitCast)
2416  continue;
2417 
2418  // Delete the old return and any preceding bitcast.
2419  BasicBlock *CurBB = RI->getParent();
2420  auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2421  RI->eraseFromParent();
2422  if (OldCast)
2423  OldCast->eraseFromParent();
2424 
2425  // Insert a new bitcast and return with the right type.
2426  IRBuilder<> Builder(CurBB);
2427  Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2428  }
2429 
2430  // Leave behind the normal returns so we can merge control flow.
2431  std::swap(Returns, NormalReturns);
2432  }
2433 
2434  // Now that all of the transforms on the inlined code have taken place but
2435  // before we splice the inlined code into the CFG and lose track of which
2436  // blocks were actually inlined, collect the call sites. We only do this if
2437  // call graph updates weren't requested, as those provide value handle based
2438  // tracking of inlined call sites instead.
2439  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2440  // Otherwise just collect the raw call sites that were inlined.
2441  for (BasicBlock &NewBB :
2442  make_range(FirstNewBlock->getIterator(), Caller->end()))
2443  for (Instruction &I : NewBB)
2444  if (auto *CB = dyn_cast<CallBase>(&I))
2445  IFI.InlinedCallSites.push_back(CB);
2446  }
2447 
2448  // If we cloned in _exactly one_ basic block, and if that block ends in a
2449  // return instruction, we splice the body of the inlined callee directly into
2450  // the calling basic block.
2451  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2452  // Move all of the instructions right before the call.
2453  OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2454  FirstNewBlock->begin(), FirstNewBlock->end());
2455  // Remove the cloned basic block.
2456  Caller->getBasicBlockList().pop_back();
2457 
2458  // If the call site was an invoke instruction, add a branch to the normal
2459  // destination.
2460  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2461  BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2462  NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2463  }
2464 
2465  // If the return instruction returned a value, replace uses of the call with
2466  // uses of the returned value.
2467  if (!CB.use_empty()) {
2468  ReturnInst *R = Returns[0];
2469  if (&CB == R->getReturnValue())
2470  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2471  else
2472  CB.replaceAllUsesWith(R->getReturnValue());
2473  }
2474  // Since we are now done with the Call/Invoke, we can delete it.
2475  CB.eraseFromParent();
2476 
2477  // Since we are now done with the return instruction, delete it also.
2478  Returns[0]->eraseFromParent();
2479 
2480  // We are now done with the inlining.
2481  return InlineResult::success();
2482  }
2483 
2484  // Otherwise, we have the normal case, of more than one block to inline or
2485  // multiple return sites.
2486 
2487  // We want to clone the entire callee function into the hole between the
2488  // "starter" and "ender" blocks. How we accomplish this depends on whether
2489  // this is an invoke instruction or a call instruction.
2490  BasicBlock *AfterCallBB;
2491  BranchInst *CreatedBranchToNormalDest = nullptr;
2492  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2493 
2494  // Add an unconditional branch to make this look like the CallInst case...
2495  CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2496 
2497  // Split the basic block. This guarantees that no PHI nodes will have to be
2498  // updated due to new incoming edges, and makes the invoke case more
2499  // symmetric to the call case.
2500  AfterCallBB =
2501  OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2502  CalledFunc->getName() + ".exit");
2503 
2504  } else { // It's a call
2505  // If this is a call instruction, we need to split the basic block that
2506  // the call lives in.
2507  //
2508  AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2509  CalledFunc->getName() + ".exit");
2510  }
2511 
2512  if (IFI.CallerBFI) {
2513  // Copy original BB's block frequency to AfterCallBB
2514  IFI.CallerBFI->setBlockFreq(
2515  AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2516  }
2517 
2518  // Change the branch that used to go to AfterCallBB to branch to the first
2519  // basic block of the inlined function.
2520  //
2521  Instruction *Br = OrigBB->getTerminator();
2522  assert(Br && Br->getOpcode() == Instruction::Br &&
2523  "splitBasicBlock broken!");
2524  Br->setOperand(0, &*FirstNewBlock);
2525 
2526  // Now that the function is correct, make it a little bit nicer. In
2527  // particular, move the basic blocks inserted from the end of the function
2528  // into the space made by splitting the source basic block.
2529  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2530  Caller->getBasicBlockList(), FirstNewBlock,
2531  Caller->end());
2532 
2533  // Handle all of the return instructions that we just cloned in, and eliminate
2534  // any users of the original call/invoke instruction.
2535  Type *RTy = CalledFunc->getReturnType();
2536 
2537  PHINode *PHI = nullptr;
2538  if (Returns.size() > 1) {
2539  // The PHI node should go at the front of the new basic block to merge all
2540  // possible incoming values.
2541  if (!CB.use_empty()) {
2542  PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2543  &AfterCallBB->front());
2544  // Anything that used the result of the function call should now use the
2545  // PHI node as their operand.
2546  CB.replaceAllUsesWith(PHI);
2547  }
2548 
2549  // Loop over all of the return instructions adding entries to the PHI node
2550  // as appropriate.
2551  if (PHI) {
2552  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2553  ReturnInst *RI = Returns[i];
2554  assert(RI->getReturnValue()->getType() == PHI->getType() &&
2555  "Ret value not consistent in function!");
2556  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2557  }
2558  }
2559 
2560  // Add a branch to the merge points and remove return instructions.
2561  DebugLoc Loc;
2562  for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2563  ReturnInst *RI = Returns[i];
2564  BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2565  Loc = RI->getDebugLoc();
2566  BI->setDebugLoc(Loc);
2567  RI->eraseFromParent();
2568  }
2569  // We need to set the debug location to *somewhere* inside the
2570  // inlined function. The line number may be nonsensical, but the
2571  // instruction will at least be associated with the right
2572  // function.
2573  if (CreatedBranchToNormalDest)
2574  CreatedBranchToNormalDest->setDebugLoc(Loc);
2575  } else if (!Returns.empty()) {
2576  // Otherwise, if there is exactly one return value, just replace anything
2577  // using the return value of the call with the computed value.
2578  if (!CB.use_empty()) {
2579  if (&CB == Returns[0]->getReturnValue())
2580  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2581  else
2582  CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2583  }
2584 
2585  // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2586  BasicBlock *ReturnBB = Returns[0]->getParent();
2587  ReturnBB->replaceAllUsesWith(AfterCallBB);
2588 
2589  // Splice the code from the return block into the block that it will return
2590  // to, which contains the code that was after the call.
2591  AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2592  ReturnBB->getInstList());
2593 
2594  if (CreatedBranchToNormalDest)
2595  CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2596 
2597  // Delete the return instruction and the now-empty ReturnBB.
2598  Returns[0]->eraseFromParent();
2599  ReturnBB->eraseFromParent();
2600  } else if (!CB.use_empty()) {
2601  // No returns, but something is using the return value of the call. Just
2602  // nuke the result.
2603  CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2604  }
2605 
2606  // Since we are now done with the Call/Invoke, we can delete it.
2607  CB.eraseFromParent();
2608 
2609  // If we inlined any musttail calls and the original return is now
2610  // unreachable, delete it. It can only contain a bitcast and ret.
2611  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
2612  AfterCallBB->eraseFromParent();
2613 
2614  // We should always be able to fold the entry block of the function into the
2615  // single predecessor of the block...
2616  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2617  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2618 
2619  // Splice the code entry block into calling block, right before the
2620  // unconditional branch.
2621  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2622  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2623 
2624  // Remove the unconditional branch.
2625  OrigBB->getInstList().erase(Br);
2626 
2627  // Now we can remove the CalleeEntry block, which is now empty.
2628  Caller->getBasicBlockList().erase(CalleeEntry);
2629 
2630  // If we inserted a phi node, check to see if it has a single value (e.g. all
2631  // the entries are the same or undef). If so, remove the PHI so it doesn't
2632  // block other optimizations.
2633  if (PHI) {
2634  AssumptionCache *AC =
2635  IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2636  auto &DL = Caller->getParent()->getDataLayout();
2637  if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2638  PHI->replaceAllUsesWith(V);
2639  PHI->eraseFromParent();
2640  }
2641  }
2642 
2643  return InlineResult::success();
2644 }
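
// [Editor's note] Illustrative usage sketch, not part of the original source.
// A client might drive this entry point roughly as follows; the surrounding
// setup and the statistics handling are hypothetical:
//
//   InlineFunctionInfo IFI(/*cg=*/nullptr, GetAssumptionCache, PSI,
//                          CallerBFI, CalleeBFI);
//   InlineResult IR = InlineFunction(*CB, IFI, &AAR, /*InsertLifetime=*/true);
//   if (IR.isSuccess()) {
//     // One level of the callee's body now replaces the call; calls cloned
//     // into the caller are available in IFI.InlinedCallSites for further
//     // (iterative) inlining decisions.
//   } else {
//     LLVM_DEBUG(dbgs() << "not inlined: " << IR.getFailureReason() << "\n");
//   }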
llvm::LandingPadInst::isCleanup
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
Definition: Instructions.h:2869
getUnwindDestToken
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
Definition: InlineFunction.cpp:394
llvm::CallBase::getOperandBundlesAsDefs
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition: Instructions.cpp:361
llvm::Function::args
iterator_range< arg_iterator > args()
Definition: Function.h:803
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
llvm::Instruction::getModule
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
llvm::AArch64CC::AL
@ AL
Definition: AArch64BaseInfo.h:250
llvm::CallGraphNode::iterator
std::vector< CallRecord >::iterator iterator
Definition: CallGraph.h:194
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition: iterator_range.h:53
llvm::AssumptionCache::registerAssumption
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Definition: AssumptionCache.cpp:217
llvm::ReturnInst
Return a value (possibly void), from a function.
Definition: Instructions.h:2923
llvm::VAArgInst
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Definition: Instructions.h:1810
llvm::ClonedCodeInfo
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition: Cloning.h:64
llvm::CallBase::getOperandBundle
Optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:1978
Optional.h
ValueMapper.h
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
allocaWouldBeStaticInEntry
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
Definition: InlineFunction.cpp:1478
llvm::IRBuilderBase::CreateLifetimeEnd
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
Definition: IRBuilder.cpp:424
MayContainThrowingOrExitingCall
static bool MayContainThrowingOrExitingCall(Instruction *Begin, Instruction *End)
Definition: InlineFunction.cpp:1177
llvm::Intrinsic::getDeclaration
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1291
Metadata.h
llvm::Type::getInt8PtrTy
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:256
llvm::Function::end
iterator end()
Definition: Function.h:766
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:90
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
IntrinsicInst.h
DebugInfoMetadata.h
llvm::ValueMap::end
iterator end()
Definition: ValueMap.h:136
llvm::objcarc::getAttachedCallOperandBundleEnum
AttachedCallOperandBundle getAttachedCallOperandBundleEnum(bool IsRetain)
Definition: ObjCARCUtil.h:30
llvm::TypeSize::getFixedSize
ScalarTy getFixedSize() const
Definition: TypeSize.h:426
llvm::PointerType::getElementType
Type * getElementType() const
Definition: DerivedTypes.h:653
llvm::Function
Definition: Function.h:61
fixupLineNumbers
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined.
Definition: InlineFunction.cpp:1494
getUnwindDestTokenHelper
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
Definition: InlineFunction.cpp:243
llvm::ReturnInst::getReturnValue
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
Definition: Instructions.h:2968
llvm::AllocaInst::getType
PointerType * getType() const
Overload to return most specific pointer type.
Definition: Instructions.h:103
llvm::SmallVector< Value *, 8 >
llvm::CallInst::setTailCallKind
void setTailCallKind(TailCallKind TCK)
Definition: Instructions.h:1653
llvm::LandingPadInst
The landingpad instruction holds all of the information necessary to generate correct exception handling.
Definition: Instructions.h:2822
CaptureTracking.h
llvm::CallGraphNode::removeCallEdgeFor
void removeCallEdgeFor(CallBase &Call)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:214
llvm::CallBase::isInlineAsm
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1462
llvm::Function::getSubprogram
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1526
ErrorHandling.h
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:693
llvm::IRBuilder<>
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if, which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
Definition: STLExtras.h:1674
ValueTracking.h
Local.h
llvm::AttributeList::get
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute >> Attrs)
Create an AttributeList with the specified parameters in it.
Definition: Attributes.cpp:1175
llvm::CallGraph
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:73
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::updateLoopMetadataDebugLocations
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< DILocation *(const DILocation &)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
Definition: DebugInfo.cpp:290
llvm::cl::Hidden
@ Hidden
Definition: CommandLine.h:140
llvm::AAResults::onlyAccessesArgPointees
static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB)
Checks if functions with the specified behavior are known to read and write at most from objects pointed to by their pointer-typed arguments (with arbitrary offsets).
Definition: AliasAnalysis.h:637
llvm::CallBase::addOperandBundle
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
Definition: Instructions.cpp:440
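A hedged sketch of the usual pattern around addOperandBundle: because it returns a new instruction, the original call has to be replaced and erased. The helper and names below are hypothetical:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"

// Clone call CI with an added "funclet" operand bundle carrying FuncletToken,
// then forward uses of the old call to the clone and delete the original.
static llvm::CallBase *addFuncletBundle(llvm::CallBase *CI,
                                        llvm::Value *FuncletToken) {
  llvm::OperandBundleDef OB("funclet", FuncletToken);
  llvm::CallBase *NewCall = llvm::CallBase::addOperandBundle(
      CI, llvm::LLVMContext::OB_funclet, OB, CI);
  NewCall->takeName(CI);
  CI->replaceAllUsesWith(NewCall);
  CI->eraseFromParent();
  return NewCall;
}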
llvm::DILocation
Debug location.
Definition: DebugInfoMetadata.h:1558
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:312
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:46
DenseMap.h
Module.h
llvm::BasicBlock::eraseFromParent
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition: BasicBlock.cpp:129
llvm::AttributeList
Definition: Attributes.h:375
llvm::getOrEnforceKnownAlignment
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition: Local.cpp:1328
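A rough usage sketch under assumed surrounding state (the analyses may be null); it shows how a caller can query, and where legal raise, a pointer's known alignment:

#include "llvm/IR/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"

// Try to establish a 16-byte alignment for Ptr at CtxI and report whether
// the resulting known alignment reaches that bound.
static bool hasAtLeast16ByteAlignment(llvm::Value *Ptr,
                                      const llvm::DataLayout &DL,
                                      llvm::Instruction *CtxI,
                                      llvm::AssumptionCache *AC,
                                      llvm::DominatorTree *DT) {
  llvm::Align Known =
      llvm::getOrEnforceKnownAlignment(Ptr, llvm::MaybeAlign(16), DL, CtxI, AC, DT);
  return Known.value() >= 16;
}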
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1472
llvm::OperandBundleDefT
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1117
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1244
EHPersonalities.h
llvm::updateProfileCallee
void updateProfileCallee(Function *Callee, int64_t entryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding entryDelta then scaling callsite information by the new count divided by the old count.
Definition: InlineFunction.cpp:1617
llvm::BasicBlock::splitBasicBlock
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:375
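As an illustration only (hypothetical helper), splitting a block immediately after a call, the same shape of CFG surgery the inliner performs around a call site:

#include <iterator>
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

// Split CI's block so the call stays in the original block and everything
// after it moves to a new block named after the call.
static llvm::BasicBlock *splitAfterCall(llvm::CallInst *CI) {
  llvm::BasicBlock *BB = CI->getParent();
  return BB->splitBasicBlock(std::next(CI->getIterator()),
                             CI->getName() + ".cont");
}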
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:345
llvm::Optional
Definition: APInt.h:33
llvm::DenseMapBase::count
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:145
llvm::ProfileSummaryInfo::getProfileCount
Optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Definition: ProfileSummaryInfo.cpp:113
llvm::SmallPtrSet< Instruction *, 4 >
llvm::CallBase::isByValArgument
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1657
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:138
llvm::Function::ProfileCount::isSynthetic
bool isSynthetic() const
Definition: Function.h:295
llvm::CallBase::getNumArgOperands
unsigned getNumArgOperands() const
Definition: InstrTypes.h:1339
HandleCallsInBlockInlinedThroughInvoke
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into invokes.
Definition: InlineFunction.cpp:538
STLExtras.h
llvm::CallBase::arg_begin
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1306
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::CallInst::TCK_None
@ TCK_None
Definition: Instructions.h:1628
llvm::CallBase::setDoesNotThrow
void setDoesNotThrow()
Definition: InstrTypes.h:1848
llvm::PointerMayBeCapturedBefore
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing function (which is required to exist), before the given instruction I.
Definition: CaptureTracking.cpp:221
llvm::uniteAccessGroups
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
Definition: VectorUtils.cpp:642
llvm::BasicBlock::rend
reverse_iterator rend()
Definition: BasicBlock.h:303
llvm::LinearPolySize::isScalable
bool isScalable() const
Returns whether the size is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:299
llvm::CallGraphNode::addCalledFunction
void addCalledFunction(CallBase *Call, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:243
llvm::MDBuilder::createAnonymousAliasScope
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
Definition: MDBuilder.h:140
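A brief sketch (hypothetical helper) of building the metadata this file attaches for noalias arguments: one anonymous domain plus one scope inside it:

#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"

// Create a fresh alias-scope domain named after a function and return one
// anonymous scope inside it, usable in !alias.scope / !noalias lists.
static llvm::MDNode *makeArgScope(llvm::LLVMContext &Ctx,
                                  llvm::StringRef FnName) {
  llvm::MDBuilder MDB(Ctx);
  llvm::MDNode *Domain = MDB.createAnonymousAliasScopeDomain(FnName);
  return MDB.createAnonymousAliasScope(Domain, "arg");
}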
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1198
llvm::InlineFunctionInfo::CallerBFI
BlockFrequencyInfo * CallerBFI
Definition: Cloning.h:210
llvm::Instruction::setMetadata
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1330
llvm::InlineFunctionInfo::PSI
ProfileSummaryInfo * PSI
Definition: Cloning.h:209
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
AliasAnalysis.h
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function level.
Definition: AliasAnalysis.cpp:985
llvm::classifyEHPersonality
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Definition: EHPersonalities.cpp:21
Instruction.h
CommandLine.h
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:77
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:160
llvm::BlockFrequencyInfo
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequencies.
Definition: BlockFrequencyInfo.h:37
llvm::CallGraphNode::end
iterator end()
Definition: CallGraph.h:201
llvm::InlineFunctionInfo::CG
CallGraph * CG
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition: Cloning.h:207
llvm::GlobalValue::isDeclaration
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:228
llvm::Instruction::mayReadOrWriteMemory
bool mayReadOrWriteMemory() const
Return true if this instruction may read or write memory.
Definition: Instruction.h:586
Constants.h
llvm::AAResults
Definition: AliasAnalysis.h:456
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:112
UpdateReturnAttributes
static cl::opt< bool > UpdateReturnAttributes("update-return-attrs", cl::init(true), cl::Hidden, cl::desc("Update return attributes on calls within inlined body"))
llvm::DebugLoc::getCol
unsigned getCol() const
Definition: DebugLoc.cpp:30
llvm::SmallVectorImpl::append
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:648
llvm::InvokeInst::getLandingPadInst
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
Definition: Instructions.cpp:876
llvm::User
Definition: User.h:44
llvm::getKnownAlignment
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition: Local.h:224
Intrinsics.h
llvm::CleanupReturnInst::Create
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:4561
llvm::LandingPadInst::getNumClauses
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Definition: Instructions.h:2894
InstrTypes.h
llvm::CallBase::getCalledFunction
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation.
Definition: InstrTypes.h:1396
llvm::CallBase::setAttributes
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1476
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:296
UpdatePHINodes
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
Definition: BasicBlockUtils.cpp:994
llvm::objcarc::hasAttachedCallOpBundle
bool hasAttachedCallOpBundle(const CallBase *CB, bool IsRetain)
Definition: ObjCARCUtil.h:34
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1493
llvm::MDTuple
Tuple of metadata.
Definition: Metadata.h:1139
llvm::AttributeList::getParamAttributes
AttributeSet getParamAttributes(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
Definition: Attributes.cpp:1526
AssumeBundleBuilder.h
llvm::BlockFrequencyInfo::setBlockFreq
void setBlockFreq(const BasicBlock *BB, uint64_t Freq)
Definition: BlockFrequencyInfo.cpp:227
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:119
llvm::ClonedCodeInfo::OperandBundleCallSites
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition: Cloning.h:76
llvm::Function::arg_end
arg_iterator arg_end()
Definition: Function.h:788
llvm::PHINode::getIncomingValueForBlock
Value * getIncomingValueForBlock(const BasicBlock *BB) const
Definition: Instructions.h:2755
llvm::AttrBuilder::empty
bool empty() const
Return true if the builder contains no target-independent attributes.
Definition: Attributes.h:969
llvm::Instruction
Definition: Instruction.h:45
llvm::CloneAndPruneFunctionInto
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, Instruction *TheCall=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on the fly.
Definition: CloneFunction.cpp:775
llvm::SimplifyInstruction
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
Definition: InstructionSimplify.cpp:5852
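An illustrative sketch (assumed helper) of the common pattern: simplify an instruction and, if a simpler value exists, forward all uses to it and erase the original:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Try to fold a PHI (e.g. one that became trivial after inlining); on
// success, replace its uses with the simplified value and delete it.
static bool foldTrivialPHI(llvm::PHINode *PN, const llvm::DataLayout &DL) {
  if (llvm::Value *V = llvm::SimplifyInstruction(PN, llvm::SimplifyQuery(DL))) {
    PN->replaceAllUsesWith(V);
    PN->eraseFromParent();
    return true;
  }
  return false;
}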
MDBuilder.h
llvm::AllocaInst::getArraySize
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:99
HandleInlinedLandingPad
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invokes.
Definition: InlineFunction.cpp:599
llvm::Function::hasPersonalityFn
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition: Function.h:816
llvm::LLVMContext::OB_deopt
@ OB_deopt
Definition: LLVMContext.h:90
ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner
ScopedAliasMetadataDeepCloner(const Function *F)
Definition: InlineFunction.cpp:854
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1770
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:885
DebugLoc.h
SmallPtrSet.h
llvm::CallGraphNode
A node in the call graph for a module.
Definition: CallGraph.h:167
llvm::ValueMap::begin
iterator begin()
Definition: ValueMap.h:135
isUsedByLifetimeMarker
static bool isUsedByLifetimeMarker(Value *V)
Definition: InlineFunction.cpp:1448
llvm::BasicBlock::getFirstNonPHI
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:212
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::Metadata
Root of the metadata hierarchy.
Definition: Metadata.h:62
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1453
llvm::ValueMap::count
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: ValueMap.h:152
llvm::LLVMContext::OB_clang_arc_attachedcall
@ OB_clang_arc_attachedcall
Definition: LLVMContext.h:96
llvm::Instruction::isLifetimeStartOrEnd
bool isLifetimeStartOrEnd() const
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
Definition: Instruction.cpp:677
llvm::None
const NoneType None
Definition: None.h:23
llvm::Value::use_empty
bool use_empty() const
Definition: Value.h:357
Type.h
getParentPad
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
Definition: InlineFunction.cpp:233
llvm::CallBase::getCaller
Function * getCaller()
Helper to get the caller (the parent function).
Definition: Instructions.cpp:278
llvm::DebugLoc::appendInlinedAt
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-at the new call site.
Definition: DebugLoc.cpp:71
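A sketch of typical use (hypothetical helper, mirroring the shape of inlineDebugLoc further down this page): give an instruction an inlined-at link to the call site's location:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"

// Rewrite I's debug location so it reads as inlined at CallSiteLoc, reusing
// Cache so identical inlined-at chains are built only once.
static void markInlinedAt(llvm::Instruction &I, llvm::DILocation *CallSiteLoc,
                          llvm::LLVMContext &Ctx,
                          llvm::DenseMap<const llvm::MDNode *, llvm::MDNode *> &Cache) {
  if (llvm::DebugLoc DL = I.getDebugLoc())
    I.setDebugLoc(llvm::DebugLoc::appendInlinedAt(DL, CallSiteLoc, Ctx, Cache));
}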
llvm::MDBuilder::createAnonymousAliasScopeDomain
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
Definition: MDBuilder.h:133
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:277
llvm::OperandBundleUse::getTagID
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Definition: InstrTypes.h:1087
CFG.h
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:147
llvm::ProfileSummaryInfo
Analysis providing profile information.
Definition: ProfileSummaryInfo.h:39
llvm::InvokeInst
Invoke instruction.
Definition: Instructions.h:3686
inlineRetainOrClaimRVCalls
static void inlineRetainOrClaimRVCalls(CallBase &CB, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed by a call to retainRV or claimRV immediately after the call.
Definition: InlineFunction.cpp:1673
llvm::Function::getGC
const std::string & getGC() const
Definition: Function.cpp:618
llvm::cl::ZeroOrMore
@ ZeroOrMore
Definition: CommandLine.h:117
VectorUtils.h
BasicBlock.h
llvm::cl::opt< bool >
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:378
llvm::StoreInst
An instruction for storing to memory.
Definition: Instructions.h:303
llvm::Constant
This is an important base class in LLVM.
Definition: Constant.h:41
llvm::getUnderlyingObjects
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions.
Definition: ValueTracking.cpp:4341
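For illustration (hypothetical helper), the usual collect-then-inspect pattern with getUnderlyingObjects:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

// Collect the objects Ptr may be based on and report whether every one of
// them is a (function-local) alloca.
static bool basedOnlyOnAllocas(const llvm::Value *Ptr) {
  llvm::SmallVector<const llvm::Value *, 4> Objects;
  llvm::getUnderlyingObjects(Ptr, Objects);
  for (const llvm::Value *Obj : Objects)
    if (!llvm::isa<llvm::AllocaInst>(Obj))
      return false;
  return !Objects.empty();
}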
llvm::Instruction::eraseFromParent
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:78
HandleByValArgumentInit
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI)
Definition: InlineFunction.cpp:1379
llvm::Function::getReturnType
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:170
UseNoAliasIntrinsic
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
inlineDebugLoc
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
Definition: InlineFunction.cpp:1484
llvm::InlineFunctionInfo::InlinedCalls
SmallVector< WeakTrackingVH, 8 > InlinedCalls
InlineFunction fills this in with callsites that were inlined from the callee.
Definition: Cloning.h:218
ProfileSummaryInfo.h
llvm::Function::getCallingConv
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:228
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:572
llvm::CallInst::TailCallKind
TailCallKind
Definition: Instructions.h:1627
hasLifetimeMarkers
static bool hasLifetimeMarkers(AllocaInst *AI)
Definition: InlineFunction.cpp:1458
llvm::Function::hasGC
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition: Function.h:383
llvm::changeToUnreachable
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the code in the block dead.
Definition: Local.cpp:2131
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition: Instructions.h:2720
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::BranchInst::Create
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:3061
llvm::DenseMap
Definition: DenseMap.h:714
llvm::DebugLoc::get
DILocation * get() const
Get the underlying DILocation.
Definition: DebugLoc.cpp:21
llvm::AttrBuilder
Definition: Attributes.h:786
Cloning.h
StringExtras.h
llvm::BlockFrequency::getFrequency
uint64_t getFrequency() const
Returns the frequency as a fixpoint number scaled by the entry frequency.
Definition: BlockFrequency.h:35
llvm::isScopedEHPersonality
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch, catchpad/ret, and cleanuppad/ret.
Definition: EHPersonalities.h:80
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:440
DIBuilder.h
UpdateCallGraphAfterInlining
static void UpdateCallGraphAfterInlining(CallBase &CB, Function::iterator FirstNewBlock, ValueToValueMapTy &VMap, InlineFunctionInfo &IFI)
Once we have cloned code over from a callee into the caller, update the specified callgraph to reflect the changes we made.
Definition: InlineFunction.cpp:1308
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:634
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:362
llvm::LandingPadInst::getClause
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
Definition: Instructions.h:2879
AddAlignmentAssumptions
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumptions to preserve this information.
Definition: InlineFunction.cpp:1270
ScopedAliasMetadataDeepCloner
Utility for cloning !noalias and !alias.scope metadata.
Definition: InlineFunction.cpp:836
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
IRBuilder.h
llvm::ValueMapIterator::ValueTypeProxy::second
ValueT & second
Definition: ValueMap.h:346
llvm::CallBase::hasOperandBundles
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:1903
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:958
iterator_range.h
llvm::Type::isVoidTy
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
llvm::salvageKnowledge
void salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
Definition: AssumeBundleBuilder.cpp:291
AddAliasScopeMetadata
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
Definition: InlineFunction.cpp:947
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
PropagateCallSiteMetadata
static void PropagateCallSiteMetadata(CallBase &CB, ValueToValueMapTy &VMap)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group, !alias.scope or !noalias metadata, that metadata should be propagated to all memory-accessing cloned instructions.
Definition: InlineFunction.cpp:785
llvm::MDNode
Metadata node.
Definition: Metadata.h:897
llvm::CallBase::Create
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, Instruction *InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
Definition: Instructions.cpp:251
llvm::changeToInvokeAndSplitBasicBlock
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition: Local.cpp:2223
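A minimal sketch (hypothetical helper) of the transformation this utility performs for the inliner: a may-throw call becomes an invoke whose unwind edge is the caller's landing pad block:

#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

// If CI can throw, rewrite it as an invoke unwinding to UnwindEdge; the
// utility splits CI's block and installs the new terminator itself.
static void convertCallToInvoke(llvm::CallInst *CI,
                                llvm::BasicBlock *UnwindEdge) {
  if (!CI->doesNotThrow())
    llvm::changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
}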
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::IRBuilderBase::CreateNoAliasScopeDeclaration
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
Definition: IRBuilder.cpp:472
llvm::CallBase::getIntrinsicID
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called, or Intrinsic::not_intrinsic if the called function is not an intrinsic, or if this is an indirect call.
Definition: Instructions.cpp:307
llvm::DominatorTreeBase::recalculate
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Definition: GenericDomTree.h:778
None.h
DataLayout.h
llvm::Function::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition: Function.h:232
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition: AssumptionCache.h:41
llvm::CallBase::getOperandBundleAt
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:1947
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::Function::getEntryCount
ProfileCount getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Definition: Function.cpp:1792
llvm::MDNode::concatenate
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition: Metadata.cpp:908
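A short sketch (hypothetical helper) of the merge pattern concatenate is used for when the inliner layers new !noalias scopes onto existing metadata:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

// Union NewScopes into whatever !noalias list I already carries; concatenate
// tolerates a null first operand, so instructions without metadata work too.
static void appendNoAliasScopes(llvm::Instruction &I, llvm::MDNode *NewScopes) {
  llvm::MDNode *Existing = I.getMetadata(llvm::LLVMContext::MD_noalias);
  I.setMetadata(llvm::LLVMContext::MD_noalias,
                llvm::MDNode::concatenate(Existing, NewScopes));
}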
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:526
uint32_t
AddReturnAttributes
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap)
Definition: InlineFunction.cpp:1212
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
Definition: STLExtras.h:1690
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:937
HandleByValArgument
static Value * HandleByValArgument(Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, unsigned ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by adding it.
Definition: InlineFunction.cpp:1396
llvm::ModRefInfo::Mod
@ Mod
The access may modify the value stored in memory.
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition: ilist_node.h:81
PreserveAlignmentAssumptions
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
llvm::pred_empty
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:119
llvm::FunctionModRefBehavior
FunctionModRefBehavior
Summary of how a function affects memory in the program.
Definition: AliasAnalysis.h:262
llvm::CallInst::isMustTailCall
bool isMustTailCall() const
Definition: Instructions.h:1649
llvm::MDTuple::getTemporary
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition: Metadata.h:1185
BlockFrequencyInfo.h
llvm::MDNode::getDistinct
static MDTuple * getDistinct(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition: Metadata.h:1206
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:298
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
llvm::ValueMap< const Value *, WeakTrackingVH >
llvm::LoadInst
An instruction for reading from memory.
Definition: Instructions.h:174
llvm::DenseMapBase::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:207
llvm::EHPersonality
EHPersonality
Definition: EHPersonalities.h:22
llvm::CallBase::paramHasAttr
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Definition: Instructions.cpp:339
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition: BasicBlock.cpp:148
llvm::objcarc::GetRCIdentityRoot
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equivalent to retaining or releasing V.
Definition: ObjCARCAnalysisUtils.h:107
llvm::Constant::stripPointerCasts
const Constant * stripPointerCasts() const
Definition: Constant.h:201
llvm::AAResults::getModRefBehavior
FunctionModRefBehavior getModRefBehavior(const CallBase *Call)
Return the behavior of the given call site.
Definition: AliasAnalysis.cpp:422
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:702
llvm::ClonedCodeInfo::ContainsCalls
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition: Cloning.h:66
ObjCARCAnalysisUtils.h
llvm::CallBase::doesNotThrow
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition: InstrTypes.h:1847
llvm::Function::ProfileCount::getCount
uint64_t getCount() const
Definition: Function.h:293
llvm::OperandBundleUse::Inputs
ArrayRef< Use > Inputs
Definition: InstrTypes.h:1060
llvm::InlineFunctionInfo::InlinedCallSites
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition: Cloning.h:225
Argument.h
llvm::BasicBlock::front
const Instruction & front() const
Definition: BasicBlock.h:308
ObjCARCUtil.h
llvm::BlockFrequencyInfo::setBlockFreqAndScale
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
Definition: BlockFrequencyInfo.cpp:232
llvm::InlineFunctionInfo::reset
void reset()
Definition: Cloning.h:231
Constant.h
llvm::ResumeInst
Resume the propagation of an exception.
Definition: Instructions.h:4136
llvm::MDNode::replaceAllUsesWith
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition: Metadata.h:982
llvm::Type::getInt64Ty
static IntegerType * getInt64Ty(LLVMContext &C)
Definition: Type.cpp:205
llvm::ValueMapIterator
Definition: ValueMap.h:49
llvm::DenseMapBase::end
iterator end()
Definition: DenseMap.h:83
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2612
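An illustrative sketch (hypothetical helper) of the Create/addIncoming pairing, shaped like the PHI the inliner builds to merge the callee's return values:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"

// Build a PHI at the top of AfterCallBB that merges the return value coming
// from each cloned return instruction of the inlined body.
static llvm::PHINode *mergeReturnValues(llvm::Type *RetTy,
                                        llvm::ArrayRef<llvm::ReturnInst *> Returns,
                                        llvm::BasicBlock *AfterCallBB) {
  llvm::PHINode *PN = llvm::PHINode::Create(RetTy, Returns.size(), "retval",
                                            &AfterCallBB->front());
  for (llvm::ReturnInst *RI : Returns)
    PN->addIncoming(RI->getReturnValue(), RI->getParent());
  return PN;
}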
llvm::Function::getArg
Argument * getArg(unsigned i) const
Definition: Function.h:797
ProfileCount
Function::ProfileCount ProfileCount
Definition: InlineFunction.cpp:77
llvm::isGuaranteedToTransferExecutionToSuccessor
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors (even if it has side effects).
Definition: ValueTracking.cpp:5205
llvm::TypeSize
Definition: TypeSize.h:417
llvm::ConstantTokenNone::get
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1427
Casting.h
Function.h
updateCallProfile
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
Definition: InlineFunction.cpp:1604
llvm::Value::hasNUses
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:150
llvm::InlineFunctionInfo
This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.
Definition: Cloning.h:193
llvm::Function::getFunctionType
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:165
llvm::ValueMap::find
iterator find(const KeyT &Val)
Definition: ValueMap.h:156
llvm::Instruction::isEHPad
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Definition: Instruction.h:644
llvm::InlineResult::failure
static InlineResult failure(const char *Reason)
Definition: InlineCost.h:140
llvm::SmallVectorImpl::clear
void clear()
Definition: SmallVector.h:585
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1389
ScopedAliasMetadataDeepCloner::clone
void clone()
Create a new clone of the scoped alias metadata, which will be used by subsequent remap() calls.
Definition: InlineFunction.cpp:882
llvm::BlockFrequencyInfo::getBlockFreq
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Definition: BlockFrequencyInfo.cpp:202
llvm::Function::getPersonalityFn
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1705
llvm::Function::arg_begin
arg_iterator arg_begin()
Definition: Function.h:779
EnableNoAliasConversion
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:45
llvm::AttrBuilder::addDereferenceableAttr
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
Definition: Attributes.cpp:1811
llvm::MDBuilder
Definition: MDBuilder.h:35
llvm::Function::front
const BasicBlock & front() const
Definition: Function.h:771
CallGraph.h
ScopedAliasMetadataDeepCloner::remap
void remap(ValueToValueMapTy &VMap)
Remap instructions in the given VMap from the original to the cloned metadata.
Definition: InlineFunction.cpp:912
llvm::DebugLoc::getLine
unsigned getLine() const
Definition: DebugLoc.cpp:25
llvm::AttrBuilder::addAttribute
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
Definition: Attributes.h:814
llvm::BasicBlock::getInstList
const InstListType & getInstList() const
Return the underlying instruction list container.
Definition: BasicBlock.h:363
llvm::BasicBlock::reverse_iterator
InstListType::reverse_iterator reverse_iterator
Definition: BasicBlock.h:92
llvm::Function::getParamAlignment
unsigned getParamAlignment(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: Function.h:476
llvm::InlineFunctionInfo::StaticAllocas
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition: Cloning.h:214
llvm::MDNode::isTemporary
bool isTemporary() const
Definition: Metadata.h:977
Instructions.h
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:137
SmallVector.h
llvm::ilist_iterator::getReverse
ilist_iterator< OptionsT, !IsReverse, IsConst > getReverse() const
Get a reverse iterator to the same node.
Definition: ilist_iterator.h:121
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:365
User.h
llvm::InlineFunctionInfo::UpdateProfile
bool UpdateProfile
Update profile for callee as well as cloned version.
Definition: Cloning.h:229
Dominators.h
updateCallerBFI
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
Definition: InlineFunction.cpp:1575
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1341
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
InstructionSimplify.h
llvm::max
Align max(MaybeAlign Lhs, Align Rhs)
Definition: Alignment.h:350
llvm::PHINode
Definition: Instructions.h:2572
llvm::Function::onlyReadsMemory
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition: Function.h:542
llvm::BasicBlock::removePredecessor
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:321
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: APFloat.h:43
llvm::InlineFunction
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
Definition: InlineFunction.cpp:1756
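A minimal sketch of driving this entry point (illustrative only; real callers populate InlineFunctionInfo with an assumption-cache getter, profile data, and optionally a call graph):

#include "llvm/Analysis/InlineCost.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/Utils/Cloning.h"

// Inline one direct call site with default options and report success.
static bool tryInlineCallSite(llvm::CallBase &CB) {
  llvm::InlineFunctionInfo IFI;
  llvm::InlineResult IR = llvm::InlineFunction(CB, IFI);
  return IR.isSuccess();
}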
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to calling a function.
Definition: InstrTypes.h:1164
llvm::Module::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:397
DerivedTypes.h
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small set sizes.
Definition: SmallPtrSet.h:343
llvm::SmallSetVector
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:307
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1450
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition: ValueMap.h:165
LLVMContext.h
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:376
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition: Instructions.h:61
llvm::Function::ProfileCount
Class to represent profile counts.
Definition: Function.h:282
llvm::DebugLoc::getScope
MDNode * getScope() const
Definition: DebugLoc.cpp:35
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition: User.h:169
llvm::InlineFunctionInfo::GetAssumptionCache
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
Definition: Cloning.h:208
llvm::BasicBlock::getTerminatingMustTailCall
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked 'musttail' prior to the terminating return instruction of this basic block, if such a call is present. Otherwise, returns null.
Definition: BasicBlock.cpp:154
llvm::cl::desc
Definition: CommandLine.h:411
llvm::BranchInst
Conditional or Unconditional Branch instruction.
Definition: Instructions.h:3005
InlinerAttributeWindow
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
llvm::ClonedCodeInfo::ContainsDynamicAllocas
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition: Cloning.h:71
llvm::Function::ProfileCount::hasValue
bool hasValue() const
Definition: Function.h:292
llvm::SetVector< const MDNode * >
llvm::CallBase::arg_operands
iterator_range< User::op_iterator > arg_operands()
Definition: InstrTypes.h:1333
llvm::LLVMContext::OB_funclet
@ OB_funclet
Definition: LLVMContext.h:91
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition: SmallVector.h:624
llvm::CallInst::TCK_NoTail
@ TCK_NoTail
Definition: Instructions.h:1631
llvm::IRBuilderBase::CreateAlignmentAssumption
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
Definition: IRBuilder.cpp:1156
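A hedged sketch (hypothetical helper) matching the shape of what AddAlignmentAssumptions emits for an align parameter:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"

// Emit an @llvm.assume recording that PtrArg is 16-byte aligned, inserted
// immediately before the call site CB.
static void assumeAligned16(llvm::CallBase &CB, llvm::Value *PtrArg,
                            const llvm::DataLayout &DL) {
  llvm::IRBuilder<> Builder(&CB);
  Builder.CreateAlignmentAssumption(DL, PtrArg, /*Alignment=*/16);
}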
Value.h
llvm::InvokeInst::getUnwindDest
BasicBlock * getUnwindDest() const
Definition: Instructions.h:3821
llvm::InlineFunctionInfo::CalleeBFI
BlockFrequencyInfo * CalleeBFI
Definition: Cloning.h:210
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:522
llvm::InlineResult
InlineResult is basically true or false.
Definition: InlineCost.h:134
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:434
llvm::CallInst::getTailCallKind
TailCallKind getTailCallKind() const
Definition: Instructions.h:1640
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2344
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1322
SetVector.h
llvm::CallBase::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1457
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:908
llvm::Function::iterator
BasicBlockListType::iterator iterator
Definition: Function.h:66
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::CallGraphNode::begin
iterator begin()
Definition: CallGraph.h:200