LLVM  13.0.0git
CoroSplit.cpp
Go to the documentation of this file.
1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
11 // We present a coroutine to an LLVM as an ordinary function with suspension
12 // points marked up with intrinsics. We let the optimizer party on the coroutine
13 // as a single function for as long as possible. Shortly before the coroutine is
14 // eligible to be inlined into its callers, we split up the coroutine into parts
15 // corresponding to an initial, resume and destroy invocations of the coroutine,
16 // add them to the current SCC and restart the IPO pipeline to optimize the
17 // coroutine subfunctions we extracted before proceeding to the caller of the
18 // coroutine.
19 //===----------------------------------------------------------------------===//
20 
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/StringRef.h"
28 #include "llvm/ADT/Twine.h"
32 #include "llvm/IR/Argument.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/BasicBlock.h"
35 #include "llvm/IR/CFG.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalValue.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/IRBuilder.h"
44 #include "llvm/IR/InstIterator.h"
45 #include "llvm/IR/InstrTypes.h"
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/IR/Instructions.h"
48 #include "llvm/IR/IntrinsicInst.h"
49 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/Type.h"
53 #include "llvm/IR/Value.h"
54 #include "llvm/IR/Verifier.h"
55 #include "llvm/InitializePasses.h"
56 #include "llvm/Pass.h"
57 #include "llvm/Support/Casting.h"
58 #include "llvm/Support/Debug.h"
61 #include "llvm/Transforms/Scalar.h"
67 #include <cassert>
68 #include <cstddef>
69 #include <cstdint>
70 #include <initializer_list>
71 #include <iterator>
72 
73 using namespace llvm;
74 
75 #define DEBUG_TYPE "coro-split"
76 
77 namespace {
78 
79 /// A little helper class for building
80 class CoroCloner {
81 public:
82  enum class Kind {
83  /// The shared resume function for a switch lowering.
84  SwitchResume,
85 
86  /// The shared unwind function for a switch lowering.
87  SwitchUnwind,
88 
89  /// The shared cleanup function for a switch lowering.
90  SwitchCleanup,
91 
92  /// An individual continuation function.
93  Continuation,
94 
95  /// An async resume function.
96  Async,
97  };
98 
99 private:
100  Function &OrigF;
101  Function *NewF;
102  const Twine &Suffix;
103  coro::Shape &Shape;
104  Kind FKind;
105  ValueToValueMapTy VMap;
107  Value *NewFramePtr = nullptr;
108 
109  /// The active suspend instruction; meaningful only for continuation and async
110  /// ABIs.
111  AnyCoroSuspendInst *ActiveSuspend = nullptr;
112 
113 public:
114  /// Create a cloner for a switch lowering.
115  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
116  Kind FKind)
117  : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
118  FKind(FKind), Builder(OrigF.getContext()) {
119  assert(Shape.ABI == coro::ABI::Switch);
120  }
121 
122  /// Create a cloner for a continuation lowering.
123  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
124  Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
125  : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
126  FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
127  Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
128  assert(Shape.ABI == coro::ABI::Retcon ||
129  Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
130  assert(NewF && "need existing function for continuation");
131  assert(ActiveSuspend && "need active suspend point for continuation");
132  }
133 
134  Function *getFunction() const {
135  assert(NewF != nullptr && "declaration not yet set");
136  return NewF;
137  }
138 
139  void create();
140 
141 private:
142  bool isSwitchDestroyFunction() {
143  switch (FKind) {
144  case Kind::Async:
145  case Kind::Continuation:
146  case Kind::SwitchResume:
147  return false;
148  case Kind::SwitchUnwind:
149  case Kind::SwitchCleanup:
150  return true;
151  }
152  llvm_unreachable("Unknown CoroCloner::Kind enum");
153  }
154 
155  void replaceEntryBlock();
156  Value *deriveNewFramePointer();
157  void replaceRetconOrAsyncSuspendUses();
158  void replaceCoroSuspends();
159  void replaceCoroEnds();
160  void replaceSwiftErrorOps();
161  void salvageDebugInfo();
162  void handleFinalSuspend();
163 };
164 
165 } // end anonymous namespace
166 
168  const coro::Shape &Shape, Value *FramePtr,
169  CallGraph *CG) {
170  assert(Shape.ABI == coro::ABI::Retcon ||
171  Shape.ABI == coro::ABI::RetconOnce);
173  return;
174 
175  Shape.emitDealloc(Builder, FramePtr, CG);
176 }
177 
178 /// Replace an llvm.coro.end.async.
179 /// Will inline the must tail call function call if there is one.
180 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
182  IRBuilder<> Builder(End);
183 
184  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
185  if (!EndAsync) {
186  Builder.CreateRetVoid();
187  return true /*needs cleanup of coro.end block*/;
188  }
189 
190  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
191  if (!MustTailCallFunc) {
192  Builder.CreateRetVoid();
193  return true /*needs cleanup of coro.end block*/;
194  }
195 
196  // Move the must tail call from the predecessor block into the end block.
197  auto *CoroEndBlock = End->getParent();
198  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
199  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
200  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
201  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
202  CoroEndBlock->getInstList().splice(
203  End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
204 
205  // Insert the return instruction.
206  Builder.SetInsertPoint(End);
207  Builder.CreateRetVoid();
208  InlineFunctionInfo FnInfo;
209 
210  // Remove the rest of the block, by splitting it into an unreachable block.
211  auto *BB = End->getParent();
212  BB->splitBasicBlock(End);
213  BB->getTerminator()->eraseFromParent();
214 
215  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
216  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
217  (void)InlineRes;
218 
219  // We have cleaned up the coro.end block above.
220  return false;
221 }
222 
223 /// Replace a non-unwind call to llvm.coro.end.
225  const coro::Shape &Shape, Value *FramePtr,
226  bool InResume, CallGraph *CG) {
227  // Start inserting right before the coro.end.
228  IRBuilder<> Builder(End);
229 
230  // Create the return instruction.
231  switch (Shape.ABI) {
232  // The cloned functions in switch-lowering always return void.
233  case coro::ABI::Switch:
234  // coro.end doesn't immediately end the coroutine in the main function
235  // in this lowering, because we need to deallocate the coroutine.
236  if (!InResume)
237  return;
238  Builder.CreateRetVoid();
239  break;
240 
241  // In async lowering this returns.
242  case coro::ABI::Async: {
243  bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
244  if (!CoroEndBlockNeedsCleanup)
245  return;
246  break;
247  }
248 
249  // In unique continuation lowering, the continuations always return void.
250  // But we may have implicitly allocated storage.
252  maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
253  Builder.CreateRetVoid();
254  break;
255 
256  // In non-unique continuation lowering, we signal completion by returning
257  // a null continuation.
258  case coro::ABI::Retcon: {
259  maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
260  auto RetTy = Shape.getResumeFunctionType()->getReturnType();
261  auto RetStructTy = dyn_cast<StructType>(RetTy);
262  PointerType *ContinuationTy =
263  cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
264 
265  Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
266  if (RetStructTy) {
267  ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
268  ReturnValue, 0);
269  }
270  Builder.CreateRet(ReturnValue);
271  break;
272  }
273  }
274 
275  // Remove the rest of the block, by splitting it into an unreachable block.
276  auto *BB = End->getParent();
277  BB->splitBasicBlock(End);
278  BB->getTerminator()->eraseFromParent();
279 }
280 
281 /// Replace an unwind call to llvm.coro.end.
282 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
283  Value *FramePtr, bool InResume,
284  CallGraph *CG) {
285  IRBuilder<> Builder(End);
286 
287  switch (Shape.ABI) {
288  // In switch-lowering, this does nothing in the main function.
289  case coro::ABI::Switch:
290  if (!InResume)
291  return;
292  break;
293  // In async lowering this does nothing.
294  case coro::ABI::Async:
295  break;
296  // In continuation-lowering, this frees the continuation storage.
297  case coro::ABI::Retcon:
299  maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
300  break;
301  }
302 
303  // If coro.end has an associated bundle, add cleanupret instruction.
304  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
305  auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
306  auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
307  End->getParent()->splitBasicBlock(End);
308  CleanupRet->getParent()->getTerminator()->eraseFromParent();
309  }
310 }
311 
312 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
313  Value *FramePtr, bool InResume, CallGraph *CG) {
314  if (End->isUnwind())
315  replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
316  else
317  replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
318 
319  auto &Context = End->getContext();
320  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
322  End->eraseFromParent();
323 }
324 
325 // Create an entry block for a resume function with a switch that will jump to
326 // suspend points.
328  assert(Shape.ABI == coro::ABI::Switch);
329  LLVMContext &C = F.getContext();
330 
331  // resume.entry:
332  // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
333  // i32 2
334  // % index = load i32, i32* %index.addr
335  // switch i32 %index, label %unreachable [
336  // i32 0, label %resume.0
337  // i32 1, label %resume.1
338  // ...
339  // ]
340 
341  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
342  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
343 
344  IRBuilder<> Builder(NewEntry);
345  auto *FramePtr = Shape.FramePtr;
346  auto *FrameTy = Shape.FrameTy;
347  auto *GepIndex = Builder.CreateStructGEP(
348  FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
349  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
350  auto *Switch =
351  Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
352  Shape.SwitchLowering.ResumeSwitch = Switch;
353 
354  size_t SuspendIndex = 0;
355  for (auto *AnyS : Shape.CoroSuspends) {
356  auto *S = cast<CoroSuspendInst>(AnyS);
357  ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
358 
359  // Replace CoroSave with a store to Index:
360  // %index.addr = getelementptr %f.frame... (index field number)
361  // store i32 0, i32* %index.addr1
362  auto *Save = S->getCoroSave();
363  Builder.SetInsertPoint(Save);
364  if (S->isFinal()) {
365  // Final suspend point is represented by storing zero in ResumeFnAddr.
366  auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
368  "ResumeFn.addr");
369  auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
370  cast<PointerType>(GepIndex->getType())->getElementType()));
371  Builder.CreateStore(NullPtr, GepIndex);
372  } else {
373  auto *GepIndex = Builder.CreateStructGEP(
374  FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
375  Builder.CreateStore(IndexVal, GepIndex);
376  }
377  Save->replaceAllUsesWith(ConstantTokenNone::get(C));
378  Save->eraseFromParent();
379 
380  // Split block before and after coro.suspend and add a jump from an entry
381  // switch:
382  //
383  // whateverBB:
384  // whatever
385  // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
386  // switch i8 %0, label %suspend[i8 0, label %resume
387  // i8 1, label %cleanup]
388  // becomes:
389  //
390  // whateverBB:
391  // whatever
392  // br label %resume.0.landing
393  //
394  // resume.0: ; <--- jump from the switch in the resume.entry
395  // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
396  // br label %resume.0.landing
397  //
398  // resume.0.landing:
399  // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
400  // switch i8 % 1, label %suspend [i8 0, label %resume
401  // i8 1, label %cleanup]
402 
403  auto *SuspendBB = S->getParent();
404  auto *ResumeBB =
405  SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
406  auto *LandingBB = ResumeBB->splitBasicBlock(
407  S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
408  Switch->addCase(IndexVal, ResumeBB);
409 
410  cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
411  auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
412  S->replaceAllUsesWith(PN);
413  PN->addIncoming(Builder.getInt8(-1), SuspendBB);
414  PN->addIncoming(S, ResumeBB);
415 
416  ++SuspendIndex;
417  }
418 
419  Builder.SetInsertPoint(UnreachBB);
420  Builder.CreateUnreachable();
421 
422  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
423 }
424 
425 
426 // Rewrite final suspend point handling. We do not use suspend index to
427 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the
428 // coroutine frame, since it is undefined behavior to resume a coroutine
429 // suspended at the final suspend point. Thus, in the resume function, we can
430 // simply remove the last case (when coro::Shape is built, the final suspend
431 // point (if present) is always the last element of CoroSuspends array).
432 // In the destroy function, we add a code sequence to check if ResumeFnAddress
433 // is Null, and if so, jump to the appropriate label to handle cleanup from the
434 // final suspend point.
435 void CoroCloner::handleFinalSuspend() {
436  assert(Shape.ABI == coro::ABI::Switch &&
437  Shape.SwitchLowering.HasFinalSuspend);
438  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
439  auto FinalCaseIt = std::prev(Switch->case_end());
440  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
441  Switch->removeCase(FinalCaseIt);
442  if (isSwitchDestroyFunction()) {
443  BasicBlock *OldSwitchBB = Switch->getParent();
444  auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
445  Builder.SetInsertPoint(OldSwitchBB->getTerminator());
446  auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
448  "ResumeFn.addr");
449  auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
450  GepIndex);
451  auto *Cond = Builder.CreateIsNull(Load);
452  Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
453  OldSwitchBB->getTerminator()->eraseFromParent();
454  }
455 }
456 
457 static FunctionType *
459  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
460  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
461  auto &Context = Suspend->getParent()->getParent()->getContext();
462  auto *VoidTy = Type::getVoidTy(Context);
463  return FunctionType::get(VoidTy, StructTy->elements(), false);
464 }
465 
467  const Twine &Suffix,
468  Module::iterator InsertBefore,
469  AnyCoroSuspendInst *ActiveSuspend) {
470  Module *M = OrigF.getParent();
471  auto *FnTy = (Shape.ABI != coro::ABI::Async)
472  ? Shape.getResumeFunctionType()
473  : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
474 
475  Function *NewF =
476  Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
477  OrigF.getName() + Suffix);
478  NewF->addParamAttr(0, Attribute::NonNull);
479 
480  // For the async lowering ABI we can't guarantee that the context argument is
481  // not access via a different pointer not based on the argument.
482  if (Shape.ABI != coro::ABI::Async)
483  NewF->addParamAttr(0, Attribute::NoAlias);
484 
485  M->getFunctionList().insert(InsertBefore, NewF);
486 
487  return NewF;
488 }
489 
490 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
491 /// arguments to the continuation function.
492 ///
493 /// This assumes that the builder has a meaningful insertion point.
494 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
495  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
496  Shape.ABI == coro::ABI::Async);
497 
498  auto NewS = VMap[ActiveSuspend];
499  if (NewS->use_empty()) return;
500 
501  // Copy out all the continuation arguments after the buffer pointer into
502  // an easily-indexed data structure for convenience.
504  // The async ABI includes all arguments -- including the first argument.
505  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
506  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
507  E = NewF->arg_end();
508  I != E; ++I)
509  Args.push_back(&*I);
510 
511  // If the suspend returns a single scalar value, we can just do a simple
512  // replacement.
513  if (!isa<StructType>(NewS->getType())) {
514  assert(Args.size() == 1);
515  NewS->replaceAllUsesWith(Args.front());
516  return;
517  }
518 
519  // Try to peephole extracts of an aggregate return.
520  for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
521  auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
522  if (!EVI || EVI->getNumIndices() != 1)
523  continue;
524 
525  EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
526  EVI->eraseFromParent();
527  }
528 
529  // If we have no remaining uses, we're done.
530  if (NewS->use_empty()) return;
531 
532  // Otherwise, we need to create an aggregate.
533  Value *Agg = UndefValue::get(NewS->getType());
534  for (size_t I = 0, E = Args.size(); I != E; ++I)
535  Agg = Builder.CreateInsertValue(Agg, Args[I], I);
536 
537  NewS->replaceAllUsesWith(Agg);
538 }
539 
540 void CoroCloner::replaceCoroSuspends() {
541  Value *SuspendResult;
542 
543  switch (Shape.ABI) {
544  // In switch lowering, replace coro.suspend with the appropriate value
545  // for the type of function we're extracting.
546  // Replacing coro.suspend with (0) will result in control flow proceeding to
547  // a resume label associated with a suspend point, replacing it with (1) will
548  // result in control flow proceeding to a cleanup label associated with this
549  // suspend point.
550  case coro::ABI::Switch:
551  SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
552  break;
553 
554  // In async lowering there are no uses of the result.
555  case coro::ABI::Async:
556  return;
557 
558  // In returned-continuation lowering, the arguments from earlier
559  // continuations are theoretically arbitrary, and they should have been
560  // spilled.
562  case coro::ABI::Retcon:
563  return;
564  }
565 
566  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
567  // The active suspend was handled earlier.
568  if (CS == ActiveSuspend) continue;
569 
570  auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
571  MappedCS->replaceAllUsesWith(SuspendResult);
572  MappedCS->eraseFromParent();
573  }
574 }
575 
576 void CoroCloner::replaceCoroEnds() {
577  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
578  // We use a null call graph because there's no call graph node for
579  // the cloned function yet. We'll just be rebuilding that later.
580  auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
581  replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
582  }
583 }
584 
586  ValueToValueMapTy *VMap) {
587  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
588  return;
589  Value *CachedSlot = nullptr;
590  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
591  if (CachedSlot) {
592  assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
593  "multiple swifterror slots in function with different types");
594  return CachedSlot;
595  }
596 
597  // Check if the function has a swifterror argument.
598  for (auto &Arg : F.args()) {
599  if (Arg.isSwiftError()) {
600  CachedSlot = &Arg;
601  assert(Arg.getType()->getPointerElementType() == ValueTy &&
602  "swifterror argument does not have expected type");
603  return &Arg;
604  }
605  }
606 
607  // Create a swifterror alloca.
608  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
609  auto Alloca = Builder.CreateAlloca(ValueTy);
610  Alloca->setSwiftError(true);
611 
612  CachedSlot = Alloca;
613  return Alloca;
614  };
615 
616  for (CallInst *Op : Shape.SwiftErrorOps) {
617  auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
618  IRBuilder<> Builder(MappedOp);
619 
620  // If there are no arguments, this is a 'get' operation.
621  Value *MappedResult;
622  if (Op->getNumArgOperands() == 0) {
623  auto ValueTy = Op->getType();
624  auto Slot = getSwiftErrorSlot(ValueTy);
625  MappedResult = Builder.CreateLoad(ValueTy, Slot);
626  } else {
627  assert(Op->getNumArgOperands() == 1);
628  auto Value = MappedOp->getArgOperand(0);
629  auto ValueTy = Value->getType();
630  auto Slot = getSwiftErrorSlot(ValueTy);
631  Builder.CreateStore(Value, Slot);
632  MappedResult = Slot;
633  }
634 
635  MappedOp->replaceAllUsesWith(MappedResult);
636  MappedOp->eraseFromParent();
637  }
638 
639  // If we're updating the original function, we've invalidated SwiftErrorOps.
640  if (VMap == nullptr) {
641  Shape.SwiftErrorOps.clear();
642  }
643 }
644 
646  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
647 }
648 
652  for (auto &BB : *NewF)
653  for (auto &I : BB)
654  if (auto *DDI = dyn_cast<DbgDeclareInst>(&I))
655  Worklist.push_back(DDI);
656  for (DbgDeclareInst *DDI : Worklist)
657  coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
658 
659  // Remove all salvaged dbg.declare intrinsics that became
660  // either unreachable or stale due to the CoroSplit transformation.
661  auto IsUnreachableBlock = [&](BasicBlock *BB) {
662  return BB->hasNPredecessors(0) && BB != &NewF->getEntryBlock();
663  };
664  for (DbgDeclareInst *DDI : Worklist) {
665  if (IsUnreachableBlock(DDI->getParent()))
666  DDI->eraseFromParent();
667  else if (dyn_cast_or_null<AllocaInst>(DDI->getAddress())) {
668  // Count all non-debuginfo uses in reachable blocks.
669  unsigned Uses = 0;
670  for (auto *User : DDI->getAddress()->users())
671  if (auto *I = dyn_cast<Instruction>(User))
672  if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
673  ++Uses;
674  if (!Uses)
675  DDI->eraseFromParent();
676  }
677  }
678 }
679 
680 void CoroCloner::replaceEntryBlock() {
681  // In the original function, the AllocaSpillBlock is a block immediately
682  // following the allocation of the frame object which defines GEPs for
683  // all the allocas that have been moved into the frame, and it ends by
684  // branching to the original beginning of the coroutine. Make this
685  // the entry block of the cloned function.
686  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
687  auto *OldEntry = &NewF->getEntryBlock();
688  Entry->setName("entry" + Suffix);
689  Entry->moveBefore(OldEntry);
690  Entry->getTerminator()->eraseFromParent();
691 
692  // Clear all predecessors of the new entry block. There should be
693  // exactly one predecessor, which we created when splitting out
694  // AllocaSpillBlock to begin with.
695  assert(Entry->hasOneUse());
696  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
697  assert(BranchToEntry->isUnconditional());
698  Builder.SetInsertPoint(BranchToEntry);
699  Builder.CreateUnreachable();
700  BranchToEntry->eraseFromParent();
701 
702  // Branch from the entry to the appropriate place.
703  Builder.SetInsertPoint(Entry);
704  switch (Shape.ABI) {
705  case coro::ABI::Switch: {
706  // In switch-lowering, we built a resume-entry block in the original
707  // function. Make the entry block branch to this.
708  auto *SwitchBB =
709  cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
710  Builder.CreateBr(SwitchBB);
711  break;
712  }
713  case coro::ABI::Async:
714  case coro::ABI::Retcon:
715  case coro::ABI::RetconOnce: {
716  // In continuation ABIs, we want to branch to immediately after the
717  // active suspend point. Earlier phases will have put the suspend in its
718  // own basic block, so just thread our jump directly to its successor.
719  assert((Shape.ABI == coro::ABI::Async &&
720  isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
721  ((Shape.ABI == coro::ABI::Retcon ||
722  Shape.ABI == coro::ABI::RetconOnce) &&
723  isa<CoroSuspendRetconInst>(ActiveSuspend)));
724  auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
725  auto Branch = cast<BranchInst>(MappedCS->getNextNode());
726  assert(Branch->isUnconditional());
727  Builder.CreateBr(Branch->getSuccessor(0));
728  break;
729  }
730  }
731 
732  // Any static alloca that's still being used but not reachable from the new
733  // entry needs to be moved to the new entry.
734  Function *F = OldEntry->getParent();
735  DominatorTree DT{*F};
736  for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
737  Instruction &I = *IT++;
738  auto *Alloca = dyn_cast<AllocaInst>(&I);
739  if (!Alloca || I.use_empty())
740  continue;
741  if (DT.isReachableFromEntry(I.getParent()) ||
742  !isa<ConstantInt>(Alloca->getArraySize()))
743  continue;
744  I.moveBefore(*Entry, Entry->getFirstInsertionPt());
745  }
746 }
747 
748 /// Derive the value of the new frame pointer.
749 Value *CoroCloner::deriveNewFramePointer() {
750  // Builder should be inserting to the front of the new entry block.
751 
752  switch (Shape.ABI) {
753  // In switch-lowering, the argument is the frame pointer.
754  case coro::ABI::Switch:
755  return &*NewF->arg_begin();
756  // In async-lowering, one of the arguments is an async context as determined
757  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
758  // the resume function from the async context projection function associated
759  // with the active suspend. The frame is located as a tail to the async
760  // context header.
761  case coro::ABI::Async: {
762  auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
763  auto *CalleeContext =
764  NewF->getArg(ActiveAsyncSuspend->getStorageArgumentIndex());
765  auto *FramePtrTy = Shape.FrameTy->getPointerTo();
766  auto *ProjectionFunc =
767  ActiveAsyncSuspend->getAsyncContextProjectionFunction();
768  auto DbgLoc =
769  cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
770  // Calling i8* (i8*)
771  auto *CallerContext = Builder.CreateCall(
772  cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
773  ProjectionFunc, CalleeContext);
774  CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
775  CallerContext->setDebugLoc(DbgLoc);
776  // The frame is located after the async_context header.
777  auto &Context = Builder.getContext();
778  auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
779  Type::getInt8Ty(Context), CallerContext,
780  Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
781  // Inline the projection function.
783  auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
784  assert(InlineRes.isSuccess());
785  (void)InlineRes;
786  return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
787  }
788  // In continuation-lowering, the argument is the opaque storage.
789  case coro::ABI::Retcon:
790  case coro::ABI::RetconOnce: {
791  Argument *NewStorage = &*NewF->arg_begin();
792  auto FramePtrTy = Shape.FrameTy->getPointerTo();
793 
794  // If the storage is inline, just bitcast to the storage to the frame type.
795  if (Shape.RetconLowering.IsFrameInlineInStorage)
796  return Builder.CreateBitCast(NewStorage, FramePtrTy);
797 
798  // Otherwise, load the real frame from the opaque storage.
799  auto FramePtrPtr =
800  Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
801  return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
802  }
803  }
804  llvm_unreachable("bad ABI");
805 }
806 
808  unsigned ParamIndex,
809  uint64_t Size, Align Alignment) {
810  AttrBuilder ParamAttrs;
811  ParamAttrs.addAttribute(Attribute::NonNull);
812  ParamAttrs.addAttribute(Attribute::NoAlias);
813  ParamAttrs.addAlignmentAttr(Alignment);
814  ParamAttrs.addDereferenceableAttr(Size);
815  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
816 }
817 
818 /// Clone the body of the original function into a resume function of
819 /// some sort.
820 void CoroCloner::create() {
821  // Create the new function if we don't already have one.
822  if (!NewF) {
823  NewF = createCloneDeclaration(OrigF, Shape, Suffix,
824  OrigF.getParent()->end(), ActiveSuspend);
825  }
826 
827  // Replace all args with undefs. The buildCoroutineFrame algorithm already
828  // rewritten access to the args that occurs after suspend points with loads
829  // and stores to/from the coroutine frame.
830  for (Argument &A : OrigF.args())
831  VMap[&A] = UndefValue::get(A.getType());
832 
834 
835  // Ignore attempts to change certain attributes of the function.
836  // TODO: maybe there should be a way to suppress this during cloning?
837  auto savedVisibility = NewF->getVisibility();
838  auto savedUnnamedAddr = NewF->getUnnamedAddr();
839  auto savedDLLStorageClass = NewF->getDLLStorageClass();
840 
841  // NewF's linkage (which CloneFunctionInto does *not* change) might not
842  // be compatible with the visibility of OrigF (which it *does* change),
843  // so protect against that.
844  auto savedLinkage = NewF->getLinkage();
845  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
846 
847  CloneFunctionInto(NewF, &OrigF, VMap,
849 
850  auto &Context = NewF->getContext();
851 
852  // For async functions / continuations, adjust the scope line of the
853  // clone to the line number of the suspend point. The scope line is
854  // associated with all pre-prologue instructions. This avoids a jump
855  // in the linetable from the function declaration to the suspend point.
856  if (DISubprogram *SP = NewF->getSubprogram()) {
857  assert(SP != OrigF.getSubprogram() && SP->isDistinct());
858  if (ActiveSuspend)
859  if (auto DL = ActiveSuspend->getDebugLoc())
860  SP->setScopeLine(DL->getLine());
861  // Update the linkage name to reflect the modified symbol name. It
862  // is necessary to update the linkage name in Swift, since the
863  // mangling changes for resume functions. It might also be the
864  // right thing to do in C++, but due to a limitation in LLVM's
865  // AsmPrinter we can only do this if the function doesn't have an
866  // abstract specification, since the DWARF backend expects the
867  // abstract specification to contain the linkage name and asserts
868  // that they are identical.
869  if (!SP->getDeclaration() && SP->getUnit() &&
870  SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
871  SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
872  }
873 
874  NewF->setLinkage(savedLinkage);
875  NewF->setVisibility(savedVisibility);
876  NewF->setUnnamedAddr(savedUnnamedAddr);
877  NewF->setDLLStorageClass(savedDLLStorageClass);
878 
879  // Replace the attributes of the new function:
880  auto OrigAttrs = NewF->getAttributes();
881  auto NewAttrs = AttributeList();
882 
883  switch (Shape.ABI) {
884  case coro::ABI::Switch:
885  // Bootstrap attributes by copying function attributes from the
886  // original function. This should include optimization settings and so on.
887  NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
888  OrigAttrs.getFnAttributes());
889 
890  addFramePointerAttrs(NewAttrs, Context, 0,
891  Shape.FrameSize, Shape.FrameAlign);
892  break;
893  case coro::ABI::Async: {
894  // Transfer the original function's attributes.
895  auto FnAttrs = OrigF.getAttributes().getFnAttributes();
896  NewAttrs =
897  NewAttrs.addAttributes(Context, AttributeList::FunctionIndex, FnAttrs);
898  break;
899  }
900  case coro::ABI::Retcon:
902  // If we have a continuation prototype, just use its attributes,
903  // full-stop.
904  NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
905 
906  addFramePointerAttrs(NewAttrs, Context, 0,
907  Shape.getRetconCoroId()->getStorageSize(),
908  Shape.getRetconCoroId()->getStorageAlignment());
909  break;
910  }
911 
912  switch (Shape.ABI) {
913  // In these ABIs, the cloned functions always return 'void', and the
914  // existing return sites are meaningless. Note that for unique
915  // continuations, this includes the returns associated with suspends;
916  // this is fine because we can't suspend twice.
917  case coro::ABI::Switch:
919  // Remove old returns.
920  for (ReturnInst *Return : Returns)
921  changeToUnreachable(Return, /*UseLLVMTrap=*/false);
922  break;
923 
924  // With multi-suspend continuations, we'll already have eliminated the
925  // original returns and inserted returns before all the suspend points,
926  // so we want to leave any returns in place.
927  case coro::ABI::Retcon:
928  break;
929  // Async lowering will insert musttail call functions at all suspend points
930  // followed by a return.
931  // Don't change returns to unreachable because that will trip up the verifier.
932  // These returns should be unreachable from the clone.
933  case coro::ABI::Async:
934  break;
935  }
936 
937  NewF->setAttributes(NewAttrs);
938  NewF->setCallingConv(Shape.getResumeFunctionCC());
939 
940  // Set up the new entry block.
941  replaceEntryBlock();
942 
943  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
944  NewFramePtr = deriveNewFramePointer();
945 
946  // Remap frame pointer.
947  Value *OldFramePtr = VMap[Shape.FramePtr];
948  NewFramePtr->takeName(OldFramePtr);
949  OldFramePtr->replaceAllUsesWith(NewFramePtr);
950 
951  // Remap vFrame pointer.
952  auto *NewVFrame = Builder.CreateBitCast(
953  NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
954  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
955  OldVFrame->replaceAllUsesWith(NewVFrame);
956 
957  switch (Shape.ABI) {
958  case coro::ABI::Switch:
959  // Rewrite final suspend handling as it is not done via switch (allows to
960  // remove final case from the switch, since it is undefined behavior to
961  // resume the coroutine suspended at the final suspend point.
962  if (Shape.SwitchLowering.HasFinalSuspend)
963  handleFinalSuspend();
964  break;
965  case coro::ABI::Async:
966  case coro::ABI::Retcon:
968  // Replace uses of the active suspend with the corresponding
969  // continuation-function arguments.
970  assert(ActiveSuspend != nullptr &&
971  "no active suspend when lowering a continuation-style coroutine");
972  replaceRetconOrAsyncSuspendUses();
973  break;
974  }
975 
976  // Handle suspends.
977  replaceCoroSuspends();
978 
979  // Handle swifterror.
981 
982  // Remove coro.end intrinsics.
983  replaceCoroEnds();
984 
985  // Salvage debug info that points into the coroutine frame.
987 
988  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
989  // to suppress deallocation code.
990  if (Shape.ABI == coro::ABI::Switch)
991  coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
992  /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
993 }
994 
995 // Create a resume clone by cloning the body of the original function, setting
996 // new entry block and replacing coro.suspend an appropriate value to force
997 // resume or cleanup pass for every suspend point.
998 static Function *createClone(Function &F, const Twine &Suffix,
999  coro::Shape &Shape, CoroCloner::Kind FKind) {
1000  CoroCloner Cloner(F, Suffix, Shape, FKind);
1001  Cloner.create();
1002  return Cloner.getFunction();
1003 }
1004 
1005 /// Remove calls to llvm.coro.end in the original function.
1006 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1007  for (auto End : Shape.CoroEnds) {
1008  replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1009  }
1010 }
1011 
  // Rewrite the {relative-function-offset, context-size} constant struct that
  // initializes the async function pointer so its size field matches the
  // final computed async context size.
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  // Keep the relative function offset; only the size operand changes.
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}
1026 
// Replace every llvm.coro.size intrinsic with the now-known constant size of
// the coroutine frame.
static void replaceFrameSize(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  // RAUW and erase each coro.size; all are replaced by the same constant.
  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}
1046 
// Create a global constant array containing pointers to functions provided and
// set Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                 [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  // All resumers share one signature, so the first function's pointer type
  // doubles as the array element type.
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  Shape.getSwitchCoroId()->setInfo(BC);
}
1080 
// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  // Store the resume function pointer into its slot in the frame.
  auto *ResumeAddr = Builder.CreateStructGEP(
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elide the
    // allocation, use CleanupFn instead of DestroyFn).
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  // Store either destroy (heap-allocated frame) or cleanup (elided frame)
  // into the destroy slot.
  auto *DestroyAddr = Builder.CreateStructGEP(
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}
1106 
// Run a small per-clone cleanup pipeline (SCCP, EarlyCSE, ...) right after
// splitting, verifying each clone before optimizing it.
static void postSplitCleanup(Function &F) {

  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");

  legacy::FunctionPassManager FPM(F.getParent());

  FPM.add(createSCCPPass());
  FPM.add(createEarlyCSEPass());

  FPM.doInitialization();
  FPM.run(F);
  FPM.doFinalization();
}
1127 
// Assuming we arrived at the block NewBlock from Prev instruction, store
// PHI's incoming values in the ResolvedValues map.
static void
    DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it. This collapses chains of PHIs so a
    // later lookup of &PN yields the ultimate incoming value.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}
1144 
// Replace a sequence of branches leading to a ret, with a clone of a ret
// instruction. Suspend instruction represented by a switch, track the PHI
// values and select the correct case successor when possible.
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  // Maps PHIs (and values rewritten through PHIs) to the concrete value they
  // carry along the single path being walked.
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      // Reached a ret: replace the whole chain starting at InitialInst with
      // a clone of that ret.
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from basic block of InitialInst
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      // Walk through unconditional branches, recording PHI values on the way.
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the case number of suspended switch instruction is reduced to
        // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator.
        // And the comparison looks like : %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          // If the compared value resolved to a constant, pick the taken arm
          // statically and keep walking.
          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      // Resolve the switch condition through the PHI map; if it folds to a
      // constant, follow the matching case edge.
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    // Any other terminator (or unresolved condition) blocks simplification.
    return false;
  }
  return false;
}
1213 
// Check whether CI obeys the rules of musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match prototypes and calling conventions of resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  // The single parameter must be a pointer in address space 0 (the frame
  // pointer of a switch-lowered resume function).
  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
      Attribute::SwiftSelf, Attribute::SwiftError};
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttribute(0, AK))
      return false;

  return true;
}
1244 
// Add musttail to any resume instructions that is immediately followed by a
// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail call
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine that has
// identical signature and calling convention as the coro.resume call.
  bool changed = false;

  // Collect potential resume instructions.
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
}
1270 
// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      // Replace the heap allocation with a stack alloca of the frame type;
      // coro.alloc then answers "false" (no dynamic allocation needed).
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      // No coro.alloc: the memory operand of coro.begin is used directly.
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}
1304 
// SimplifySuspendPoint needs to check that there is no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
  // Walks [From, To); callers pass To == nullptr to scan to the end of
  // From's block.
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}
1319 
// Return true if any block strictly between SaveBB and ResDesBB contains a
// non-intrinsic call (which could resume the coroutine).
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (Set.count(Pred) == 0)
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  // A call anywhere in a fully-contained intermediate block is disqualifying.
  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}
1348 
1349 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1350  auto *SaveBB = Save->getParent();
1351  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1352 
1353  if (SaveBB == ResumeOrDestroyBB)
1354  return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1355 
1356  // Any calls from Save to the end of the block?
1357  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1358  return true;
1359 
1360  // Any calls from begging of the block up to ResumeOrDestroy?
1361  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1362  ResumeOrDestroy))
1363  return true;
1364 
1365  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1366  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1367  return true;
1368 
1369  return false;
1370 }
1371 
// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
    CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    // Suspend is first in its block; look through a unique predecessor.
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    // Replace the invoke's control flow with a plain branch to its normal
    // destination before deleting it.
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}
1432 
// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  // Compact the vector in place: when S[I] is simplified away it is swapped
  // with the last live element and the live count N shrinks; otherwise I
  // advances. Loop ends when I catches up with N.
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  // Drop the eliminated suspend points.
  S.resize(N);
}
1458 
    SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  // Produce the three switch-lowering clones: resume, destroy (unwind
  // path), and cleanup (used when the frame allocation was elided).
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store addresses resume/destroy/cleanup functions in the coroutine frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to resume/destroy/clone functions pointed
  // by the last argument of @llvm.coro.info, so that CoroElide pass can
  // determined correct function to call.
  setCoroInfo(F, Shape, Clones);
}
1490 
    Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  // Replace every use of the resume intrinsic with the continuation
  // function, cast to i8*, then delete the intrinsic call.
  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
      UndefValue::get(Int8PtrTy));
}
1504 
/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
    ArrayRef<Value *> FnArgs,
    SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    // Insert a bit-or-pointer cast whenever the supplied argument's type does
    // not match the callee's declared parameter type; pass through otherwise.
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}
1520 
    IRBuilder<> &Builder) {
  auto *FnTy =
      cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
  // Coerce the arguments, llvm optimizations seem to ignore the types in
  // vaarg functions and throws away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  // Emit the call with musttail, propagating the debug location and the
  // callee's calling convention (both required for a valid musttail call).
  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  TailCall->setTailCallKind(CallInst::TCK_MustTail);
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}
1537 
    SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());

  // Compute the frame pointer from the async context storage.
  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration.
    auto *Continuation = createCloneDeclaration(
        F, Shape, ".resume." + Twine(Idx), NextF, Suspend);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
    auto *TailCall =
        coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  // Now clone the coroutine body into each continuation declaration.
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}
1619 
    SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
    // Frame fits inline in the caller-provided storage; use it directly.
    RawFramePtr = Id->getStorage();
  } else {

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        // Aggregate return: continuation first, then each yielded value.
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  // Now clone the coroutine body into each continuation declaration.
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}
1750 
1751 namespace {
1752  class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1753  Function &F;
1754  public:
1755  PrettyStackTraceFunction(Function &F) : F(F) {}
1756  void print(raw_ostream &OS) const override {
1757  OS << "While splitting coroutine ";
1758  F.printAsOperand(OS, /*print type*/ false, F.getParent());
1759  OS << "\n";
1760  }
1761  };
1762 }
1763 
1766  bool ReuseFrameSlot) {
1767  PrettyStackTraceFunction prettyStackTrace(F);
1768 
1769  // The suspend-crossing algorithm in buildCoroutineFrame get tripped
1770  // up by uses in unreachable blocks, so remove them as a first pass.
1772 
1773  coro::Shape Shape(F, ReuseFrameSlot);
1774  if (!Shape.CoroBegin)
1775  return Shape;
1776 
1777  simplifySuspendPoints(Shape);
1778  buildCoroutineFrame(F, Shape);
1779  replaceFrameSize(Shape);
1780 
1781  // If there are no suspend points, no split required, just remove
1782  // the allocation and deallocation blocks, they are not needed.
1783  if (Shape.CoroSuspends.empty()) {
1784  handleNoSuspendCoroutine(Shape);
1785  } else {
1786  switch (Shape.ABI) {
1787  case coro::ABI::Switch:
1788  splitSwitchCoroutine(F, Shape, Clones);
1789  break;
1790  case coro::ABI::Async:
1791  splitAsyncCoroutine(F, Shape, Clones);
1792  break;
1793  case coro::ABI::Retcon:
1794  case coro::ABI::RetconOnce:
1795  splitRetconCoroutine(F, Shape, Clones);
1796  break;
1797  }
1798  }
1799 
1800  // Replace all the swifterror operations in the original function.
1801  // This invalidates SwiftErrorOps in the Shape.
1802  replaceSwiftErrorOps(F, Shape, nullptr);
1803 
1804  return Shape;
1805 }
1806 
1807 static void
1809  const SmallVectorImpl<Function *> &Clones,
1810  CallGraph &CG, CallGraphSCC &SCC) {
1811  if (!Shape.CoroBegin)
1812  return;
1813 
1814  removeCoroEnds(Shape, &CG);
1816 
1817  // Update call graph and add the functions we created to the SCC.
1818  coro::updateCallGraph(F, Clones, CG, SCC);
1819 }
1820 
1822  LazyCallGraph::Node &N, const coro::Shape &Shape,
1825  FunctionAnalysisManager &FAM) {
1826  if (!Shape.CoroBegin)
1827  return;
1828 
1829  for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1830  auto &Context = End->getContext();
1831  End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1832  End->eraseFromParent();
1833  }
1834 
1835  if (!Clones.empty()) {
1836  switch (Shape.ABI) {
1837  case coro::ABI::Switch:
1838  // Each clone in the Switch lowering is independent of the other clones.
1839  // Let the LazyCallGraph know about each one separately.
1840  for (Function *Clone : Clones)
1841  CG.addSplitFunction(N.getFunction(), *Clone);
1842  break;
1843  case coro::ABI::Async:
1844  case coro::ABI::Retcon:
1845  case coro::ABI::RetconOnce:
1846  // Each clone in the Async/Retcon lowering references of the other clones.
1847  // Let the LazyCallGraph know about all of them at once.
1848  if (!Clones.empty())
1849  CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1850  break;
1851  }
1852 
1853  // Let the CGSCC infra handle the changes to the original function.
1854  updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1855  }
1856 
1857  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1858  // to the split functions.
1859  postSplitCleanup(N.getFunction());
1860  updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1861 }
1862 
1863 // When we see the coroutine the first time, we insert an indirect call to a
1864 // devirt trigger function and mark the coroutine that it is now ready for
1865 // split.
1866 // Async lowering uses this after it has split the function to restart the
1867 // pipeline.
1869  bool MarkForAsyncRestart = false) {
1870  Module &M = *F.getParent();
1871  LLVMContext &Context = F.getContext();
1872 #ifndef NDEBUG
1873  Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1874  assert(DevirtFn && "coro.devirt.trigger function not found");
1875 #endif
1876 
1877  F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1879  : PREPARED_FOR_SPLIT);
1880 
1881  // Insert an indirect call sequence that will be devirtualized by CoroElide
1882  // pass:
1883  // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
1884  // %1 = bitcast i8* %0 to void(i8*)*
1885  // call void %1(i8* null)
1886  coro::LowererBase Lowerer(M);
1887  Instruction *InsertPt =
1888  MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
1889  : F.getEntryBlock().getTerminator();
1891  auto *DevirtFnAddr =
1892  Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
1894  {Type::getInt8PtrTy(Context)}, false);
1895  auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
1896 
1897  // Update CG graph with an indirect call we just added.
1898  CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
1899 }
1900 
1901 // Make sure that there is a devirtualization trigger function that the
1902 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
1903 // trigger function is not found, we will create one and add it to the current
1904 // SCC.
1906  Module &M = CG.getModule();
1907  if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
1908  return;
1909 
1910  LLVMContext &C = M.getContext();
1912  /*isVarArg=*/false);
1913  Function *DevirtFn =
1914  Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
1916  DevirtFn->addFnAttr(Attribute::AlwaysInline);
1917  auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
1918  ReturnInst::Create(C, Entry);
1919 
1920  auto *Node = CG.getOrInsertFunction(DevirtFn);
1921 
1922  SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
1923  Nodes.push_back(Node);
1924  SCC.initialize(Nodes);
1925 }
1926 
1927 /// Replace a call to llvm.coro.prepare.retcon.
1928 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
1929  LazyCallGraph::SCC &C) {
1930  auto CastFn = Prepare->getArgOperand(0); // as an i8*
1931  auto Fn = CastFn->stripPointerCasts(); // as its original type
1932 
1933  // Attempt to peephole this pattern:
1934  // %0 = bitcast [[TYPE]] @some_function to i8*
1935  // %1 = call @llvm.coro.prepare.retcon(i8* %0)
1936  // %2 = bitcast %1 to [[TYPE]]
1937  // ==>
1938  // %2 = @some_function
1939  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
1940  // Look for bitcasts back to the original function type.
1941  auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1942  if (!Cast || Cast->getType() != Fn->getType())
1943  continue;
1944 
1945  // Replace and remove the cast.
1946  Cast->replaceAllUsesWith(Fn);
1947  Cast->eraseFromParent();
1948  }
1949 
1950  // Replace any remaining uses with the function as an i8*.
1951  // This can never directly be a callee, so we don't need to update CG.
1952  Prepare->replaceAllUsesWith(CastFn);
1953  Prepare->eraseFromParent();
1954 
1955  // Kill dead bitcasts.
1956  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
1957  if (!Cast->use_empty())
1958  break;
1959  CastFn = Cast->getOperand(0);
1960  Cast->eraseFromParent();
1961  }
1962 }
1963 /// Replace a call to llvm.coro.prepare.retcon.
1964 static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
1965  auto CastFn = Prepare->getArgOperand(0); // as an i8*
1966  auto Fn = CastFn->stripPointerCasts(); // as its original type
1967 
1968  // Find call graph nodes for the preparation.
1969  CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
1970  if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
1971  PrepareUserNode = CG[Prepare->getFunction()];
1972  FnNode = CG[ConcreteFn];
1973  }
1974 
1975  // Attempt to peephole this pattern:
1976  // %0 = bitcast [[TYPE]] @some_function to i8*
1977  // %1 = call @llvm.coro.prepare.retcon(i8* %0)
1978  // %2 = bitcast %1 to [[TYPE]]
1979  // ==>
1980  // %2 = @some_function
1981  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
1982  UI != UE; ) {
1983  // Look for bitcasts back to the original function type.
1984  auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1985  if (!Cast || Cast->getType() != Fn->getType()) continue;
1986 
1987  // Check whether the replacement will introduce new direct calls.
1988  // If so, we'll need to update the call graph.
1989  if (PrepareUserNode) {
1990  for (auto &Use : Cast->uses()) {
1991  if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
1992  if (!CB->isCallee(&Use))
1993  continue;
1994  PrepareUserNode->removeCallEdgeFor(*CB);
1995  PrepareUserNode->addCalledFunction(CB, FnNode);
1996  }
1997  }
1998  }
1999 
2000  // Replace and remove the cast.
2001  Cast->replaceAllUsesWith(Fn);
2002  Cast->eraseFromParent();
2003  }
2004 
2005  // Replace any remaining uses with the function as an i8*.
2006  // This can never directly be a callee, so we don't need to update CG.
2007  Prepare->replaceAllUsesWith(CastFn);
2008  Prepare->eraseFromParent();
2009 
2010  // Kill dead bitcasts.
2011  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2012  if (!Cast->use_empty()) break;
2013  CastFn = Cast->getOperand(0);
2014  Cast->eraseFromParent();
2015  }
2016 }
2017 
2018 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2019  LazyCallGraph::SCC &C) {
2020  bool Changed = false;
2021  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
2022  // Intrinsics can only be used in calls.
2023  auto *Prepare = cast<CallInst>((PI++)->getUser());
2024  replacePrepare(Prepare, CG, C);
2025  Changed = true;
2026  }
2027 
2028  return Changed;
2029 }
2030 
2031 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2032 /// IPO from operating on calls to a retcon coroutine before it's been
2033 /// split. This is only safe to do after we've split all retcon
2034 /// coroutines in the module. We can do that this in this pass because
2035 /// this pass does promise to split all retcon coroutines (as opposed to
2036 /// switch coroutines, which are lowered in multiple stages).
2037 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2038  bool Changed = false;
2039  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
2040  PI != PE; ) {
2041  // Intrinsics can only be used in calls.
2042  auto *Prepare = cast<CallInst>((PI++)->getUser());
2043  replacePrepare(Prepare, CG);
2044  Changed = true;
2045  }
2046 
2047  return Changed;
2048 }
2049 
2050 static bool declaresCoroSplitIntrinsics(const Module &M) {
2051  return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2052  "llvm.coro.prepare.retcon",
2053  "llvm.coro.prepare.async"});
2054 }
2055 
2056 static void addPrepareFunction(const Module &M,
2058  StringRef Name) {
2059  auto *PrepareFn = M.getFunction(Name);
2060  if (PrepareFn && !PrepareFn->use_empty())
2061  Fns.push_back(PrepareFn);
2062 }
2063 
2066  LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2067  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2068  // non-zero number of nodes, so we assume that here and grab the first
2069  // node's function's module.
2070  Module &M = *C.begin()->getFunction().getParent();
2071  auto &FAM =
2072  AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2073 
2075  return PreservedAnalyses::all();
2076 
2077  // Check for uses of llvm.coro.prepare.retcon/async.
2078  SmallVector<Function *, 2> PrepareFns;
2079  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2080  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2081 
2082  // Find coroutines for processing.
2084  for (LazyCallGraph::Node &N : C)
2085  if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2086  Coroutines.push_back(&N);
2087 
2088  if (Coroutines.empty() && PrepareFns.empty())
2089  return PreservedAnalyses::all();
2090 
2091  if (Coroutines.empty()) {
2092  for (auto *PrepareFn : PrepareFns) {
2093  replaceAllPrepares(PrepareFn, CG, C);
2094  }
2095  }
2096 
2097  // Split all the coroutines.
2098  for (LazyCallGraph::Node *N : Coroutines) {
2099  Function &F = N->getFunction();
2100  Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
2101  StringRef Value = Attr.getValueAsString();
2102  LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2103  << "' state: " << Value << "\n");
2104  if (Value == UNPREPARED_FOR_SPLIT) {
2105  // Enqueue a second iteration of the CGSCC pipeline on this SCC.
2106  UR.CWorklist.insert(&C);
2108  continue;
2109  }
2110  F.removeFnAttr(CORO_PRESPLIT_ATTR);
2111 
2113  const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
2114  updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2115 
2116  if ((Shape.ABI == coro::ABI::Async || Shape.ABI == coro::ABI::Retcon ||
2117  Shape.ABI == coro::ABI::RetconOnce) &&
2118  !Shape.CoroSuspends.empty()) {
2119  // Run the CGSCC pipeline on the newly split functions.
2120  // All clones will be in the same RefSCC, so choose a random clone.
2121  UR.RCWorklist.insert(CG.lookupRefSCC(CG.get(*Clones[0])));
2122  }
2123  }
2124 
2125  if (!PrepareFns.empty()) {
2126  for (auto *PrepareFn : PrepareFns) {
2127  replaceAllPrepares(PrepareFn, CG, C);
2128  }
2129  }
2130 
2131  return PreservedAnalyses::none();
2132 }
2133 
2134 namespace {
2135 
2136 // We present a coroutine to LLVM as an ordinary function with suspension
2137 // points marked up with intrinsics. We let the optimizer party on the coroutine
2138 // as a single function for as long as possible. Shortly before the coroutine is
2139 // eligible to be inlined into its callers, we split up the coroutine into parts
2140 // corresponding to initial, resume and destroy invocations of the coroutine,
2141 // add them to the current SCC and restart the IPO pipeline to optimize the
2142 // coroutine subfunctions we extracted before proceeding to the caller of the
2143 // coroutine.
2144 struct CoroSplitLegacy : public CallGraphSCCPass {
2145  static char ID; // Pass identification, replacement for typeid
2146 
2147  CoroSplitLegacy(bool ReuseFrameSlot = false)
2148  : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
2150  }
2151 
2152  bool Run = false;
2153  bool ReuseFrameSlot;
2154 
2155  // A coroutine is identified by the presence of coro.begin intrinsic, if
2156  // we don't have any, this pass has nothing to do.
2157  bool doInitialization(CallGraph &CG) override {
2160  }
2161 
2162  bool runOnSCC(CallGraphSCC &SCC) override {
2163  if (!Run)
2164  return false;
2165 
2166  // Check for uses of llvm.coro.prepare.retcon.
2167  SmallVector<Function *, 2> PrepareFns;
2168  auto &M = SCC.getCallGraph().getModule();
2169  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2170  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2171 
2172  // Find coroutines for processing.
2173  SmallVector<Function *, 4> Coroutines;
2174  for (CallGraphNode *CGN : SCC)
2175  if (auto *F = CGN->getFunction())
2176  if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2177  Coroutines.push_back(F);
2178 
2179  if (Coroutines.empty() && PrepareFns.empty())
2180  return false;
2181 
2182  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2183 
2184  if (Coroutines.empty()) {
2185  bool Changed = false;
2186  for (auto *PrepareFn : PrepareFns)
2187  Changed |= replaceAllPrepares(PrepareFn, CG);
2188  return Changed;
2189  }
2190 
2192 
2193  // Split all the coroutines.
2194  for (Function *F : Coroutines) {
2195  Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2196  StringRef Value = Attr.getValueAsString();
2197  LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2198  << "' state: " << Value << "\n");
2199  // Async lowering marks coroutines to trigger a restart of the pipeline
2200  // after it has split them.
2202  F->removeFnAttr(CORO_PRESPLIT_ATTR);
2203  continue;
2204  }
2205  if (Value == UNPREPARED_FOR_SPLIT) {
2206  prepareForSplit(*F, CG);
2207  continue;
2208  }
2209  F->removeFnAttr(CORO_PRESPLIT_ATTR);
2210 
2212  const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
2213  updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2214  if (Shape.ABI == coro::ABI::Async) {
2215  // Restart SCC passes.
2216  // Mark function for CoroElide pass. It will devirtualize causing a
2217  // restart of the SCC pipeline.
2218  prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2219  }
2220  }
2221 
2222  for (auto *PrepareFn : PrepareFns)
2223  replaceAllPrepares(PrepareFn, CG);
2224 
2225  return true;
2226  }
2227 
2228  void getAnalysisUsage(AnalysisUsage &AU) const override {
2230  }
2231 
2232  StringRef getPassName() const override { return "Coroutine Splitting"; }
2233 };
2234 
2235 } // end anonymous namespace
2236 
2237 char CoroSplitLegacy::ID = 0;
2238 
2240  CoroSplitLegacy, "coro-split",
2241  "Split coroutine into a set of functions driving its state machine", false,
2242  false)
2245  CoroSplitLegacy, "coro-split",
2246  "Split coroutine into a set of functions driving its state machine", false,
2247  false)
2248 
2249 Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
2250  return new CoroSplitLegacy(ReuseFrameSlot);
2251 }
llvm::coro::Shape::CoroSizes
SmallVector< CoroSizeInst *, 2 > CoroSizes
Definition: CoroInternal.h:101
llvm::Check::Size
@ Size
Definition: FileCheck.h:73
postSplitCleanup
static void postSplitCleanup(Function &F)
Definition: CoroSplit.cpp:1107
i
i
Definition: README.txt:29
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:155
llvm::EngineKind::Kind
Kind
Definition: ExecutionEngine.h:524
set
We currently generate a but we really shouldn eax ecx xorl edx divl ecx eax divl ecx movl eax ret A similar code sequence works for division We currently compile i32 v2 eax eax jo LBB1_2 atomic and others It is also currently not done for read modify write instructions It is also current not done if the OF or CF flags are needed The shift operators have the complication that when the shift count is EFLAGS is not set
Definition: README.txt:1277
functions
amdgpu propagate attributes Late propagate attributes from kernels to functions
Definition: AMDGPUPropagateAttributes.cpp:199
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
llvm::coro::createMustTailCall
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, ArrayRef< Value * > Arguments, IRBuilder<> &)
Definition: CoroSplit.cpp:1521
addPrepareFunction
static void addPrepareFunction(const Module &M, SmallVectorImpl< Function * > &Fns, StringRef Name)
Definition: CoroSplit.cpp:2056
Attrs
Function Attrs
Definition: README_ALTIVEC.txt:215
llvm::predecessors
pred_range predecessors(BasicBlock *BB)
Definition: CFG.h:127
llvm::coro::Shape::AsyncLoweringStorage::FrameOffset
uint64_t FrameOffset
Definition: CoroInternal.h:154
PREPARED_FOR_SPLIT
#define PREPARED_FOR_SPLIT
Definition: CoroInternal.h:41
createCloneDeclaration
static Function * createCloneDeclaration(Function &OrigF, coro::Shape &Shape, const Twine &Suffix, Module::iterator InsertBefore, AnyCoroSuspendInst *ActiveSuspend)
Definition: CoroSplit.cpp:466
shouldBeMustTail
static bool shouldBeMustTail(const CallInst &CI, const Function &F)
Definition: CoroSplit.cpp:1215
llvm
Definition: AllocatorList.h:23
llvm::DILocalScope::getSubprogram
DISubprogram * getSubprogram() const
Get the subprogram for this scope.
Definition: DebugInfoMetadata.cpp:805
M
We currently emits eax Perhaps this is what we really should generate is Is imull three or four cycles eax eax The current instruction priority is based on pattern complexity The former is more complex because it folds a load so the latter will not be emitted Perhaps we should use AddedComplexity to give LEA32r a higher priority We should always try to match LEA first since the LEA matching code does some estimate to determine whether the match is profitable if we care more about code then imull is better It s two bytes shorter than movl leal On a Pentium M
Definition: README.txt:252
updateCoroFrame
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn, Function *DestroyFn, Function *CleanupFn)
Definition: CoroSplit.cpp:1082
llvm::CmpInst::ICMP_EQ
@ ICMP_EQ
equal
Definition: InstrTypes.h:743
llvm::ReturnInst
Return a value (possibly void), from a function.
Definition: Instructions.h:2923
llvm::SmallPtrSetImpl::erase
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
Definition: SmallPtrSet.h:378
ValueMapper.h
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:112
llvm::AnyCoroSuspendInst
Definition: CoroInstr.h:477
print
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
Definition: ArchiveWriter.cpp:147
llvm::Type::getInt8PtrTy
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:256
llvm::coro::Shape::SwitchLowering
SwitchLoweringStorage SwitchLowering
Definition: CoroInternal.h:162
llvm::initializeCoroSplitLegacyPass
void initializeCoroSplitLegacyPass(PassRegistry &)
updateCallGraphAfterCoroutineSplit
static void updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape, const SmallVectorImpl< Function * > &Clones, CallGraph &CG, CallGraphSCC &SCC)
Definition: CoroSplit.cpp:1808
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:107
IntrinsicInst.h
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:229
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:785
llvm::Module::iterator
FunctionListType::iterator iterator
The Function iterators.
Definition: Module.h:92
Scalar.h
llvm::coro::Shape::FrameTy
StructType * FrameTy
Definition: CoroInternal.h:122
InstIterator.h
llvm::Function
Definition: Function.h:61
getFunction
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:252
llvm::Attribute
Definition: Attributes.h:52
StringRef.h
llvm::ConstantStruct::get
static Constant * get(StructType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1312
Pass.h
llvm::TrackingVH::getValPtr
ValueTy * getValPtr() const
Definition: ValueHandle.h:335
it
Reference model for inliner Oz decision policy Note this model is also referenced by test Transforms Inline ML tests if replacing it
Definition: README.txt:3
llvm::createCFGSimplificationPass
FunctionPass * createCFGSimplificationPass(SimplifyCFGOptions Options=SimplifyCFGOptions(), std::function< bool(const Function &)> Ftor=nullptr)
Definition: SimplifyCFGPass.cpp:388
addFramePointerAttrs
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex, uint64_t Size, Align Alignment)
Definition: CoroSplit.cpp:807
hasCallsInBlockBetween
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To)
Definition: CoroSplit.cpp:1308
removeCoroEnds
static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG)
Remove calls to llvm.coro.end in the original function.
Definition: CoroSplit.cpp:1006
llvm::ilist_node_with_parent::getNextNode
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:288
llvm::SmallVector< Value *, 8 >
llvm::CallGraphNode::removeCallEdgeFor
void removeCallEdgeFor(CallBase &Call)
Removes the edge in the node for the specified call site.
Definition: CallGraph.cpp:214
llvm::CallBase::isInlineAsm
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1462
prepareForSplit
static void prepareForSplit(Function &F, CallGraph &CG, bool MarkForAsyncRestart=false)
Definition: CoroSplit.cpp:1868
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Definition: DerivedTypes.h:693
llvm::coro::ABI::Retcon
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
llvm::IRBuilder<>
llvm::GlobalVariable
Definition: GlobalVariable.h:40
llvm::SmallDenseMap
Definition: DenseMap.h:880
llvm::FunctionType::get
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
Definition: Type.cpp:328
Local.h
llvm::CallGraph
The basic data container for the call graph of a Module of IR.
Definition: CallGraph.h:73
llvm::CoroSplitPass::ReuseFrameSlot
bool ReuseFrameSlot
Definition: CoroSplit.h:32
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:151
llvm::createEarlyCSEPass
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1699
llvm::verifyFunction
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:5680
hasCallsBetween
static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy)
Definition: CoroSplit.cpp:1349
llvm::coro::ABI::Switch
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
llvm::PreservedAnalyses::none
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:158
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:312
scanPHIsAndUpdateValueMap
static void scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock, DenseMap< Value *, Value * > &ResolvedValues)
Definition: CoroSplit.cpp:1131
llvm::coro::LowererBase::makeSubFnCall
Value * makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt)
Definition: Coroutines.cpp:107
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:46
llvm::coro::Shape::ABI
coro::ABI ABI
Definition: CoroInternal.h:120
DenseMap.h
llvm::removeUnreachableBlocks
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that can not be reached from the function's entry.
Definition: Local.cpp:2478
llvm::coro::Shape::SwitchFieldIndex::Destroy
@ Destroy
Definition: CoroInternal.h:109
Module.h
llvm::CoroSuspendAsyncInst::getResumeFunction
CoroAsyncResumeInst * getResumeFunction() const
Definition: CoroInstr.h:546
llvm::AttributeList
Definition: Attributes.h:375
llvm::CoroBeginInst::getId
AnyCoroIdInst * getId() const
Definition: CoroInstr.h:424
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1472
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1244
llvm::coro::declaresIntrinsics
bool declaresIntrinsics(const Module &M, const std::initializer_list< StringRef >)
Definition: Coroutines.cpp:161
replaceCoroEndAsync
static bool replaceCoroEndAsync(AnyCoroEndInst *End)
Replace an llvm.coro.end.async.
Definition: CoroSplit.cpp:181
llvm::BasicBlock::splitBasicBlock
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition: BasicBlock.cpp:375
llvm::AnyCoroIdInst::getCoroAlloc
CoroAllocInst * getCoroAlloc()
Definition: CoroInstr.h:84
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:449
llvm::CloneFunctionChangeType::LocalChangesOnly
@ LocalChangesOnly
declaresCoroSplitIntrinsics
static bool declaresCoroSplitIntrinsics(const Module &M)
Definition: CoroSplit.cpp:2050
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition: raw_ostream.cpp:894
handleNoSuspendCoroutine
static void handleNoSuspendCoroutine(coro::Shape &Shape)
Definition: CoroSplit.cpp:1273
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:138
llvm::BasicBlock::getSinglePredecessor
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:264
llvm::SmallVectorImpl::pop_back_val
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:635
llvm::coro::Shape::SwitchLoweringStorage::ResumeSwitch
SwitchInst * ResumeSwitch
Definition: CoroInternal.h:132
replaceCoroEnd
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Definition: CoroSplit.cpp:312
llvm::coro::buildCoroutineFrame
void buildCoroutineFrame(Function &F, Shape &Shape)
Definition: CoroFrame.cpp:2151
llvm::Function::addFnAttr
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition: Function.h:245
LegacyPassManager.h
llvm::CallInst::TCK_MustTail
@ TCK_MustTail
Definition: Instructions.h:1630
llvm::Type::getInt8Ty
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:202
INITIALIZE_PASS_END
INITIALIZE_PASS_END(RegBankSelect, DEBUG_TYPE, "Assign register bank of generic virtual registers", false, false) RegBankSelect
Definition: RegBankSelect.cpp:69
llvm::CallGraphNode::addCalledFunction
void addCalledFunction(CallBase *Call, CallGraphNode *M)
Adds a function to the list of functions called by this one.
Definition: CallGraph.h:243
LLVM_DEBUG
#define LLVM_DEBUG(X)
Definition: Debug.h:122
F
#define F(x, y, z)
Definition: MD5.cpp:56
llvm::ConstantExpr::getPointerCast
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2019
Uses
SmallPtrSet< MachineInstr *, 2 > Uses
Definition: ARMLowOverheadLoops.cpp:583
llvm::BasicBlock
LLVM Basic Block Representation.
Definition: BasicBlock.h:58
a
=0.0 ? 0.0 :(a > 0.0 ? 1.0 :-1.0) a
Definition: README.txt:489
llvm::coro::Shape::emitDealloc
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:552
Context
LLVMContext & Context
Definition: NVVMIntrRange.cpp:66
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
llvm::PseudoProbeType::IndirectCall
@ IndirectCall
llvm::CallGraph::getOrInsertFunction
CallGraphNode * getOrInsertFunction(const Function *F)
Similar to operator[], but this will insert a new CallGraphNode for F if one does not already exist.
Definition: CallGraph.cpp:175
Arg
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Definition: AMDGPULibCalls.cpp:205
llvm::PrettyStackTraceEntry
PrettyStackTraceEntry - This class is used to represent a frame of the "pretty" stack trace that is d...
Definition: PrettyStackTrace.h:52
updateAsyncFuncPointerContextSize
static void updateAsyncFuncPointerContextSize(coro::Shape &Shape)
Definition: CoroSplit.cpp:1012
Instruction.h
llvm::CoroIdInst::setInfo
void setInfo(Constant *C)
Definition: CoroInstr.h:180
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition: Constants.h:77
llvm::Intrinsic::getType
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
Definition: Function.cpp:1247
llvm::coro::Shape::RetconLoweringStorage::IsFrameInlineInStorage
bool IsFrameInlineInStorage
Definition: CoroInternal.h:144
GlobalValue.h
llvm::CallGraphSCCPass::doInitialization
virtual bool doInitialization(CallGraph &CG)
doInitialization - This method is called before the SCC's of the program has been processed,...
Definition: CallGraphSCCPass.h:48
llvm::CallGraphSCC
CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
Definition: CallGraphSCCPass.h:87
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition: PassRegistry.cpp:31
llvm::LazyCallGraph::SCC
An SCC of the call graph.
Definition: LazyCallGraph.h:421
llvm::coro::Shape::AsyncLoweringStorage::AsyncFuncPointer
GlobalVariable * AsyncFuncPointer
Definition: CoroInternal.h:156
llvm::legacy::FunctionPassManager::doFinalization
bool doFinalization()
doFinalization - Run all of the finalizers for the function passes.
Definition: LegacyPassManager.cpp:1368
Constants.h
splitAsyncCoroutine
static void splitAsyncCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones)
Definition: CoroSplit.cpp:1538
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
llvm::User
Definition: User.h:44
createClone
static Function * createClone(Function &F, const Twine &Suffix, coro::Shape &Shape, CoroCloner::Kind FKind)
Definition: CoroSplit.cpp:998
C
(vector float) vec_cmpeq(*A, *B) C
Definition: README_ALTIVEC.txt:86
llvm::ARM_PROC::A
@ A
Definition: ARMBaseInfo.h:34
Twine.h
InstrTypes.h
llvm::coro::Shape::getRetconResultTypes
ArrayRef< Type * > getRetconResultTypes() const
Definition: CoroInternal.h:220
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:1493
SI
@ SI
Definition: SIInstrInfo.cpp:7342
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition: PassAnalysisSupport.h:47
llvm::CoroSuspendInst
This represents the llvm.coro.suspend instruction.
Definition: CoroInstr.h:493
llvm::ReplaceInstWithInst
void ReplaceInstWithInst(BasicBlock::InstListType &BIL, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
Definition: BasicBlockUtils.cpp:476
llvm::CoroSuspendAsyncInst::MustTailCallFuncArg
@ MustTailCallFuncArg
Definition: CoroInstr.h:531
splitRetconCoroutine
static void splitRetconCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones)
Definition: CoroSplit.cpp:1620
llvm::Value::uses
iterator_range< use_iterator > uses()
Definition: Value.h:389
false
Definition: StackSlotColoring.cpp:142
llvm::coro::Shape::CoroSuspends
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition: CoroInternal.h:102
llvm::coro::Shape::CoroEnds
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition: CoroInternal.h:100
llvm::coro::replaceCoroFree
void replaceCoroFree(CoroIdInst *CoroId, bool Elide)
Definition: Coroutines.cpp:174
llvm::Instruction
Definition: Instruction.h:45
InlineInfo
@ InlineInfo
Definition: FunctionInfo.cpp:24
llvm::report_fatal_error
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:140
llvm::BasicBlock::phis
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:354
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:50
llvm::UndefValue::get
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1770
llvm::ConstantInt::get
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:885
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:73
llvm::FunctionType::params
ArrayRef< Type * > params() const
Definition: DerivedTypes.h:129
SmallPtrSet.h
llvm::CallGraphNode
A node in the call graph for a module.
Definition: CallGraph.h:167
splitSwitchCoroutine
static void splitSwitchCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones)
Definition: CoroSplit.cpp:1459
LazyCallGraph.h
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1453
llvm::Attribute::getValueAsString
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:297
llvm::AttrBuilder::addAlignmentAttr
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
Definition: Attributes.cpp:1788
Type.h
llvm::coro::Shape::FrameAlign
Align FrameAlign
Definition: CoroInternal.h:123
ASYNC_RESTART_AFTER_SPLIT
#define ASYNC_RESTART_AFTER_SPLIT
Definition: CoroInternal.h:42
CFG.h
llvm::LazyCallGraph::get
Node & get(Function &F)
Get a graph node for a given function, scanning it to populate the graph data as necessary.
Definition: LazyCallGraph.h:974
llvm::coro::Shape::getIndexType
IntegerType * getIndexType() const
Definition: CoroInternal.h:188
into
Clang compiles this into
Definition: README.txt:504
llvm::coro::Shape
Definition: CoroInternal.h:98
llvm::coro::Shape::AsyncLowering
AsyncLoweringStorage AsyncLowering
Definition: CoroInternal.h:164
CoroInternal.h
BasicBlock.h
hasCallsInBlocksBetween
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB)
Definition: CoroSplit.cpp:1320
llvm::AttributeList::ReturnIndex
@ ReturnIndex
Definition: Attributes.h:378
llvm::coro::Shape::SwiftErrorOps
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition: CoroInternal.h:103
llvm::instructions
inst_range instructions(Function *F)
Definition: InstIterator.h:133
setCoroInfo
static void setCoroInfo(Function &F, coro::Shape &Shape, ArrayRef< Function * > Fns)
Definition: CoroSplit.cpp:1058
llvm::GlobalVariable::getInitializer
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
Definition: GlobalVariable.h:136
VI
@ VI
Definition: SIInstrInfo.cpp:7343
llvm::ArrayRef::drop_front
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:202
llvm::Instruction::eraseFromParent
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:78
simplifySuspendPoints
static void simplifySuspendPoints(coro::Shape &Shape)
Definition: CoroSplit.cpp:1434
llvm::CoroSplitPass::run
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
Definition: CoroSplit.cpp:2064
llvm::CallGraphSCCPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &Info) const override
getAnalysisUsage - For this class, we declare that we require and preserve the call graph.
Definition: CallGraphSCCPass.cpp:657
Index
uint32_t Index
Definition: ELFObjHandler.cpp:84
llvm::updateCGAndAnalysisManagerForFunctionPass
LazyCallGraph::SCC & updateCGAndAnalysisManagerForFunctionPass(LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Helper to update the call graph after running a function pass.
Definition: CGSCCPassManager.cpp:1227
llvm::Function::getCallingConv
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:228
llvm::LazyCallGraph::addSplitFunction
void addSplitFunction(Function &OriginalFunction, Function &NewFunction)
Add a new function split/outlined from an existing function.
Definition: LazyCallGraph.cpp:1612
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:572
llvm::DbgDeclareInst
This represents the llvm.dbg.declare instruction.
Definition: IntrinsicInst.h:303
llvm::LazyCallGraph::lookupRefSCC
RefSCC * lookupRefSCC(Node &N) const
Lookup a function's RefSCC in the graph.
Definition: LazyCallGraph.h:965
CoroSplit.h
INITIALIZE_PASS_DEPENDENCY
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
llvm::ConstantPointerNull::get
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
Definition: Constants.cpp:1756
llvm::changeToUnreachable
unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2131
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
llvm::numbers::e
constexpr double e
Definition: MathExtras.h:58
llvm::BranchInst::Create
static BranchInst * Create(BasicBlock *IfTrue, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:3061
llvm::DenseMap
Definition: DenseMap.h:714
coerceArguments
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy, ArrayRef< Value * > FnArgs, SmallVectorImpl< Value * > &CallArgs)
Coerce the arguments in FnArgs according to FnTy in CallArgs.
Definition: CoroSplit.cpp:1506
llvm::CallGraphWrapperPass
The ModulePass which wraps up a CallGraph and the logic to build it.
Definition: CallGraph.h:337
llvm::CoroSuspendAsyncInst
This represents the llvm.coro.suspend.async instruction.
Definition: CoroInstr.h:525
PrettyStackTrace.h
CORO_DEVIRT_TRIGGER_FN
#define CORO_DEVIRT_TRIGGER_FN
Definition: CoroInternal.h:44
I
#define I(x, y, z)
Definition: MD5.cpp:59
llvm::AttrBuilder
Definition: Attributes.h:786
Cloning.h
llvm::FunctionType::getParamType
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:134
llvm::CGSCCUpdateResult::CWorklist
SmallPriorityWorklist< LazyCallGraph::SCC *, 1 > & CWorklist
Worklist of the SCCs queued for processing.
Definition: CGSCCPassManager.h:257
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:71
llvm::PointerType
Class to represent pointers.
Definition: DerivedTypes.h:634
llvm::legacy::FunctionPassManager::doInitialization
bool doInitialization()
doInitialization - Run all of the initializers for the function passes.
Definition: LegacyPassManager.cpp:1362
addMustTailToCoroResumes
static void addMustTailToCoroResumes(Function &F)
Definition: CoroSplit.cpp:1250
INITIALIZE_PASS_BEGIN
INITIALIZE_PASS_BEGIN(CoroSplitLegacy, "coro-split", "Split coroutine into a set of functions driving its state machine", false, false) INITIALIZE_PASS_END(CoroSplitLegacy
llvm::TrackingVH
Value handle that tracks a Value across RAUW.
Definition: ValueHandle.h:331
llvm::coro::Shape::FramePtr
Instruction * FramePtr
Definition: CoroInternal.h:125
llvm::MDString::get
static MDString * get(LLVMContext &Context, StringRef Str)
Definition: Metadata.cpp:467
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::find
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:150
llvm::Function::Create
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:137
llvm::ilist_node_with_parent::getPrevNode
NodeTy * getPrevNode()
Definition: ilist_node.h:274
IRBuilder.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::Function::hasParamAttribute
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
Definition: Function.h:439
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:958
replaceUnwindCoroEnd
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Replace an unwind call to llvm.coro.end.
Definition: CoroSplit.cpp:282
llvm::coro::ABI::Async
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
llvm::Value::use_begin
use_iterator use_begin()
Definition: Value.h:373
UNPREPARED_FOR_SPLIT
#define UNPREPARED_FOR_SPLIT
Definition: CoroInternal.h:40
llvm::Type::isVoidTy
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
llvm::ArrayType::get
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:605
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:67
llvm::legacy::FunctionPassManager::add
void add(Pass *P) override
Add a pass to the queue of passes to run.
Definition: LegacyPassManager.cpp:1344
llvm::LazyCallGraph::Node
A node in the call graph.
Definition: LazyCallGraph.h:317
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:382
Builder
assume Assume Builder
Definition: AssumeBundleBuilder.cpp:649
llvm::createSCCPPass
FunctionPass * createSCCPPass()
Definition: SCCP.cpp:1814
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
llvm::coro::Shape::AsyncLoweringStorage::ContextSize
uint64_t ContextSize
Definition: CoroInternal.h:155
replaceAsyncResumeFunction
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend, Value *Continuation)
Definition: CoroSplit.cpp:1491
llvm::AMDGPU::CPol::SCC
@ SCC
Definition: SIDefines.h:285
replaceFallthroughCoroEnd
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Replace a non-unwind call to llvm.coro.end.
Definition: CoroSplit.cpp:224
simplifyTerminatorLeadingToRet
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst)
Definition: CoroSplit.cpp:1148
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
IT
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::ZeroOrMore, cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate IT block based on arch"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow deprecated IT based on ARMv8"), clEnumValN(NoRestrictedIT, "arm-no-restrict-it", "Allow IT blocks based on ARMv7")))
llvm::SPII::Load
@ Load
Definition: SparcInstrInfo.h:32
DataLayout.h
Cond
SmallVector< MachineOperand, 4 > Cond
Definition: BasicBlockSections.cpp:167
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:57
replaceFrameSize
static void replaceFrameSize(coro::Shape &Shape)
Definition: CoroSplit.cpp:1027
createResumeEntryBlock
static void createResumeEntryBlock(Function &F, coro::Shape &Shape)
Definition: CoroSplit.cpp:327
replaceSwiftErrorOps
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape, ValueToValueMapTy *VMap)
Definition: CoroSplit.cpp:585
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:136
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:256
llvm::CoroIdInst
This represents the llvm.coro.id instruction.
Definition: CoroInstr.h:113
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:526
llvm::BasicBlock::Create
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:100
llvm::Value::use_end
use_iterator use_end()
Definition: Value.h:381
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
S
add sub stmia L5 ldr r0 bl L_printf $stub Instead of a and a wouldn t it be better to do three moves *Return an aggregate type is even return S
Definition: README.txt:210
llvm::coro::Shape::CoroBegin
CoroBeginInst * CoroBegin
Definition: CoroInternal.h:99
llvm::CoroSubFnInst::RestartTrigger
@ RestartTrigger
Definition: CoroInstr.h:40
llvm::coro::Shape::getResumeFunctionType
FunctionType * getResumeFunctionType() const
Definition: CoroInternal.h:203
CallGraphSCCPass.h
llvm::AnyCoroEndInst
Definition: CoroInstr.h:602
llvm::CoroAllocInst
This represents the llvm.coro.alloc instruction.
Definition: CoroInstr.h:70
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:298
llvm::ValueMap< const Value *, WeakTrackingVH >
CORO_PRESPLIT_ATTR
#define CORO_PRESPLIT_ATTR
Definition: CoroInternal.h:39
llvm::MipsISD::TailCall
@ TailCall
Definition: MipsISelLowering.h:65
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:148
llvm::coro::Shape::SwitchLoweringStorage::ResumeEntryBlock
BasicBlock * ResumeEntryBlock
Definition: CoroInternal.h:134
llvm::coro::Shape::RetconLowering
RetconLoweringStorage RetconLowering
Definition: CoroInternal.h:163
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:636
llvm::ConstantInt::getFalse
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:840
Argument.h
llvm::ConstantInt::getZExtValue
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:140
Callee
amdgpu Simplify well known AMD library false FunctionCallee Callee
Definition: AMDGPULibCalls.cpp:205
llvm::CoroSuspendAsyncInst::ResumeFunctionArg
@ ResumeFunctionArg
Definition: CoroInstr.h:529
CallingConv.h
Attributes.h
llvm::Function::addParamAttr
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
adds the attribute to the list of attributes for the given arg.
Definition: Function.cpp:518
llvm::coro::ABI::RetconOnce
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
llvm::CGSCCUpdateResult
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
Definition: CGSCCPassManager.h:232
llvm::Twine
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
Verifier.h
splitCoroutine
static coro::Shape splitCoroutine(Function &F, SmallVectorImpl< Function * > &Clones, bool ReuseFrameSlot)
Definition: CoroSplit.cpp:1764
llvm::ConstantInt::getTrue
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:833
split
coro split
Definition: CoroSplit.cpp:2245
llvm::GraphProgram::Name
Name
Definition: GraphWriter.h:52
llvm::LazyCallGraph::addSplitRefRecursiveFunctions
void addSplitRefRecursiveFunctions(Function &OriginalFunction, ArrayRef< Function * > NewFunctions)
Add new ref-recursive functions split/outlined from an existing function.
Definition: LazyCallGraph.cpp:1691
llvm::legacy::FunctionPassManager::run
bool run(Function &F)
run - Execute all of the passes scheduled for execution.
Definition: LegacyPassManager.cpp:1352
llvm::DenseMapBase< DenseMap< KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >, KeyT, ValueT, DenseMapInfo< KeyT >, llvm::detail::DenseMapPair< KeyT, ValueT > >::end
iterator end()
Definition: DenseMap.h:83
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:314
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:161
llvm::ISD::BR
@ BR
Control flow instructions. These all have token chains.
Definition: ISDOpcodes.h:922
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition: Instructions.h:2612
llvm::ArrayRef::begin
iterator begin() const
Definition: ArrayRef.h:151
GlobalVariable.h
llvm::coro::Shape::SwitchFieldIndex::Resume
@ Resume
Definition: CoroInternal.h:108
llvm::MCID::Branch
@ Branch
Definition: MCInstrDesc.h:157
llvm::ConstantTokenNone::get
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition: Constants.cpp:1427
Casting.h
llvm::coro::Shape::getIndex
ConstantInt * getIndex(uint64_t Value) const
Definition: CoroInternal.h:193
Function.h
llvm::CallGraphSCCPass
Definition: CallGraphSCCPass.h:34
llvm::inst_end
inst_iterator inst_end(Function *F)
Definition: InstIterator.h:132
getFunctionTypeFromAsyncSuspend
static FunctionType * getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend)
Definition: CoroSplit.cpp:458
Arguments
AMDGPU Lower Kernel Arguments
Definition: AMDGPULowerKernelArguments.cpp:243
llvm::salvageDebugInfo
void salvageDebugInfo(Instruction &I)
Assuming the instruction I is going to be deleted, attempt to salvage debug users of I by writing the...
Definition: Local.cpp:1799
llvm::Type::getPointerTo
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
Definition: Type.cpp:715
llvm::InlineFunctionInfo
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition: Cloning.h:193
llvm::ReturnInst::Create
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, Instruction *InsertBefore=nullptr)
Definition: Instructions.h:2950
llvm::coro::Shape::emitAlloc
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
Definition: Coroutines.cpp:529
llvm::ConstantArray::get
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
Definition: Constants.cpp:1248
llvm::coro::Shape::getSwitchCoroId
CoroIdInst * getSwitchCoroId() const
Definition: CoroInternal.h:167
llvm::createCoroSplitLegacyPass
Pass * createCoroSplitLegacyPass(bool IsOptimizing=false)
Split up coroutines into multiple functions driving their state machines.
Definition: CoroSplit.cpp:2249
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition: InstrTypes.h:1389
llvm::CallGraph::getModule
Module & getModule() const
Returns the module the call graph corresponds to.
Definition: CallGraph.h:102
replaceAllPrepares
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG, LazyCallGraph::SCC &C)
Definition: CoroSplit.cpp:2018
llvm::updateCGAndAnalysisManagerForCGSCCPass
LazyCallGraph::SCC & updateCGAndAnalysisManagerForCGSCCPass(LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Helper to update the call graph after running a CGSCC pass.
Definition: CGSCCPassManager.cpp:1234
llvm::coro::Shape::getSwitchIndexField
unsigned getSwitchIndexField() const
Definition: CoroInternal.h:183
llvm::AttrBuilder::addDereferenceableAttr
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
Definition: Attributes.cpp:1811
llvm::GlobalValue::ExternalLinkage
@ ExternalLinkage
Externally visible function.
Definition: GlobalValue.h:48
CallGraph.h
llvm::AttributeList::FunctionIndex
@ FunctionIndex
Definition: Attributes.h:379
llvm::Type::getVoidTy
static Type * getVoidTy(LLVMContext &C)
Definition: Type.cpp:187
llvm::AttrBuilder::addAttribute
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
Definition: Attributes.h:814
llvm::Pass
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:91
llvm::GlobalValue::PrivateLinkage
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:56
Instructions.h
llvm::CoroSuspendInst::getCoroSave
CoroSaveInst * getCoroSave() const
Definition: CoroInstr.h:497
llvm::CoroSizeInst
This represents the llvm.coro.size instruction.
Definition: CoroInstr.h:591
llvm::coro::Shape::RetconLoweringStorage::ReturnBlock
BasicBlock * ReturnBlock
Definition: CoroInternal.h:143
SmallVector.h
llvm::Type::getPointerElementType
Type * getPointerElementType() const
Definition: Type.h:378
llvm::coro::updateCallGraph
void updateCallGraph(Function &Caller, ArrayRef< Function * > Funcs, CallGraph &CG, CallGraphSCC &SCC)
Definition: Coroutines.cpp:215
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1341
N
#define N
maybeFreeRetconStorage
static void maybeFreeRetconStorage(IRBuilder<> &Builder, const coro::Shape &Shape, Value *FramePtr, CallGraph *CG)
Definition: CoroSplit.cpp:167
llvm::Instruction::getParent
const BasicBlock * getParent() const
Definition: Instruction.h:94
llvm::legacy::FunctionPassManager
FunctionPassManager manages FunctionPasses.
Definition: LegacyPassManager.h:71
llvm::ArrayRef::size
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
llvm::coro::LowererBase
Definition: CoroInternal.h:60
llvm::PHINode
Definition: Instructions.h:2572
llvm::CoroBeginInst
This class represents the llvm.coro.begin instruction.
Definition: CoroInstr.h:420
llvm::BasicBlock::removePredecessor
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition: BasicBlock.cpp:321
CoroInstr.h
llvm::DISubprogram
Subprogram description.
Definition: DebugInfoMetadata.h:1815
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:43
llvm::InlineFunction
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
Definition: InlineFunction.cpp:1756
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1164
llvm::GlobalValue::getType
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:271
DerivedTypes.h
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition: InstructionSimplify.h:43
replacePrepare
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG, LazyCallGraph::SCC &C)
Replace a call to llvm.coro.prepare.retcon.
Definition: CoroSplit.cpp:1928
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition: Instructions.h:1450
llvm::CloneFunctionInto
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
Definition: CloneFunction.cpp:84
BB
Common register allocation spilling lr str ldr sxth r3 ldr mla r4 can lr mov lr str ldr sxth r3 mla r4 and then merge mul and lr str ldr sxth r3 mla r4 It also increase the likelihood the store may become dead bb27 Successors according to LLVM BB
Definition: README.txt:39
llvm::inst_begin
inst_iterator inst_begin(Function *F)
Definition: InstIterator.h:131
LLVMContext.h
From
BlockVerifier::State From
Definition: BlockVerifier.cpp:55
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:376
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
simplifySuspendPoint
static bool simplifySuspendPoint(CoroSuspendInst *Suspend, CoroBeginInst *CoroBegin)
Definition: CoroSplit.cpp:1374
llvm::AMDGPU::HSAMD::Kernel::Key::Args
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
Definition: AMDGPUMetadata.h:389
CallGraphUpdater.h
llvm::LazyCallGraph
A lazily constructed view of the call graph of a module.
Definition: LazyCallGraph.h:112
raw_ostream.h
llvm::AMDGPU::VGPRIndexMode::Id
Id
Definition: SIDefines.h:221
llvm::CallGraph::getCallsExternalNode
CallGraphNode * getCallsExternalNode() const
Definition: CallGraph.h:130
llvm::LLVMContext::OB_funclet
@ OB_funclet
Definition: LLVMContext.h:91
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition: SmallVector.h:624
llvm::FunctionAnalysisManagerCGSCCProxy
A proxy from a FunctionAnalysisManager to an SCC.
Definition: CGSCCPassManager.h:392
BasicBlockUtils.h
Value.h
createDevirtTriggerFunc
static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC)
Definition: CoroSplit.cpp:1905
InitializePasses.h
llvm::FunctionType::getReturnType
Type * getReturnType() const
Definition: DerivedTypes.h:123
llvm::Value
LLVM Value Representation.
Definition: Value.h:75
Debug.h
llvm::Value::users
iterator_range< user_iterator > users()
Definition: Value.h:434
llvm::ArrayRef::end
iterator end() const
Definition: ArrayRef.h:152
llvm::coro::salvageDebugInfo
void salvageDebugInfo(SmallDenseMap< llvm::Value *, llvm::AllocaInst *, 4 > &DbgPtrAllocaCache, DbgDeclareInst *DDI, bool ReuseFrameSlot)
Recover a dbg.declare prepared by the frontend and emit an alloca holding a pointer to the coroutine ...
Definition: CoroFrame.cpp:2074
llvm::GlobalVariable::setInitializer
void setInitializer(Constant *InitVal)
setInitializer - Sets the initializer for this global variable, removing any existing initializer if ...
Definition: Globals.cpp:389
of
Add support for conditional and other related patterns Instead of
Definition: README.txt:134
llvm::FunctionType
Class to represent function types.
Definition: DerivedTypes.h:102
machine
coro Split coroutine into a set of functions driving its state machine
Definition: CoroSplit.cpp:2246
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition: Use.h:44
llvm::CGSCCUpdateResult::RCWorklist
SmallPriorityWorklist< LazyCallGraph::RefSCC *, 1 > & RCWorklist
Worklist of the RefSCCs queued for processing.
Definition: CGSCCPassManager.h:242
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:364
llvm::Intrinsic::ID
unsigned ID
Definition: TargetTransformInfo.h:38