LLVM 23.0.0git
CoroFrame.cpp
Go to the documentation of this file.
1//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8// This file contains classes used to discover if for a particular value
9// its definition precedes and its uses follow a suspend block. This is
10// referred to as a suspend crossing value.
11//
12// Using the information discovered we form a Coroutine Frame structure to
13// contain those values. All uses of those values are replaced with appropriate
14// GEP + load from the coroutine frame. At the point of the definition we spill
15// the value into the coroutine frame.
16//===----------------------------------------------------------------------===//
17
18#include "CoroInternal.h"
19#include "llvm/ADT/ScopeExit.h"
22#include "llvm/IR/DIBuilder.h"
23#include "llvm/IR/DebugInfo.h"
24#include "llvm/IR/Dominators.h"
25#include "llvm/IR/IRBuilder.h"
28#include "llvm/IR/MDBuilder.h"
29#include "llvm/IR/Module.h"
31#include "llvm/Support/Debug.h"
41#include <algorithm>
42#include <optional>
43
44using namespace llvm;
45
46#define DEBUG_TYPE "coro-frame"
47
48namespace {
49class FrameTypeBuilder;
50// Mapping from the to-be-spilled value to all the users that need reload.
51struct FrameDataInfo {
52 // All the values (that are not allocas) that needs to be spilled to the
53 // frame.
54 coro::SpillInfo &Spills;
55 // Allocas contains all values defined as allocas that need to live in the
56 // frame.
58
59 FrameDataInfo(coro::SpillInfo &Spills,
61 : Spills(Spills), Allocas(Allocas) {}
62
63 SmallVector<Value *, 8> getAllDefs() const {
65 for (const auto &P : Spills)
66 Defs.push_back(P.first);
67 for (const auto &A : Allocas)
68 Defs.push_back(A.Alloca);
69 return Defs;
70 }
71
72 uint32_t getFieldIndex(Value *V) const {
73 auto Itr = FieldIndexMap.find(V);
74 assert(Itr != FieldIndexMap.end() &&
75 "Value does not have a frame field index");
76 return Itr->second;
77 }
78
79 void setFieldIndex(Value *V, uint32_t Index) {
80 assert(FieldIndexMap.count(V) == 0 &&
81 "Cannot set the index for the same field twice.");
82 FieldIndexMap[V] = Index;
83 }
84
85 Align getAlign(Value *V) const {
86 auto Iter = FieldAlignMap.find(V);
87 assert(Iter != FieldAlignMap.end());
88 return Iter->second;
89 }
90
91 void setAlign(Value *V, Align AL) {
92 assert(FieldAlignMap.count(V) == 0);
93 FieldAlignMap.insert({V, AL});
94 }
95
96 uint64_t getDynamicAlign(Value *V) const {
97 auto Iter = FieldDynamicAlignMap.find(V);
98 assert(Iter != FieldDynamicAlignMap.end());
99 return Iter->second;
100 }
101
102 void setDynamicAlign(Value *V, uint64_t Align) {
103 assert(FieldDynamicAlignMap.count(V) == 0);
104 FieldDynamicAlignMap.insert({V, Align});
105 }
106
107 uint64_t getOffset(Value *V) const {
108 auto Iter = FieldOffsetMap.find(V);
109 assert(Iter != FieldOffsetMap.end());
110 return Iter->second;
111 }
112
113 void setOffset(Value *V, uint64_t Offset) {
114 assert(FieldOffsetMap.count(V) == 0);
115 FieldOffsetMap.insert({V, Offset});
116 }
117
118 // Update field offset and alignment information from FrameTypeBuilder.
119 void updateLayoutInfo(FrameTypeBuilder &B);
120
121private:
122 // Map from values to their slot indexes on the frame (insertion order).
123 DenseMap<Value *, uint32_t> FieldIndexMap;
124 // Map from values to their alignment on the frame. They would be set after
125 // the frame is built.
126 DenseMap<Value *, Align> FieldAlignMap;
127 DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
128 // Map from values to their offset on the frame. They would be set after
129 // the frame is built.
130 DenseMap<Value *, uint64_t> FieldOffsetMap;
131};
132} // namespace
133
134#ifndef NDEBUG
135static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills) {
136 dbgs() << "------------- " << Title << " --------------\n";
137 for (const auto &E : Spills) {
138 E.first->dump();
139 dbgs() << " user: ";
140 for (auto *I : E.second)
141 I->dump();
142 }
143}
144
146 dbgs() << "------------- Allocas --------------\n";
147 for (const auto &A : Allocas) {
148 A.Alloca->dump();
149 }
150}
151#endif
152
153namespace {
154using FieldIDType = size_t;
155// We cannot rely solely on natural alignment of a type when building a
156// coroutine frame and if the alignment specified on the Alloca instruction
157// differs from the natural alignment of the alloca type we will need to insert
158// padding.
159class FrameTypeBuilder {
160private:
161 struct Field {
162 uint64_t Size;
163 uint64_t Offset;
165 uint64_t DynamicAlignBuffer;
166 };
167
168 const DataLayout &DL;
169 uint64_t StructSize = 0;
170 Align StructAlign;
171 bool IsFinished = false;
172
173 std::optional<Align> MaxFrameAlignment;
174
176 DenseMap<Value*, unsigned> FieldIndexByKey;
177
178public:
179 FrameTypeBuilder(const DataLayout &DL, std::optional<Align> MaxFrameAlignment)
180 : DL(DL), MaxFrameAlignment(MaxFrameAlignment) {}
181
182 /// Add a field to this structure for the storage of an `alloca`
183 /// instruction.
184 [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
185 bool IsHeader = false) {
186 auto Size = AI->getAllocationSize(AI->getDataLayout());
187 if (!Size || !Size->isFixed())
189 "Coroutines cannot handle non static or vscale allocas yet");
190 return addField(Size->getFixedValue(), AI->getAlign(), IsHeader);
191 }
192
193 /// We want to put the allocas whose lifetime-ranges are not overlapped
194 /// into one slot of coroutine frame.
195 /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
196 ///
197 /// cppcoro::task<void> alternative_paths(bool cond) {
198 /// if (cond) {
199 /// big_structure a;
200 /// process(a);
201 /// co_await something();
202 /// } else {
203 /// big_structure b;
204 /// process2(b);
205 /// co_await something();
206 /// }
207 /// }
208 ///
209 /// We want to put variable a and variable b in the same slot to
210 /// reduce the size of coroutine frame.
211 ///
212 /// This function use StackLifetime algorithm to partition the AllocaInsts in
213 /// Spills to non-overlapped sets in order to put Alloca in the same
214 /// non-overlapped set into the same slot in the Coroutine Frame. Then add
215 /// field for the allocas in the same non-overlapped set by using the largest
216 /// type as the field type.
217 ///
218 /// Side Effects: Because We sort the allocas, the order of allocas in the
219 /// frame may be different with the order in the source code.
220 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
221 coro::Shape &Shape, bool OptimizeFrame);
222
223 /// Add a field to this structure for a spill.
224 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
225 bool IsHeader = false,
226 bool IsSpillOfValue = false) {
227 assert(Ty && "must provide a type for a field");
228 // The field size is the alloc size of the type.
229 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
230 // The field alignment is usually the type alignment.
231 // But if we are spilling values we don't need to worry about ABI alignment
232 // concerns.
233 Align ABIAlign = DL.getABITypeAlign(Ty);
234 Align TyAlignment = ABIAlign;
235 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
236 TyAlignment = *MaxFrameAlignment;
237 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
238 return addField(FieldSize, FieldAlignment, IsHeader);
239 }
240
241 /// Add a field to this structure.
242 [[nodiscard]] FieldIDType addField(uint64_t FieldSize, Align FieldAlignment,
243 bool IsHeader = false) {
244 assert(!IsFinished && "adding fields to a finished builder");
245
246 // For an alloca with size=0, we don't need to add a field and they
247 // can just point to any index in the frame. Use index 0.
248 if (FieldSize == 0)
249 return 0;
250
251 // The field alignment could be bigger than the max frame case, in that case
252 // we request additional storage to be able to dynamically align the
253 // pointer.
254 uint64_t DynamicAlignBuffer = 0;
255 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
256 DynamicAlignBuffer =
257 offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
258 FieldAlignment = *MaxFrameAlignment;
259 FieldSize = FieldSize + DynamicAlignBuffer;
260 }
261
262 // Lay out header fields immediately.
263 uint64_t Offset;
264 if (IsHeader) {
265 Offset = alignTo(StructSize, FieldAlignment);
266 StructSize = Offset + FieldSize;
267
268 // Everything else has a flexible offset.
269 } else {
271 }
272
273 Fields.push_back({FieldSize, Offset, FieldAlignment, DynamicAlignBuffer});
274 return Fields.size() - 1;
275 }
276
277 /// Finish the layout and compute final size and alignment.
278 void finish();
279
280 uint64_t getStructSize() const {
281 assert(IsFinished && "not yet finished!");
282 return StructSize;
283 }
284
285 Align getStructAlign() const {
286 assert(IsFinished && "not yet finished!");
287 return StructAlign;
288 }
289
290 Field getLayoutField(FieldIDType Id) const {
291 assert(IsFinished && "not yet finished!");
292 return Fields[Id];
293 }
294};
295} // namespace
296
297void FrameDataInfo::updateLayoutInfo(FrameTypeBuilder &B) {
298 auto Updater = [&](Value *I) {
299 uint32_t FieldIndex = getFieldIndex(I);
300 auto Field = B.getLayoutField(FieldIndex);
301 setAlign(I, Field.Alignment);
302 uint64_t dynamicAlign =
303 Field.DynamicAlignBuffer
304 ? Field.DynamicAlignBuffer + Field.Alignment.value()
305 : 0;
306 setDynamicAlign(I, dynamicAlign);
307 setOffset(I, Field.Offset);
308 };
309 for (auto &S : Spills)
310 Updater(S.first);
311 for (const auto &A : Allocas)
312 Updater(A.Alloca);
313}
314
// Partition the frame allocas into sets of non-overlapping lifetimes so that
// each set can share a single frame slot (sized for the largest member).
// See the declaration comment for the motivating example.
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape,
                                          bool OptimizeFrame) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add field for allocas at the end of this function.
  // Each set gets one field, sized/aligned for its first (largest) member;
  // every alloca in the set maps to that same field index.
  llvm::scope_exit AddFieldForAllocasAtExit([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  // Without frame optimization, every alloca gets its own slot.
  if (!OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the life ranges for every alloca is overlapped
  // in the blocks who contain coro.end and the successor blocks.
  // So we choose to skip these blocks when we calculate the life range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas to live in the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        // Temporarily redirect the default destination (the coro.end path)
        // to the resume successor; restored after the analysis below.
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const coro::AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedValue();
  };
  // Put larger allocas in the front. So the larger allocas have higher
  // priority to merge, which can save more space potentially. Also each
  // AllocaSet would be ordered. So we can get the largest Alloca in one
  // AllocaSet easily.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find if the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If it is true, insert the alloca to that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is multiple of the alignment of B, the address
      // of A should satisfy the requirement for aligning for B.
      //
      // There may be other more fine-grained strategies to handle the alignment
      // information during the merging process. But it seems hard to handle
      // these strategies and benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Recover the default target destination for each Switch statement
  // reserved.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This Debug Info could tell us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlapedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}
443
444void FrameTypeBuilder::finish() {
445 assert(!IsFinished && "already finished!");
446
447 // Prepare the optimal-layout field array.
448 // The Id in the layout field is a pointer to our Field for it.
450 LayoutFields.reserve(Fields.size());
451 for (auto &Field : Fields) {
452 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
453 Field.Offset);
454 }
455
456 // Perform layout to compute size, alignment, and field offsets.
457 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
458 StructSize = SizeAndAlign.first;
459 StructAlign = SizeAndAlign.second;
460
461 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
462 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
463 };
464
465 // Update field offsets from the computed layout.
466 for (auto &LayoutField : LayoutFields) {
467 auto &F = getField(LayoutField);
468 F.Offset = LayoutField.Offset;
469 }
470
471 IsFinished = true;
472}
473
474static void cacheDIVar(FrameDataInfo &FrameData,
476 for (auto *V : FrameData.getAllDefs()) {
477 if (DIVarCache.contains(V))
478 continue;
479
480 auto CacheIt = [&DIVarCache, V](const auto &Container) {
481 auto *I = llvm::find_if(Container, [](auto *DDI) {
482 return DDI->getExpression()->getNumElements() == 0;
483 });
484 if (I != Container.end())
485 DIVarCache.insert({V, (*I)->getVariable()});
486 };
487 CacheIt(findDVRDeclares(V));
488 CacheIt(findDVRDeclareValues(V));
489 }
490}
491
492/// Create name for Type. It uses MDString to store new created string to
493/// avoid memory leak.
495 if (Ty->isIntegerTy()) {
496 // The longest name in common may be '__int_128', which has 9 bits.
497 SmallString<16> Buffer;
498 raw_svector_ostream OS(Buffer);
499 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
500 auto *MDName = MDString::get(Ty->getContext(), OS.str());
501 return MDName->getString();
502 }
503
504 if (Ty->isFloatingPointTy()) {
505 if (Ty->isFloatTy())
506 return "__float_";
507 if (Ty->isDoubleTy())
508 return "__double_";
509 return "__floating_type_";
510 }
511
512 if (Ty->isPointerTy())
513 return "PointerType";
514
515 if (Ty->isStructTy()) {
516 if (!cast<StructType>(Ty)->hasName())
517 return "__LiteralStructType_";
518
519 auto Name = Ty->getStructName();
520
521 SmallString<16> Buffer(Name);
522 for (auto &Iter : Buffer)
523 if (Iter == '.' || Iter == ':')
524 Iter = '_';
525 auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
526 return MDName->getString();
527 }
528
529 return "UnknownType";
530}
531
532static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
533 const DataLayout &Layout, DIScope *Scope,
534 unsigned LineNum,
535 DenseMap<Type *, DIType *> &DITypeCache) {
536 if (DIType *DT = DITypeCache.lookup(Ty))
537 return DT;
538
539 StringRef Name = solveTypeName(Ty);
540
541 DIType *RetType = nullptr;
542
543 if (Ty->isIntegerTy()) {
544 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
545 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
546 llvm::DINode::FlagArtificial);
547 } else if (Ty->isFloatingPointTy()) {
548 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
549 dwarf::DW_ATE_float,
550 llvm::DINode::FlagArtificial);
551 } else if (Ty->isPointerTy()) {
552 // Construct PointerType points to null (aka void *) instead of exploring
553 // pointee type to avoid infinite search problem. For example, we would be
554 // in trouble if we traverse recursively:
555 //
556 // struct Node {
557 // Node* ptr;
558 // };
559 RetType =
560 Builder.createPointerType(nullptr, Layout.getTypeSizeInBits(Ty),
561 Layout.getABITypeAlign(Ty).value() * CHAR_BIT,
562 /*DWARFAddressSpace=*/std::nullopt, Name);
563 } else if (Ty->isStructTy()) {
564 auto *DIStruct = Builder.createStructType(
565 Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
566 Layout.getPrefTypeAlign(Ty).value() * CHAR_BIT,
567 llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
568
569 auto *StructTy = cast<StructType>(Ty);
571 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
572 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
573 DIStruct, LineNum, DITypeCache);
574 assert(DITy);
575 Elements.push_back(Builder.createMemberType(
576 DIStruct, DITy->getName(), DIStruct->getFile(), LineNum,
577 DITy->getSizeInBits(), DITy->getAlignInBits(),
578 Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
579 llvm::DINode::FlagArtificial, DITy));
580 }
581
582 Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
583
584 RetType = DIStruct;
585 } else {
586 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
587 TypeSize Size = Layout.getTypeSizeInBits(Ty);
588 auto *CharSizeType = Builder.createBasicType(
589 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
590
591 if (Size <= 8)
592 RetType = CharSizeType;
593 else {
594 if (Size % 8 != 0)
595 Size = TypeSize::getFixed(Size + 8 - (Size % 8));
596
597 RetType = Builder.createArrayType(
598 Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
599 Builder.getOrCreateArray(Builder.getOrCreateSubrange(0, Size / 8)));
600 }
601 }
602
603 DITypeCache.insert({Ty, RetType});
604 return RetType;
605}
606
/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly
///
/// Create Debug information for coroutine frame with debug name "__coro_frame".
/// The debug information for the fields of coroutine frame is constructed from
/// the following way:
/// 1. For all the value in the Frame, we search the use of dbg.declare to find
///    the corresponding debug variables for the value. If we can find the
///    debug variable, we can get full and accurate debug information.
/// 2. If we can't get debug information in step 1 and 2, we could only try to
///    build the DIType by Type. We did this in solveDIType. We only handle
///    integer, float, double, integer type and struct type for now.
// NOTE(review): the first line of this function's signature (taking Function
// &F and coro::Shape &Shape) appears to have been lost in extraction.
                            FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function is compiled
  // without debug info. So we also don't generate debug info for the frame.

  if (!DIS || !DIS->getUnit())
    return;

  // NOTE(review): the head of this condition (a C++-language check on the
  // compile unit) and its emission-kind comparand appear to have been lost
  // in extraction — confirm against upstream before relying on this text.
          DIS->getUnit()->getSourceLanguage().getUnversionedName())) ||
      DIS->getUnit()->getEmissionKind() !=
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We could only build debug infomation for C++ coroutine now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  DIFile *DFile = DIS->getFile();
  unsigned LineNum = DIS->getLine();

  // Top-level composite type describing the whole frame.
  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(), DFile,
      LineNum, Shape.FrameSize * 8, Shape.FrameAlign.value() * 8,
      llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
  DataLayout Layout = F.getDataLayout();

  // NOTE(review): the declaration of DIVarCache (and likely of the member
  // list Elements used below) appears to have been lost in extraction.
  cacheDIVar(FrameData, DIVarCache);

  // This counter is used to avoid same type names. e.g., there would be
  // many i32 and i64 types in one coroutine. And we would use i32_0 and
  // i32_1 to avoid the same type. Since it makes no sense the name of the
  // fields conflicts with each other.
  unsigned UnknownTypeNum = 0;
  DenseMap<Type *, DIType *> DITypeCache;

  // Append one member entry to the frame composite type.
  auto addElement = [&](StringRef Name, uint64_t SizeInBits, uint64_t Alignment,
                        uint64_t Offset, DIType *DITy) {
    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, Alignment, Offset * 8,
        llvm::DINode::FlagArtificial, DITy));
  };

  // Build the member entry for one frame definition, preferring a cached
  // source-level debug variable and falling back to a synthesized type.
  auto addDIDef = [&](Value *V) {
    // Get the offset and alignment for this value.
    uint64_t Offset = FrameData.getOffset(V);
    Align Alignment = FrameData.getAlign(V);

    std::string Name;
    uint64_t SizeInBits;
    DIType *DITy = nullptr;

    auto It = DIVarCache.find(V);
    if (It != DIVarCache.end()) {
      // Get the type from the debug variable.
      Name = It->second->getName().str();
      DITy = It->second->getType();
    } else {
      if (auto AI = dyn_cast<AllocaInst>(V)) {
        // Frame alloca
        DITy = solveDIType(DBuilder, AI->getAllocatedType(), Layout, FrameDITy,
                           LineNum, DITypeCache);
      } else {
        // Spill
        DITy = solveDIType(DBuilder, V->getType(), Layout, FrameDITy, LineNum,
                           DITypeCache);
      }
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      // Suffix a counter so identically-named synthesized types stay unique.
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    if (auto AI = dyn_cast<AllocaInst>(V)) {
      // Lookup the total size of this alloca originally
      auto Size = AI->getAllocationSize(Layout);
      assert(Size && Size->isFixed() &&
             "unreachable due to addFieldForAlloca checks");
      SizeInBits = Size->getFixedValue() * 8;
    } else {
      // Compute the size of the active data of this member for this spill
      SizeInBits = Layout.getTypeSizeInBits(V->getType());
    }

    addElement(Name, SizeInBits, Alignment.value() * 8, Offset, DITy);
  };

  // For Switch ABI, add debug info for the added fields (resume, destroy).
  if (Shape.ABI == coro::ABI::Switch) {
    auto *FnPtrTy = Shape.getSwitchResumePointerType();
    uint64_t PtrSize = Layout.getPointerSizeInBits(FnPtrTy->getAddressSpace());
    uint64_t PtrAlign =
        Layout.getPointerABIAlignment(FnPtrTy->getAddressSpace()).value() * 8;
    auto *DIPtr = DBuilder.createPointerType(nullptr, PtrSize,
                                             FnPtrTy->getAddressSpace());
    addElement("__resume_fn", PtrSize, PtrAlign, 0, DIPtr);
    addElement("__destroy_fn", PtrSize, PtrAlign,
               Shape.SwitchLowering.DestroyOffset, DIPtr);
    // NOTE(review): the initializer of IndexSize and the offset argument of
    // the addElement call appear to have been lost in extraction.
    uint64_t IndexSize =
    addElement("__coro_index", IndexSize, Shape.SwitchLowering.IndexAlign * 8,
               DBuilder.createBasicType("__coro_index",
                                        (IndexSize < 8) ? 8 : IndexSize,
                                        dwarf::DW_ATE_unsigned_char));
  }
  auto Defs = FrameData.getAllDefs();
  for (auto *V : Defs)
    addDIDef(V);

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar =
      DBuilder.createAutoVariable(DIS, "__coro_frame", DFile, LineNum,
                                  FrameDITy, true, DINode::FlagArtificial);

  // Subprogram would have ContainedNodes field which records the debug
  // variables it contained. So we need to add __coro_frame to the
  // ContainedNodes of it.
  //
  // If we don't add __coro_frame to the RetainedNodes, user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame`
  // is optimized out, which is more precise.
  auto RetainedNodes = DIS->getRetainedNodes();
  SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                               RetainedNodes.end());
  RetainedNodesVec.push_back(FrameDIVar);
  DIS->replaceOperandWith(7, (MDTuple::get(F.getContext(), RetainedNodesVec)));

  // Construct the location for the frame debug variable. The column number
  // is fake but it should be fine.
  DILocation *DILoc =
      DILocation::get(DIS->getContext(), LineNum, /*Column=*/1, DIS);
  assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));

  // NOTE(review): the final constructor argument of the DbgVariableRecord and
  // the declaration of the insertion iterator `It` appear to have been lost
  // in extraction.
  DbgVariableRecord *NewDVR =
      new DbgVariableRecord(ValueAsMetadata::get(Shape.FramePtr), FrameDIVar,
                            DBuilder.createExpression(), DILoc,
  It->getParent()->insertDbgRecordBefore(NewDVR, It);
}
765
766// If there is memory accessing to promise alloca before CoroBegin
768 coro::Shape &Shape) {
769 auto *PA = Shape.SwitchLowering.PromiseAlloca;
770 return llvm::any_of(PA->uses(), [&](Use &U) {
771 auto *Inst = dyn_cast<Instruction>(U.getUser());
772 if (!Inst || DT.dominates(Shape.CoroBegin, Inst))
773 return false;
774
775 if (auto *CI = dyn_cast<CallInst>(Inst)) {
776 // It is fine if the call wouldn't write to the Promise.
777 // This is possible for @llvm.coro.id intrinsics, which
778 // would take the promise as the second argument as a
779 // marker.
780 if (CI->onlyReadsMemory() || CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
781 return false;
782 return true;
783 }
784
785 return isa<StoreInst>(Inst) ||
786 // It may take too much time to track the uses.
787 // Be conservative about the case the use may escape.
789 // There would always be a bitcast for the promise alloca
790 // before we enabled Opaque pointers. And now given
791 // opaque pointers are enabled by default. This should be
792 // fine.
793 isa<BitCastInst>(Inst);
794 });
795}
// Build the coroutine frame type as a byte array.
// The frame layout includes:
// - Resume function pointer at offset 0 (Switch ABI only)
// - Destroy function pointer at offset ptrsize (Switch ABI only)
// - Promise alloca (Switch ABI only, only if present)
// - Suspend/Resume index
// - Spilled values and allocas
static void buildFrameLayout(Function &F, const DominatorTree &DT,
                             coro::Shape &Shape, FrameDataInfo &FrameData,
                             bool OptimizeFrame) {
  const DataLayout &DL = F.getDataLayout();

  // We will use this value to cap the alignment of spilled values.
  std::optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  std::optional<FieldIDType> SwitchIndexFieldId;
  IntegerType *SwitchIndexType = nullptr;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FnPtrTy = Shape.getSwitchResumePointerType();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, MaybeAlign(), /*header*/ true);
    (void)B.addField(FnPtrTy, MaybeAlign(), /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    SwitchIndexType = Type::getIntNTy(F.getContext(), IndexBits);

    SwitchIndexFieldId = B.addField(SwitchIndexType, MaybeAlign());
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may own the same field slot,
  // we add allocas to field here.
  B.addFieldForAllocas(F, FrameData, Shape, OptimizeFrame);
  // Add PromiseAlloca to Allocas list so that
  // 1. updateLayoutIndex could update its index after
  // `performOptimizedStructLayout`
  // 2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca) {
    // We assume that no alias will be create before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, std::optional<APInt>>{},
        hasAccessingPromiseBeforeCB(DT, Shape));
  }
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    MaybeAlign MA;
    // For byval arguments, we need to store the pointed value in the frame,
    // instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first)) {
      if (A->hasByValAttr()) {
        FieldType = A->getParamByValType();
        MA = A->getParamAlign();
      }
    }
    FieldIDType Id =
        B.addField(FieldType, MA, false /*header*/, true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish();

  // Publish the final layout to the frame data and the coroutine shape.
  FrameData.updateLayoutInfo(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the function pointer and index field info.
    // Resume and Destroy function pointers are in the frame header.
    const DataLayout &DL = F.getDataLayout();
    Shape.SwitchLowering.DestroyOffset = DL.getPointerSize();

    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexType = SwitchIndexType;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  // NOTE(review): the retcon case labels and the left-hand side of this
  // assignment appear to have been lost in extraction.
    auto Id = Shape.getRetconCoroId();
        = (B.getStructSize() <= Id->getStorageSize() &&
           B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    // NOTE(review): the async frame-offset/context-size assignments and the
    // report_fatal_error call head appear to have been lost in extraction.
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
          "The alignment requirment of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }
}
923
924/// If MaybeArgument is a byval Argument, return its byval type. Also removes
925/// the captures attribute, so that the argument *value* may be stored directly
926/// on the coroutine frame.
927static Type *extractByvalIfArgument(Value *MaybeArgument) {
928 if (auto *Arg = dyn_cast<Argument>(MaybeArgument)) {
929 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
930
931 if (Arg->hasByValAttr())
932 return Arg->getParamByValType();
933 }
934 return nullptr;
935}
936
937/// Store Def into the coroutine frame.
938static void createStoreIntoFrame(IRBuilder<> &Builder, Value *Def,
939 Type *ByValTy, const coro::Shape &Shape,
940 const FrameDataInfo &FrameData) {
941 LLVMContext &Ctx = Shape.CoroBegin->getContext();
942 uint64_t Offset = FrameData.getOffset(Def);
943
944 Value *G = Shape.FramePtr;
945 if (Offset != 0) {
946 auto *OffsetVal = ConstantInt::get(Type::getInt64Ty(Ctx), Offset);
947 G = Builder.CreateInBoundsPtrAdd(G, OffsetVal,
948 Def->getName() + Twine(".spill.addr"));
949 }
950 auto SpillAlignment = Align(FrameData.getAlign(Def));
951
952 // For byval arguments, copy the pointed-to value to the frame.
953 if (ByValTy) {
954 auto &DL = Builder.GetInsertBlock()->getDataLayout();
955 auto Size = DL.getTypeStoreSize(ByValTy);
956 // Def is a pointer to the byval argument
957 Builder.CreateMemCpy(G, SpillAlignment, Def, SpillAlignment, Size);
958 } else {
959 Builder.CreateAlignedStore(Def, G, SpillAlignment);
960 }
961}
962
963/// Returns a pointer into the coroutine frame at the offset where Orig is
964/// located.
965static Value *createGEPToFramePointer(const FrameDataInfo &FrameData,
966 IRBuilder<> &Builder, coro::Shape &Shape,
967 Value *Orig) {
968 LLVMContext &Ctx = Shape.CoroBegin->getContext();
969 uint64_t Offset = FrameData.getOffset(Orig);
970 auto *OffsetVal = ConstantInt::get(Type::getInt64Ty(Ctx), Offset);
971 Value *Ptr = Builder.CreateInBoundsPtrAdd(Shape.FramePtr, OffsetVal);
972
973 if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
974 if (FrameData.getDynamicAlign(Orig) != 0) {
975 assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
976 auto *M = AI->getModule();
977 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
978 auto *PtrValue = Builder.CreatePtrToInt(Ptr, IntPtrTy);
979 auto *AlignMask = ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
980 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
981 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
982 return Builder.CreateIntToPtr(PtrValue, AI->getType());
983 }
984 // If the type of Ptr is not equal to the type of AllocaInst, it implies
985 // that the AllocaInst may be reused in the Frame slot of other AllocaInst.
986 // Note: If the strategy dealing with alignment changes, this cast must be
987 // refined
988 if (Ptr->getType() != Orig->getType())
989 Ptr = Builder.CreateAddrSpaceCast(Ptr, Orig->getType(),
990 Orig->getName() + Twine(".cast"));
991 }
992 return Ptr;
993}
994
/// Find dbg.declare or dbg.declare_value records referencing `Def`. If none are
/// found, walk up the load chain to find one.
template <DbgVariableRecord::LocationType record_type>
static TinyPtrVector<DbgVariableRecord *>
  static_assert(record_type == DbgVariableRecord::LocationType::Declare ||
  constexpr auto FindFunc =

  // Records attached directly to Def, if any.
  TinyPtrVector<DbgVariableRecord *> Records = FindFunc(Def);

  // Without a subprogram there is no debug info worth chasing further.
  if (!F.getSubprogram())
    return Records;

  // Walk up a chain of pointer-typed loads (load-of-load, or a load from an
  // alloca) until a record is found or the chain leaves that shape.
  Value *CurDef = Def;
  while (Records.empty() && isa<LoadInst>(CurDef)) {
    auto *LdInst = cast<LoadInst>(CurDef);
    if (!LdInst->getType()->isPointerTy())
      break;
    CurDef = LdInst->getPointerOperand();
    if (!isa<AllocaInst, LoadInst>(CurDef))
      break;
    Records = FindFunc(CurDef);
  }

  return Records;
}
1025
1026// Helper function to handle allocas that may be accessed before CoroBegin.
1027// This creates a memcpy from the original alloca to the coroutine frame after
1028// CoroBegin, ensuring the frame has the correct initial values.
1029static void handleAccessBeforeCoroBegin(const FrameDataInfo &FrameData,
1030 coro::Shape &Shape,
1031 IRBuilder<> &Builder,
1032 AllocaInst *Alloca) {
1033 Value *Size = Builder.CreateAllocationSize(Builder.getInt64Ty(), Alloca);
1034 auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Alloca);
1035 Builder.CreateMemCpy(G, FrameData.getAlign(Alloca), Alloca,
1036 Alloca->getAlign(), Size);
1037}
1038
// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
// %hdl = coro.begin(...)
// whatever
//
// becomes:
//
// %hdl = coro.begin(...)
// br label %AllocaSpillBB
//
// AllocaSpillBB:
// ; geps corresponding to allocas that were moved to coroutine frame
// br label PostSpill
//
// PostSpill:
// whatever
//
//
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
  LLVMContext &C = Shape.CoroBegin->getContext();
  Function *F = Shape.CoroBegin->getFunction();
  IRBuilder<> Builder(C);
  DominatorTree DT(*F);

  MDBuilder MDB(C);
  // Create a TBAA tag for accesses to certain coroutine frame slots, so that
  // subsequent alias analysis will understand they do not intersect with
  // user memory.
  // We do this only if a suitable TBAA root already exists in the module.
  MDNode *TBAATag = nullptr;
  if (auto *CppTBAAStr = MDString::getIfExists(C, "Simple C++ TBAA")) {
    auto *TBAARoot = MDNode::getIfExists(C, CppTBAAStr);
    // Create a "fake" scalar type; all other types defined in the source
    // language will be assumed non-aliasing with this type.
    MDNode *Scalar = MDB.createTBAAScalarTypeNode(
        (F->getName() + ".Frame Slot").str(), TBAARoot);
    TBAATag = MDB.createTBAAStructTagNode(Scalar, Scalar, 0);
  }
  // Insert one store per spilled value, then rewrite every use to a reload
  // from the frame slot (one reload per using block).
  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    // Non-null only for byval arguments; also strips the captures attribute.
    Type *ByValTy = extractByvalIfArgument(Def);

    Builder.SetInsertPoint(coro::getSpillInsertionPt(Shape, Def, DT));
    createStoreIntoFrame(Builder, Def, ByValTy, Shape, FrameData);

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to reload
      // the spilled value from the coroutine frame. Populates the Value pointer
      // reference provided with the frame GEP.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(CurrentBlock,
                               CurrentBlock->getFirstInsertionPt());

        auto *GEP = createGEPToFramePointer(FrameData, Builder, Shape, E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (ByValTy) {
          // Byval values live in the frame by value; the address is the
          // reload.
          CurrentReload = GEP;
        } else {
          auto SpillAlignment = Align(FrameData.getAlign(Def));
          auto *LI =
              Builder.CreateAlignedLoad(E.first->getType(), GEP, SpillAlignment,
                                        E.first->getName() + Twine(".reload"));
          if (TBAATag)
            LI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
          CurrentReload = LI;
        }

        auto SalvageOne = [&](DbgVariableRecord *DDI) {
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() by the Cloner.
            ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
            DDI->getExpression(), DDI->getDebugLoc(),
          Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
              NewDVR, Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
        };
        for_each(DVRs, SalvageOne);
      }

      TinyPtrVector<DbgVariableRecord *> DVRDeclareValues =

      auto SalvageOneCoro = [&](auto *DDI) {
        // This dbg.declare_value is preserved for all coro-split function
        // fragments. It will be unreachable in the main function, and
        // processed by coro::salvageDebugInfo() by the Cloner. However, convert
        // it to a dbg.declare to make sure future passes don't have to deal
        // with a dbg.declare_value.
        auto *VAM = ValueAsMetadata::get(CurrentReload);
        Type *Ty = VAM->getValue()->getType();
        // If the metadata type is not a pointer, emit a dbg.value instead.
            ValueAsMetadata::get(CurrentReload), DDI->getVariable(),
            DDI->getExpression(), DDI->getDebugLoc(),
        Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
            NewDVR, Builder.GetInsertPoint());
        // This dbg.declare_value is for the main function entry point. It
        // will be deleted in all coro-split functions.
        coro::salvageDebugInfo(ArgToAllocaMap, *DDI, false /*UseEntryValue*/);
      };
      for_each(DVRDeclareValues, SalvageOneCoro);

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by normalizing them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // reload.
      U->replaceUsesOfWith(Def, CurrentReload);
      // Instructions are added to Def's user list if the attached
      // debug records use Def. Update those now.
      for (DbgVariableRecord &DVR : filterDbgVars(U->getDbgRecordRange()))
        DVR.replaceVariableLocationOp(Def, CurrentReload, true);
    }
  }

  // Carve out AllocaSpillBB right after the frame pointer is established; it
  // becomes the landing spot for alloca-replacement GEPs.
  BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();

  auto SpillBlock = FramePtrBB->splitBasicBlock(
      Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with Geps.
    Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Alloca);

      // Remove any lifetime intrinsics, now that these are no longer allocas.
      for (User *U : make_early_inc_range(Alloca->users())) {
        auto *I = cast<Instruction>(U);
        if (I->isLifetimeStartOrEnd())
          I->eraseFromParent();
      }

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return;
  }

  // If we found any alloca, replace all of their remaining uses with GEP
  // instructions. To preserve debuggability, we replace the uses of allocas
  // for dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(Shape.AllocaSpillBlock,
                         Shape.AllocaSpillBlock->begin());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : make_early_inc_range(Alloca->users())) {
      auto *I = cast<Instruction>(U);
      // Lifetime intrinsics that refer to members of the coroutine frame are
      // meaningless, and keeping them around may block further optimizations.
      if (I->isLifetimeStartOrEnd())
        I->eraseFromParent();
      else if (DT.dominates(Shape.CoroBegin, I))
        UsersToUpdate.push_back(I);
    }

    if (UsersToUpdate.empty())
      continue;
    auto *G = createGEPToFramePointer(FrameData, Builder, Shape, Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    // Point debug records at the frame slot rather than the dead alloca.
    SmallVector<DbgVariableRecord *> DbgVariableRecords;
    findDbgUsers(Alloca, DbgVariableRecords);
    for (auto *DVR : DbgVariableRecords)
      DVR->replaceVariableLocationOp(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(&*Shape.getInsertPtAfterFramePtr());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      handleAccessBeforeCoroBegin(FrameData, Shape, Builder, Alloca);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr =
          createGEPToFramePointer(FrameData, Builder, Shape, Alloca);
      auto &Value = *Alias.second;
      auto ITy = IntegerType::get(C, Value.getBitWidth());
      auto *AliasPtr =
          Builder.CreateInBoundsPtrAdd(FramePtr, ConstantInt::get(ITy, Value));
      // Only uses dominated by CoroBegin can see the frame; leave the rest.
      Alias.first->replaceUsesWithIf(
          AliasPtr, [&](Use &U) { return DT.dominates(Shape.CoroBegin, U); });
    }
  }
}
1270
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  // SuccBB is required to start with at least one PHI node.
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    // Reroute the value PN receives from InsertedBB through a fresh
    // single-entry PHI placed in InsertedBB and fed by PredBB.
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
    InputV->insertBefore(InsertedBB->begin());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    // Walk the run of PHIs at the top of SuccBB; stop at UntilPHI, or at the
    // end of the run (dyn_cast yields null, matching the default UntilPHI).
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}
1289
// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch
  //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 % 2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br %label cleanuppad
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br %label cleanuppad
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  // The cleanuppad instruction itself moves into the dispatcher, right after
  // the dispatch PHI; the original block keeps only the rerouted value PHIs.
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN->getIterator());
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  // Iterate over a copy of the predecessor list: setUnwindEdgeTo below
  // mutates the real predecessor set while we walk.
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}
1365
  // Collect the leading single-incoming PHIs of every block; only the run of
  // PHIs at the top of a block is considered, stopping at the first PHI with
  // more than one incoming value.
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  // Forward each trivial PHI to its unique incoming value.
  // NOTE(review): the PHIs are not erased here — presumably a later cleanup
  // removes the now-dead nodes; confirm against the surrounding code.
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}
1382
static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI node.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br %label loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br %label loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (!BB.empty()) {
    if (auto *CleanupPad =
      for (BasicBlock *Pred : Preds) {
        if (CatchSwitchInst *CS =
                dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
          // CleanupPad with a CatchSwitch predecessor: therefore this is an
          // unwind destination that needs to be handled specially.
          assert(CS->getUnwindDest() == &BB);
          (void)CS;
          rewritePHIsForCleanupPad(&BB, CleanupPad);
          return;
        }
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if (!BB.empty()) {
    if ((LandingPad =
      // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
      // We replace the original landing pad with a PHINode that will collect
      // the results from all of them.
      ReplPHI = PHINode::Create(LandingPad->getType(), 1, "");
      ReplPHI->insertBefore(LandingPad->getIterator());
      ReplPHI->takeName(LandingPad);
      LandingPad->replaceAllUsesWith(ReplPHI);
      // We will erase the original landing pad at the end of this function
      // after ehAwareSplitEdge cloned it in the transition blocks.
    }
  }

  // Split every incoming edge and funnel each edge's value through a
  // single-entry PHI in the new edge block.
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge function cloned the original landing pad.
    // No longer need it.
    LandingPad->eraseFromParent();
  }
}
1458
// Normalize every multi-edge PHI in the function (see rewritePHIs(BasicBlock&)
// above for the shape of the rewrite).
static void rewritePHIs(Function &F) {

  // Collect the blocks whose leading PHI has more than one incoming edge;
  // those are the only ones that need normalization.
  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  // Rewrite outside the discovery loop: rewriting splits edges and adds
  // blocks, which would invalidate iteration over F.
  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}
1470
// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      // Already the head of a single-predecessor block: no split needed, just
      // rename the block and reuse it.
      BB->setName(Name);
      return BB;
    }
  }
  // Otherwise split so that I becomes the first instruction of the returned
  // (new) block.
  return BB->splitBasicBlock(I, Name);
}
1483
1484// Split above and below a particular instruction so that it
1485// will be all alone by itself in a block.
1486static void splitAround(Instruction *I, const Twine &Name) {
1487 splitBlockIfNotFirst(I, Name);
1488 splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1489}
1490
/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (coro::isSuspendBlock(BB))
    return true;

  // Recurse into the successors. A block with no successors (return,
  // unreachable) falls through to `return true` below.
  for (auto *Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}
1512
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto *U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    // A free on a path that keeps executing in this function means the stack
    // pointer must actually be restored, which requires a prior stack save.
    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}
1528
/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
                             SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto *AI : LocalAllocas) {
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
      StackSave = Builder.CreateStackSave();

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(AI->getAlignment());

    for (auto *U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateStackRestore(StackSave);
        }
      }
      // Both gets and frees are dead once rewritten.
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}
1567
/// Get the current swifterror value.
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(Builder.getPtrTy());

  // Record the placeholder call in Shape so coro-split can lower it to the
  // real swifterror access later.
  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
1580
/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(Builder.getPtrTy(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(Builder.getPtrTy());

  // Record the placeholder call in Shape so coro-split can lower it to the
  // real swifterror access later.
  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
1596
/// Set the swifterror value from the given alloca before a call,
/// then put in back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}
1629
/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
      continue;

    // NOTE(review): the guard above (not visible in this view) presumably
    // skips load/store uses, so only call/invoke swifterror operands reach
    // this point — confirm against the full source.
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}
1653
/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilog.
///
/// The argument keeps the swifterror flag.
                                        coro::Shape &Shape,
                             SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(&F.getEntryBlock(),
                      F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = PointerType::getUnqual(F.getContext());

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto *Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto *End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}
1693
/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument. A function can have at most one, so stop
  // at the first match.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas. These can only appear in the entry block.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}
1726
1727/// For each local variable that all of its user are only used inside one of
1728/// suspended region, we sink their lifetime.start markers to the place where
1729/// after the suspend block. Doing so minimizes the lifetime of each variable,
1730/// hence minimizing the amount of data we end up putting on the frame.
1732 SuspendCrossingInfo &Checker,
1733 const DominatorTree &DT) {
1734 if (F.hasOptNone())
1735 return;
1736
1737 // Collect all possible basic blocks which may dominate all uses of allocas.
1739 DomSet.insert(&F.getEntryBlock());
1740 for (auto *CSI : Shape.CoroSuspends) {
1741 BasicBlock *SuspendBlock = CSI->getParent();
1742 assert(coro::isSuspendBlock(SuspendBlock) &&
1743 SuspendBlock->getSingleSuccessor() &&
1744 "should have split coro.suspend into its own block");
1745 DomSet.insert(SuspendBlock->getSingleSuccessor());
1746 }
1747
1748 for (Instruction &I : instructions(F)) {
1750 if (!AI)
1751 continue;
1752
1753 for (BasicBlock *DomBB : DomSet) {
1754 bool Valid = true;
1756
1757 auto isLifetimeStart = [](Instruction* I) {
1758 if (auto* II = dyn_cast<IntrinsicInst>(I))
1759 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1760 return false;
1761 };
1762
1763 auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
1764 if (isLifetimeStart(U)) {
1765 Lifetimes.push_back(U);
1766 return true;
1767 }
1768 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
1769 return false;
1770 if (isLifetimeStart(U->user_back())) {
1771 Lifetimes.push_back(U->user_back());
1772 return true;
1773 }
1774 return false;
1775 };
1776
1777 for (User *U : AI->users()) {
1779 // For all users except lifetime.start markers, if they are all
1780 // dominated by one of the basic blocks and do not cross
1781 // suspend points as well, then there is no need to spill the
1782 // instruction.
1783 if (!DT.dominates(DomBB, UI->getParent()) ||
1784 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
1785 // Skip lifetime.start, GEP and bitcast used by lifetime.start
1786 // markers.
1787 if (collectLifetimeStart(UI, AI))
1788 continue;
1789 Valid = false;
1790 break;
1791 }
1792 }
1793 // Sink lifetime.start markers to dominate block when they are
1794 // only used outside the region.
1795 if (Valid && Lifetimes.size() != 0) {
1796 auto *NewLifetime = Lifetimes[0]->clone();
1797 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(0), AI);
1798 NewLifetime->insertBefore(DomBB->getTerminator()->getIterator());
1799
1800 // All the outside lifetime.start markers are no longer necessary.
1801 for (Instruction *S : Lifetimes)
1802 S->eraseFromParent();
1803
1804 break;
1805 }
1806 }
1807 }
1808}
1809
1810static std::optional<std::pair<Value &, DIExpression &>>
1812 bool UseEntryValue, Function *F, Value *Storage,
1813 DIExpression *Expr, bool SkipOutermostLoad) {
1814 IRBuilder<> Builder(F->getContext());
1815 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
1816 while (isa<IntrinsicInst>(InsertPt))
1817 ++InsertPt;
1818 Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
1819
1820 while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
1821 if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
1822 Storage = LdInst->getPointerOperand();
1823 // FIXME: This is a heuristic that works around the fact that
1824 // LLVM IR debug intrinsics cannot yet distinguish between
1825 // memory and value locations: Because a dbg.declare(alloca) is
1826 // implicitly a memory location no DW_OP_deref operation for the
1827 // last direct load from an alloca is necessary. This condition
1828 // effectively drops the *last* DW_OP_deref in the expression.
1829 if (!SkipOutermostLoad)
1831 } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
1832 Storage = StInst->getValueOperand();
1833 } else {
1835 SmallVector<Value *, 0> AdditionalValues;
1837 *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
1838 AdditionalValues);
1839 if (!Op || !AdditionalValues.empty()) {
1840 // If salvaging failed or salvaging produced more than one location
1841 // operand, give up.
1842 break;
1843 }
1844 Storage = Op;
1845 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
1846 }
1847 SkipOutermostLoad = false;
1848 }
1849 if (!Storage)
1850 return std::nullopt;
1851
1852 auto *StorageAsArg = dyn_cast<Argument>(Storage);
1853
1854 const bool IsSingleLocationExpression = Expr->isSingleLocationExpression();
1855 // Use an EntryValue when requested (UseEntryValue) for swift async Arguments.
1856 // Entry values in variadic expressions are not supported.
1857 const bool WillUseEntryValue =
1858 UseEntryValue && StorageAsArg &&
1859 StorageAsArg->hasAttribute(Attribute::SwiftAsync) &&
1860 !Expr->isEntryValue() && IsSingleLocationExpression;
1861
1862 if (WillUseEntryValue)
1864
1865 // If the coroutine frame is an Argument, store it in an alloca to improve
1866 // its availability (e.g. registers may be clobbered).
1867 // Avoid this if the value is guaranteed to be available through other means
1868 // (e.g. swift ABI guarantees).
1869 // Avoid this if multiple location expressions are involved, as LLVM does not
1870 // know how to prepend a deref in this scenario.
1871 if (StorageAsArg && !WillUseEntryValue && IsSingleLocationExpression) {
1872 auto &Cached = ArgToAllocaMap[StorageAsArg];
1873 if (!Cached) {
1874 Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
1875 Storage->getName() + ".debug");
1876 Builder.CreateStore(Storage, Cached);
1877 }
1878 Storage = Cached;
1879 // FIXME: LLVM lacks nuanced semantics to differentiate between
1880 // memory and direct locations at the IR level. The backend will
1881 // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
1882 // location. Thus, if there are deref and offset operations in the
1883 // expression, we need to add a DW_OP_deref at the *start* of the
1884 // expression to first load the contents of the alloca before
1885 // adjusting it with the expression.
1887 }
1888
1889 Expr = Expr->foldConstantMath();
1890 return {{*Storage, *Expr}};
1891}
1892
1895 DbgVariableRecord &DVR, bool UseEntryValue) {
1896
1897 Function *F = DVR.getFunction();
1898 // Follow the pointer arithmetic all the way to the incoming
1899 // function argument and convert into a DIExpression.
1900 bool SkipOutermostLoad = DVR.isDbgDeclare() || DVR.isDbgDeclareValue();
1901 Value *OriginalStorage = DVR.getVariableLocationOp(0);
1902
1903 auto SalvagedInfo =
1904 ::salvageDebugInfoImpl(ArgToAllocaMap, UseEntryValue, F, OriginalStorage,
1905 DVR.getExpression(), SkipOutermostLoad);
1906 if (!SalvagedInfo)
1907 return;
1908
1909 Value *Storage = &SalvagedInfo->first;
1910 DIExpression *Expr = &SalvagedInfo->second;
1911
1912 DVR.replaceVariableLocationOp(OriginalStorage, Storage);
1913 DVR.setExpression(Expr);
1914 // We only hoist dbg.declare and dbg.declare_value today since it doesn't make
1915 // sense to hoist dbg.value because it does not have the same function-wide
1916 // guarantees that dbg.declare does.
1919 std::optional<BasicBlock::iterator> InsertPt;
1920 if (auto *I = dyn_cast<Instruction>(Storage)) {
1921 InsertPt = I->getInsertionPointAfterDef();
1922 // Update DILocation only if variable was not inlined.
1923 DebugLoc ILoc = I->getDebugLoc();
1924 DebugLoc DVRLoc = DVR.getDebugLoc();
1925 if (ILoc && DVRLoc &&
1926 DVRLoc->getScope()->getSubprogram() ==
1927 ILoc->getScope()->getSubprogram())
1928 DVR.setDebugLoc(ILoc);
1929 } else if (isa<Argument>(Storage))
1930 InsertPt = F->getEntryBlock().begin();
1931 if (InsertPt) {
1932 DVR.removeFromParent();
1933 // If there is a dbg.declare_value being reinserted, insert it as a
1934 // dbg.declare instead, so that subsequent passes don't have to deal with
1935 // a dbg.declare_value.
1937 auto *MD = DVR.getRawLocation();
1938 if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
1939 Type *Ty = VAM->getValue()->getType();
1940 if (Ty->isPointerTy())
1942 else
1944 }
1945 }
1946 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
1947 }
1948 }
1949}
1950
1953 // Don't eliminate swifterror in async functions that won't be split.
1954 if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
1956
1957 if (Shape.ABI == coro::ABI::Switch &&
1960 }
1961
1962 // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
1963 // intrinsics are in their own blocks to simplify the logic of building up
1964 // SuspendCrossing data.
1965 for (auto *CSI : Shape.CoroSuspends) {
1966 if (auto *Save = CSI->getCoroSave())
1967 splitAround(Save, "CoroSave");
1968 splitAround(CSI, "CoroSuspend");
1969 }
1970
1971 // Put CoroEnds into their own blocks.
1972 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
1973 splitAround(CE, "CoroEnd");
1974
1975 // Emit the musttail call function in a new block before the CoroEnd.
1976 // We do this here so that the right suspend crossing info is computed for
1977 // the uses of the musttail call function call. (Arguments to the coro.end
1978 // instructions would be ignored)
1979 if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
1980 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
1981 if (!MustTailCallFn)
1982 continue;
1983 IRBuilder<> Builder(AsyncEnd);
1984 SmallVector<Value *, 8> Args(AsyncEnd->args());
1985 auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
1987 AsyncEnd->getDebugLoc(), MustTailCallFn, TTI, Arguments, Builder);
1988 splitAround(Call, "MustTailCall.Before.CoroEnd");
1989 }
1990 }
1991
1992 // Later code makes structural assumptions about single predecessors phis e.g
1993 // that they are not live across a suspend point.
1995
1996 // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
1997 // never have its definition separated from the PHI by the suspend point.
1998 rewritePHIs(F);
1999}
2000
2001void coro::BaseABI::buildCoroutineFrame(bool OptimizeFrame) {
2002 SuspendCrossingInfo Checker(F, Shape.CoroSuspends, Shape.CoroEnds);
2004
2005 const DominatorTree DT(F);
2006 if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2008 sinkLifetimeStartMarkers(F, Shape, Checker, DT);
2009
2010 // All values (that are not allocas) that need to be spilled to the frame.
2011 coro::SpillInfo Spills;
2012 // All values defined as allocas that need to live in the frame.
2014
2015 // Collect the spills for arguments and other not-materializable values.
2016 coro::collectSpillsFromArgs(Spills, F, Checker);
2017 SmallVector<Instruction *, 4> DeadInstructions;
2019 coro::collectSpillsAndAllocasFromInsts(Spills, Allocas, DeadInstructions,
2020 LocalAllocas, F, Checker, DT, Shape);
2021 coro::collectSpillsFromDbgInfo(Spills, F, Checker);
2022
2023 LLVM_DEBUG(dumpAllocas(Allocas));
2024 LLVM_DEBUG(dumpSpills("Spills", Spills));
2025
2026 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2027 Shape.ABI == coro::ABI::Async)
2028 sinkSpillUsesAfterCoroBegin(DT, Shape.CoroBegin, Spills, Allocas);
2029
2030 // Build frame layout
2031 FrameDataInfo FrameData(Spills, Allocas);
2032 buildFrameLayout(F, DT, Shape, FrameData, OptimizeFrame);
2033 Shape.FramePtr = Shape.CoroBegin;
2034 // For now, this works for C++ programs only.
2035 buildFrameDebugInfo(F, Shape, FrameData);
2036 // Insert spills and reloads
2037 insertSpills(FrameData, Shape);
2038 lowerLocalAllocas(LocalAllocas, DeadInstructions);
2039
2040 for (auto *I : DeadInstructions)
2041 I->eraseFromParent();
2042}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
Rewrite undef for false bool rewritePHIs(Function &F, UniformityInfo &UA, DominatorTree *DT)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void cleanupSinglePredPHIs(Function &F)
static TinyPtrVector< DbgVariableRecord * > findDbgRecordsThroughLoads(Function &F, Value *Def)
Find dbg.declare or dbg.declare_value records referencing Def.
static void createStoreIntoFrame(IRBuilder<> &Builder, Value *Def, Type *ByValTy, const coro::Shape &Shape, const FrameDataInfo &FrameData)
Store Def into the coroutine frame.
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put it back in the alloca afterwar...
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
static void dumpAllocas(const SmallVectorImpl< coro::AllocaInfo > &Allocas)
static void splitAround(Instruction *I, const Twine &Name)
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
static void buildFrameLayout(Function &F, const DominatorTree &DT, coro::Shape &Shape, FrameDataInfo &FrameData, bool OptimizeFrame)
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills)
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
static Value * createGEPToFramePointer(const FrameDataInfo &FrameData, IRBuilder<> &Builder, coro::Shape &Shape, Value *Orig)
Returns a pointer into the coroutine frame at the offset where Orig is located.
static bool hasAccessingPromiseBeforeCB(const DominatorTree &DT, coro::Shape &Shape)
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker, const DominatorTree &DT)
For each local variable all of whose users are inside one of the suspended regions,...
static Type * extractByvalIfArgument(Value *MaybeArgument)
If MaybeArgument is a byval Argument, return its byval type.
static StringRef solveTypeName(Type *Ty)
Create name for Type.
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
static void handleAccessBeforeCoroBegin(const FrameDataInfo &FrameData, coro::Shape &Shape, IRBuilder<> &Builder, AllocaInst *Alloca)
static bool isLifetimeStart(const Instruction *Inst)
Definition GVN.cpp:1210
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
#define P(N)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const unsigned FramePtr
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:461
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
bool empty() const
Definition BasicBlock.h:483
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This represents the llvm.coro.alloca.alloc instruction.
Definition CoroInstr.h:772
void clearPromise()
Definition CoroInstr.h:159
LLVM_ABI DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, Metadata *SizeInBits, uint32_t AlignInBits, Metadata *OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
LLVM_ABI DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
LLVM_ABI DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero, uint32_t NumExtraInhabitants=0, uint32_t DataSizeInBits=0)
Create debugging information entry for a basic type.
LLVM_ABI DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, Metadata *SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="", DIType *Specification=nullptr, uint32_t NumExtraInhabitants=0)
Create debugging information entry for a struct.
LLVM_ABI DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
LLVM_ABI DIExpression * createExpression(ArrayRef< uint64_t > Addr={})
Create a new descriptor for the specified variable which has a complex address expression for its add...
LLVM_ABI DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
LLVM_ABI void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
DWARF expression.
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
LLVM_ABI DIExpression * foldConstantMath()
Try to shorten an expression with constant math operations that can be evaluated at compile time.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
LLVM_ABI bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
Base class for scope-like contexts.
DIFile * getFile() const
Subprogram description. Uses SubclassData1.
Base class for types.
StringRef getName() const
uint64_t getSizeInBits() const
LLVM_ABI uint32_t getAlignInBits() const
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
Definition DataLayout.h:494
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:784
LLVM_ABI Align getPointerABIAlignment(unsigned AS) const
Layout pointer alignment.
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
LLVM_ABI void removeFromParent()
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
LLVM_ABI Function * getFunction()
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType Type
Classification of the debug-info record that this DbgVariableRecord represents.
LLVM_ABI void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Metadata * getRawLocation() const
Returns the metadata operand for the first location description.
A debug info location.
Definition DebugLoc.h:123
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:205
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:169
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:241
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
LLVM_ABI void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
The landingpad instruction holds all of the information necessary to generate correct exception handl...
LLVM_ABI MDNode * createTBAAScalarTypeNode(StringRef Name, MDNode *Parent, uint64_t Offset=0)
Return metadata for a TBAA scalar type node with the given name, an offset and a parent in the TBAA t...
LLVM_ABI MDNode * createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType, uint64_t Offset, bool IsConstant=false)
Return metadata for a TBAA tag node with the given base type, access type and offset relative to the ...
Metadata node.
Definition Metadata.h:1080
LLVM_ABI void replaceOperandWith(unsigned I, Metadata *New)
Replace a specific operand.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
LLVMContext & getContext() const
Definition Metadata.h:1244
static MDTuple * getIfExists(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1576
static LLVM_ABI MDString * getIfExists(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:624
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:614
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1529
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition Module.h:278
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition SmallString.h:26
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:222
TypeSize getElementOffsetInBits(unsigned Idx) const
Definition DataLayout.h:772
bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const
void setDefaultDest(BasicBlock *DefaultCase)
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
LLVM_ABI std::string str() const
Return the twine contents as a std::string.
Definition Twine.cpp:17
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
Definition Type.cpp:317
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM_ABI void set(Value *Val)
Definition Value.h:883
User * getUser() const
Returns the User that contains this Use.
Definition Use.h:61
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:509
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
iterator_range< user_iterator > users()
Definition Value.h:426
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
std::function< bool(Instruction &I)> IsMaterializable
Definition ABI.h:64
Function & F
Definition ABI.h:59
virtual void buildCoroutineFrame(bool OptimizeFrame)
coro::Shape & Shape
Definition ABI.h:60
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
A raw_ostream that writes to an SmallVector or SmallString.
StringRef str() const
Return a StringRef for the vector contents.
CallInst * Call
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
SmallMapVector< Value *, SmallVector< Instruction *, 2 >, 8 > SpillInfo
Definition SpillUtils.h:18
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
Definition CoroShape.h:48
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
Definition CoroShape.h:43
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
Definition CoroShape.h:36
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
Definition CoroShape.h:31
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
bool isSuspendBlock(BasicBlock *BB)
void normalizeCoroutine(Function &F, coro::Shape &Shape, TargetTransformInfo &TTI)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
LLVM_ABI void doRematerializations(Function &F, SuspendCrossingInfo &Checker, std::function< bool(Instruction &)> IsMaterializable)
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableRecord &DVR, bool UseEntryValue)
Attempts to rewrite the location operand of debug records in terms of the coroutine frame pointer,...
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
bool isCPlusPlus(SourceLanguage S)
Definition Dwarf.h:512
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
Definition MathExtras.h:350
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1753
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connecting the specified blocks in the case that Succ is an Exception Handling Block.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
Definition Local.cpp:2289
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
Definition Alignment.h:186
TargetTransformInfo TTI
LLVM_ABI std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize t...
DWARFExpression::Operation Op
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclareValues(Value *V)
As above, for DVRDeclareValues.
Definition DebugInfo.cpp:65
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr unsigned BitWidth
LLVM_ABI void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
LLVM_ABI TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
Finds dbg.declare records declaring local variables as living in the memory that 'V' points to.
Definition DebugInfo.cpp:48
auto predecessors(const MachineBasicBlock *BB)
LLVM_ABI void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
AsyncLoweringStorage AsyncLowering
Definition CoroShape.h:142
IntegerType * getIndexType() const
Definition CoroShape.h:160
AnyCoroIdRetconInst * getRetconCoroId() const
Definition CoroShape.h:150
PointerType * getSwitchResumePointerType() const
Definition CoroShape.h:169
CoroIdInst * getSwitchCoroId() const
Definition CoroShape.h:145
coro::ABI ABI
Definition CoroShape.h:98
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition CoroShape.h:59
uint64_t FrameSize
Definition CoroShape.h:101
AllocaInst * getPromiseAlloca() const
Definition CoroShape.h:226
SwitchLoweringStorage SwitchLowering
Definition CoroShape.h:140
CoroBeginInst * CoroBegin
Definition CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition CoroShape.h:232
RetconLoweringStorage RetconLowering
Definition CoroShape.h:141
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
Definition CoroShape.h:55
SmallVector< CallInst *, 2 > SwiftErrorOps
Definition CoroShape.h:64
BasicBlock * AllocaSpillBlock
Definition CoroShape.h:103