24#include "llvm/Config/llvm-config.h"
48#define DEBUG_TYPE "coro-suspend-crossing"
54class BlockToIndexMapping {
58 size_t size()
const {
return V.size(); }
66 size_t blockToIndex(
BasicBlock const *BB)
const {
68 assert(
I !=
V.end() && *
I == BB &&
"BasicBlockNumberng: Unknown block");
92class SuspendCrossingInfo {
93 BlockToIndexMapping Mapping;
100 bool KillLoop =
false;
101 bool Changed =
false;
106 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
111 return Block[Mapping.blockToIndex(BB)];
118 template <
bool Initialize = false>
122#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
132 size_t const FromIndex = Mapping.blockToIndex(
From);
133 size_t const ToIndex = Mapping.blockToIndex(To);
134 bool const Result =
Block[ToIndex].Kills[FromIndex];
136 <<
" answer is " << Result <<
"\n");
145 size_t const FromIndex = Mapping.blockToIndex(
From);
146 size_t const ToIndex = Mapping.blockToIndex(To);
150 <<
" answer is " << Result <<
" (path or loop)\n");
154 bool isDefinitionAcrossSuspend(
BasicBlock *DefBB,
User *U)
const {
155 auto *
I = cast<Instruction>(U);
159 if (
auto *PN = dyn_cast<PHINode>(
I))
160 if (PN->getNumIncomingValues() > 1)
168 if (isa<CoroSuspendRetconInst>(
I) || isa<CoroSuspendAsyncInst>(
I)) {
170 assert(UseBB &&
"should have split coro.suspend into its own block");
173 return hasPathCrossingSuspendPoint(DefBB, UseBB);
177 return isDefinitionAcrossSuspend(&
A.getParent()->getEntryBlock(), U);
181 auto *DefBB =
I.getParent();
186 if (isa<AnyCoroSuspendInst>(
I)) {
188 assert(DefBB &&
"should have split coro.suspend into its own block");
191 return isDefinitionAcrossSuspend(DefBB, U);
194 bool isDefinitionAcrossSuspend(
Value &V,
User *U)
const {
195 if (
auto *Arg = dyn_cast<Argument>(&V))
196 return isDefinitionAcrossSuspend(*Arg, U);
197 if (
auto *Inst = dyn_cast<Instruction>(&V))
198 return isDefinitionAcrossSuspend(*Inst, U);
201 "Coroutine could only collect Argument and Instruction now.");
206#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
210 for (
size_t I = 0,
N = BV.
size();
I <
N; ++
I)
212 dbgs() <<
" " << Mapping.indexToBlock(
I)->getName();
217 for (
size_t I = 0,
N =
Block.size();
I <
N; ++
I) {
219 dbgs() <<
B->getName() <<
":\n";
227template <
bool Initialize>
228bool SuspendCrossingInfo::computeBlockData(
230 bool Changed =
false;
233 auto BBNo = Mapping.blockToIndex(BB);
237 if constexpr (!Initialize)
241 return !
Block[Mapping.blockToIndex(BB)].Changed;
249 auto SavedConsumes =
B.Consumes;
250 auto SavedKills =
B.Kills;
253 auto PrevNo = Mapping.blockToIndex(PI);
257 B.Consumes |=
P.Consumes;
263 B.Kills |=
P.Consumes;
269 B.Kills |=
B.Consumes;
279 B.KillLoop |=
B.Kills[BBNo];
283 if constexpr (!Initialize) {
284 B.Changed = (
B.Kills != SavedKills) || (
B.Consumes != SavedConsumes);
285 Changed |=
B.Changed;
294 const size_t N = Mapping.size();
298 for (
size_t I = 0;
I <
N; ++
I) {
300 B.Consumes.resize(
N);
310 getBlockData(
CE->getParent()).End =
true;
318 auto &
B = getBlockData(SuspendBlock);
320 B.Kills |=
B.Consumes;
323 markSuspendBlock(CSI);
324 if (
auto *Save = CSI->getCoroSave())
325 markSuspendBlock(Save);
331 computeBlockData<
true>(RPOT);
332 while (computeBlockData</*Initialize*/ false>(RPOT))
352 RematNode() =
default;
356 RematNode *EntryNode;
361 SuspendCrossingInfo &Checker;
363 RematGraph(
const std::function<
bool(
Instruction &)> &MaterializableCallback,
365 : MaterializableCallback(MaterializableCallback), Checker(Checker) {
366 std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(
I);
367 EntryNode = FirstNode.get();
368 std::deque<std::unique_ptr<RematNode>> WorkList;
369 addNode(std::move(FirstNode), WorkList, cast<User>(
I));
370 while (WorkList.size()) {
371 std::unique_ptr<RematNode>
N = std::move(WorkList.front());
372 WorkList.pop_front();
373 addNode(std::move(
N), WorkList, cast<User>(
I));
377 void addNode(std::unique_ptr<RematNode> NUPtr,
378 std::deque<std::unique_ptr<RematNode>> &WorkList,
380 RematNode *
N = NUPtr.get();
381 if (Remats.count(
N->Node))
385 Remats[
N->Node] = std::move(NUPtr);
386 for (
auto &Def :
N->Node->operands()) {
388 if (!
D || !MaterializableCallback(*
D) ||
389 !Checker.isDefinitionAcrossSuspend(*
D, FirstUse))
392 if (Remats.count(
D)) {
394 N->Operands.push_back(Remats[
D].
get());
399 for (
auto &
I : WorkList) {
402 N->Operands.push_back(
I.get());
408 std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(
D);
409 N->Operands.push_back(ChildNode.get());
410 WorkList.push_back(std::move(ChildNode));
415#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
418 if (EntryNode->Node->getParent()->hasName())
419 dbgs() << EntryNode->Node->getParent()->getName();
421 EntryNode->Node->getParent()->printAsOperand(
dbgs(),
false);
422 dbgs() <<
") : " << *EntryNode->Node <<
"\n";
423 for (
auto &E : Remats) {
424 dbgs() << *(E.first) <<
"\n";
425 for (RematNode *U : E.second->Operands)
426 dbgs() <<
" " << *
U->Node <<
"\n";
441 return N->Operands.begin();
449#define DEBUG_TYPE "coro-frame"
452class FrameTypeBuilder;
458 bool MayWriteBeforeCoroBegin;
461 bool MayWriteBeforeCoroBegin)
462 : Alloca(Alloca), Aliases(std::move(Aliases)),
463 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
465struct FrameDataInfo {
475 for (
const auto &
P : Spills)
477 for (
const auto &
A : Allocas)
483 auto Itr = FieldIndexMap.find(V);
484 assert(Itr != FieldIndexMap.end() &&
485 "Value does not have a frame field index");
490 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
491 "Cannot set the index for the same field twice.");
492 FieldIndexMap[V] =
Index;
496 auto Iter = FieldAlignMap.find(V);
497 assert(Iter != FieldAlignMap.end());
502 assert(FieldAlignMap.count(V) == 0);
503 FieldAlignMap.insert({V, AL});
507 auto Iter = FieldDynamicAlignMap.find(V);
508 assert(Iter != FieldDynamicAlignMap.end());
513 assert(FieldDynamicAlignMap.count(V) == 0);
514 FieldDynamicAlignMap.insert({V,
Align});
518 auto Iter = FieldOffsetMap.find(V);
519 assert(Iter != FieldOffsetMap.end());
524 assert(FieldOffsetMap.count(V) == 0);
525 FieldOffsetMap.insert({V,
Offset});
529 void updateLayoutIndex(FrameTypeBuilder &
B);
534 bool LayoutIndexUpdateStarted =
false;
551 dbgs() <<
"------------- " << Title <<
"--------------\n";
552 for (
const auto &E : Spills) {
555 for (
auto *
I : E.second)
562 dbgs() <<
"------------- " << Title <<
"--------------\n";
563 for (
const auto &E : RM) {
570 dbgs() <<
"------------- Allocas --------------\n";
571 for (
const auto &
A : Allocas) {
578using FieldIDType = size_t;
583class FrameTypeBuilder {
589 FieldIDType LayoutFieldIndex;
599 bool IsFinished =
false;
601 std::optional<Align> MaxFrameAlignment;
608 std::optional<Align> MaxFrameAlignment)
613 [[nodiscard]] FieldIDType addFieldForAlloca(
AllocaInst *AI,
614 bool IsHeader =
false) {
619 if (
auto *CI = dyn_cast<ConstantInt>(AI->
getArraySize()))
620 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
625 return addField(Ty, AI->
getAlign(), IsHeader);
655 void addFieldForAllocas(
const Function &
F, FrameDataInfo &FrameData,
659 [[nodiscard]] FieldIDType addField(
Type *Ty,
MaybeAlign MaybeFieldAlignment,
660 bool IsHeader =
false,
661 bool IsSpillOfValue =
false) {
662 assert(!IsFinished &&
"adding fields to a finished builder");
663 assert(Ty &&
"must provide a type for a field");
670 if (FieldSize == 0) {
678 Align ABIAlign =
DL.getABITypeAlign(Ty);
679 Align TyAlignment = ABIAlign;
680 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
681 TyAlignment = *MaxFrameAlignment;
682 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
688 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
691 FieldAlignment = *MaxFrameAlignment;
692 FieldSize = FieldSize + DynamicAlignBuffer;
699 StructSize =
Offset + FieldSize;
706 Fields.
push_back({FieldSize,
Offset, Ty, 0, FieldAlignment, TyAlignment,
707 DynamicAlignBuffer});
708 return Fields.
size() - 1;
715 assert(IsFinished &&
"not yet finished!");
719 Align getStructAlign()
const {
720 assert(IsFinished &&
"not yet finished!");
724 FieldIDType getLayoutFieldIndex(FieldIDType Id)
const {
725 assert(IsFinished &&
"not yet finished!");
726 return Fields[
Id].LayoutFieldIndex;
729 Field getLayoutField(FieldIDType Id)
const {
730 assert(IsFinished &&
"not yet finished!");
736void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &
B) {
737 auto Updater = [&](
Value *
I) {
738 auto Field =
B.getLayoutField(getFieldIndex(
I));
739 setFieldIndex(
I,
Field.LayoutFieldIndex);
742 Field.DynamicAlignBuffer
745 setDynamicAlign(
I, dynamicAlign);
748 LayoutIndexUpdateStarted =
true;
749 for (
auto &S : Spills)
751 for (
const auto &
A : Allocas)
753 LayoutIndexUpdateStarted =
false;
756void FrameTypeBuilder::addFieldForAllocas(
const Function &
F,
757 FrameDataInfo &FrameData,
764 for (
auto AllocaList : NonOverlapedAllocas) {
765 auto *LargestAI = *AllocaList.begin();
766 FieldIDType
Id = addFieldForAlloca(LargestAI);
767 for (
auto *Alloca : AllocaList)
775 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
795 if (
auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
796 auto *SWI =
const_cast<SwitchInst *
>(ConstSWI);
797 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
798 SWI->setDefaultDest(SWI->getSuccessor(1));
803 auto ExtractAllocas = [&]() {
804 AllocaSetType Allocas;
807 Allocas.push_back(
A.Alloca);
811 StackLifetime::LivenessType::May);
812 StackLifetimeAnalyzer.run();
814 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
815 StackLifetimeAnalyzer.getLiveRange(AI2));
817 auto GetAllocaSize = [&](
const AllocaInfo &
A) {
818 std::optional<TypeSize> RetSize =
A.Alloca->getAllocationSize(
DL);
819 assert(RetSize &&
"Variable Length Arrays (VLA) are not supported.\n");
820 assert(!RetSize->isScalable() &&
"Scalable vectors are not yet supported");
821 return RetSize->getFixedValue();
827 sort(
FrameData.Allocas, [&](
const auto &Iter1,
const auto &Iter2) {
828 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
836 for (
auto &AllocaSet : NonOverlapedAllocas) {
837 assert(!AllocaSet.empty() &&
"Processing Alloca Set is not empty.\n");
838 bool NoInference =
none_of(AllocaSet, [&](
auto Iter) {
839 return IsAllocaInferenre(Alloca, Iter);
847 bool Alignable = [&]() ->
bool {
848 auto *LargestAlloca = *AllocaSet.begin();
849 return LargestAlloca->getAlign().value() % Alloca->
getAlign().
value() ==
852 bool CouldMerge = NoInference && Alignable;
855 AllocaSet.push_back(Alloca);
860 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
865 for (
auto SwitchAndDefaultDest : DefaultSuspendDest) {
867 BasicBlock *DestBB = SwitchAndDefaultDest.second;
872 : NonOverlapedAllocas) {
873 if (AllocaSet.size() > 1) {
874 dbgs() <<
"In Function:" << F.getName() <<
"\n";
875 dbgs() <<
"Find Union Set "
877 dbgs() <<
"\tAllocas are \n";
878 for (auto Alloca : AllocaSet)
879 dbgs() <<
"\t\t" << *Alloca <<
"\n";
884void FrameTypeBuilder::finish(
StructType *Ty) {
885 assert(!IsFinished &&
"already finished!");
891 for (
auto &
Field : Fields) {
898 StructSize = SizeAndAlign.first;
899 StructAlign = SizeAndAlign.second;
902 return *
static_cast<Field *
>(
const_cast<void*
>(LayoutField.Id));
908 for (
auto &LayoutField : LayoutFields) {
909 auto &
F = getField(LayoutField);
910 if (!
isAligned(
F.TyAlignment, LayoutField.Offset))
918 FieldTypes.
reserve(LayoutFields.size() * 3 / 2);
920 for (
auto &LayoutField : LayoutFields) {
921 auto &
F = getField(LayoutField);
923 auto Offset = LayoutField.Offset;
929 if (
Offset != LastOffset) {
936 F.LayoutFieldIndex = FieldTypes.
size();
939 if (
F.DynamicAlignBuffer) {
946 Ty->
setBody(FieldTypes, Packed);
950 auto Layout =
DL.getStructLayout(Ty);
951 for (
auto &
F : Fields) {
953 assert(Layout->getElementOffset(
F.LayoutFieldIndex) ==
F.Offset);
962 for (
auto *V : FrameData.getAllDefs()) {
966 auto CacheIt = [&DIVarCache, V](
const auto &Container) {
968 return DDI->getExpression()->getNumElements() == 0;
970 if (
I != Container.end())
971 DIVarCache.
insert({V, (*I)->getVariable()});
985 OS <<
"__int_" << cast<IntegerType>(Ty)->getBitWidth();
987 return MDName->getString();
995 return "__floating_type_";
999 return "PointerType";
1002 if (!cast<StructType>(Ty)->hasName())
1003 return "__LiteralStructType_";
1008 for (
auto &Iter : Buffer)
1009 if (Iter ==
'.' || Iter ==
':')
1012 return MDName->getString();
1015 return "UnknownType";
1027 DIType *RetType =
nullptr;
1030 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1032 llvm::DINode::FlagArtificial);
1035 dwarf::DW_ATE_float,
1036 llvm::DINode::FlagArtificial);
1048 std::nullopt,
Name);
1053 llvm::DINode::FlagArtificial,
nullptr, llvm::DINodeArray());
1055 auto *StructTy = cast<StructType>(Ty);
1057 for (
unsigned I = 0;
I < StructTy->getNumElements();
I++) {
1059 Scope, LineNum, DITypeCache);
1062 Scope, DITy->
getName(), Scope->getFile(), LineNum,
1065 llvm::DINode::FlagArtificial, DITy));
1075 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
1078 RetType = CharSizeType;
1089 DITypeCache.
insert({Ty, RetType});
1106 FrameDataInfo &FrameData) {
1111 if (!DIS || !DIS->getUnit() ||
1116 assert(Shape.
ABI == coro::ABI::Switch &&
1117 "We could only build debug infomation for C++ coroutine now.\n");
1123 "Coroutine with switch ABI should own Promise alloca");
1134 }
else if (!DVRs.
empty()) {
1144 unsigned LineNum = PromiseDIVariable->
getLine();
1147 DIS->getUnit(),
Twine(
F.getName() +
".coro_frame_ty").
str(),
1150 llvm::DINodeArray());
1153 DataLayout Layout =
F.getParent()->getDataLayout();
1163 NameCache.
insert({ResumeIndex,
"__resume_fn"});
1164 NameCache.
insert({DestroyIndex,
"__destroy_fn"});
1165 NameCache.
insert({IndexIndex,
"__coro_index"});
1186 dwarf::DW_ATE_unsigned_char)});
1188 for (
auto *V : FrameData.getAllDefs()) {
1192 auto Index = FrameData.getFieldIndex(V);
1194 NameCache.
insert({
Index, DIVarCache[V]->getName()});
1195 TyCache.
insert({
Index, DIVarCache[V]->getType()});
1201 OffsetCache.
insert({ResumeIndex, {8, 0}});
1202 OffsetCache.
insert({DestroyIndex, {8, 8}});
1207 for (
auto *V : FrameData.getAllDefs()) {
1208 auto Index = FrameData.getFieldIndex(V);
1211 {
Index, {FrameData.getAlign(V).
value(), FrameData.getOffset(V)}});
1219 unsigned UnknownTypeNum = 0;
1231 assert(Ty->
isSized() &&
"We can't handle type which is not sized.\n");
1233 AlignInBits = OffsetCache[
Index].first * 8;
1234 OffsetInBits = OffsetCache[
Index].second * 8;
1238 DITy = TyCache[
Index];
1240 DITy =
solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1241 assert(DITy &&
"SolveDIType shouldn't return nullptr.\n");
1243 Name +=
"_" + std::to_string(UnknownTypeNum);
1248 FrameDITy,
Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1249 llvm::DINode::FlagArtificial, DITy));
1255 DFile, LineNum, FrameDITy,
1256 true, DINode::FlagArtificial);
1257 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
1266 if (
auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1267 auto RetainedNodes = SubProgram->getRetainedNodes();
1269 RetainedNodes.end());
1271 SubProgram->replaceOperandWith(
1279 DbgVariableRecord::LocationType::Declare);
1281 It->getParent()->insertDbgRecordBefore(NewDVR, It);
1283 DBuilder.insertDeclare(Shape.
FramePtr, FrameDIVar,
1298 FrameDataInfo &FrameData) {
1303 Name.append(
".Frame");
1308 std::optional<Align> MaxFrameAlignment;
1309 if (Shape.
ABI == coro::ABI::Async)
1311 FrameTypeBuilder
B(
C,
DL, MaxFrameAlignment);
1314 std::optional<FieldIDType> SwitchIndexFieldId;
1316 if (Shape.
ABI == coro::ABI::Switch) {
1317 auto *FnPtrTy = PointerType::getUnqual(
C);
1321 (void)
B.addField(FnPtrTy, std::nullopt,
true);
1322 (void)
B.addField(FnPtrTy, std::nullopt,
true);
1328 FrameData.setFieldIndex(
1329 PromiseAlloca,
B.addFieldForAlloca(PromiseAlloca,
true));
1336 SwitchIndexFieldId =
B.addField(IndexType, std::nullopt);
1338 assert(PromiseAlloca ==
nullptr &&
"lowering doesn't support promises");
1343 B.addFieldForAllocas(
F, FrameData, Shape);
1348 if (Shape.
ABI == coro::ABI::Switch && PromiseAlloca)
1351 FrameData.Allocas.emplace_back(
1354 for (
auto &S : FrameData.Spills) {
1355 Type *FieldType = S.first->getType();
1358 if (
const Argument *
A = dyn_cast<Argument>(S.first))
1359 if (
A->hasByValAttr())
1360 FieldType =
A->getParamByValType();
1361 FieldIDType Id =
B.addField(FieldType, std::nullopt,
false ,
1363 FrameData.setFieldIndex(S.first, Id);
1367 FrameData.updateLayoutIndex(
B);
1371 switch (Shape.
ABI) {
1372 case coro::ABI::Switch: {
1374 auto IndexField =
B.getLayoutField(*SwitchIndexFieldId);
1386 case coro::ABI::Retcon:
1387 case coro::ABI::RetconOnce: {
1390 = (
B.getStructSize() <= Id->getStorageSize() &&
1391 B.getStructAlign() <= Id->getStorageAlignment());
1394 case coro::ABI::Async: {
1404 "The alignment requirment of frame variables cannot be higher than "
1405 "the alignment of the async function context");
1443 const CoroBeginInst &CB,
const SuspendCrossingInfo &Checker,
1444 bool ShouldUseLifetimeStartInfo)
1446 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1453 if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1454 MayWriteBeforeCoroBegin =
true;
1476 if (
SI.getValueOperand() !=
U->get())
1489 auto IsSimpleStoreThenLoad = [&]() {
1490 auto *AI = dyn_cast<AllocaInst>(
SI.getPointerOperand());
1498 while (!StoreAliases.
empty()) {
1500 for (
User *U :
I->users()) {
1503 if (
auto *LI = dyn_cast<LoadInst>(U)) {
1510 if (
auto *S = dyn_cast<StoreInst>(U))
1511 if (S->getPointerOperand() ==
I)
1513 if (
auto *II = dyn_cast<IntrinsicInst>(U))
1514 if (II->isLifetimeStartOrEnd())
1518 if (
auto *BI = dyn_cast<BitCastInst>(U)) {
1529 if (!IsSimpleStoreThenLoad())
1556 if (II.
getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1559 LifetimeStarts.insert(&II);
1563 for (
unsigned Op = 0, OpCount = CB.
arg_size();
Op < OpCount; ++
Op)
1569 bool getShouldLiveOnFrame()
const {
1570 if (!ShouldLiveOnFrame)
1571 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1572 return *ShouldLiveOnFrame;
1575 bool getMayWriteBeforeCoroBegin()
const {
return MayWriteBeforeCoroBegin; }
1578 assert(getShouldLiveOnFrame() &&
"This method should only be called if the "
1579 "alloca needs to live on the frame.");
1580 for (
const auto &
P : AliasOffetMap)
1583 "created before CoroBegin.");
1584 return AliasOffetMap;
1590 const SuspendCrossingInfo &Checker;
1597 bool MayWriteBeforeCoroBegin{
false};
1598 bool ShouldUseLifetimeStartInfo{
true};
1600 mutable std::optional<bool> ShouldLiveOnFrame{};
1602 bool computeShouldLiveOnFrame()
const {
1607 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1608 for (
auto *
I : Users)
1609 for (
auto *S : LifetimeStarts)
1610 if (Checker.isDefinitionAcrossSuspend(*S,
I))
1616 if (PI.isEscaped()) {
1617 for (
auto *
A : LifetimeStarts) {
1618 for (
auto *
B : LifetimeStarts) {
1619 if (Checker.hasPathOrLoopCrossingSuspendPoint(
A->getParent(),
1640 for (
auto *U1 : Users)
1641 for (
auto *U2 : Users)
1642 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1650 MayWriteBeforeCoroBegin =
true;
1654 for (
auto &U :
I.uses())
1664 if (DT.
dominates(&CoroBegin, &
I) || !usedAfterCoroBegin(
I))
1667 if (!IsOffsetKnown) {
1668 AliasOffetMap[&
I].reset();
1670 auto Itr = AliasOffetMap.
find(&
I);
1671 if (Itr == AliasOffetMap.end()) {
1673 }
else if (Itr->second && *Itr->second !=
Offset) {
1676 AliasOffetMap[&
I].reset();
1738 auto GetFramePointer = [&](
Value *Orig) ->
Value * {
1739 FieldIDType
Index = FrameData.getFieldIndex(Orig);
1745 if (
auto *AI = dyn_cast<AllocaInst>(Orig)) {
1746 if (
auto *CI = dyn_cast<ConstantInt>(AI->
getArraySize())) {
1747 auto Count = CI->getValue().getZExtValue();
1756 auto GEP = cast<GetElementPtrInst>(
1758 if (
auto *AI = dyn_cast<AllocaInst>(Orig)) {
1759 if (FrameData.getDynamicAlign(Orig) != 0) {
1762 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->
getType());
1766 PtrValue = Builder.
CreateAdd(PtrValue, AlignMask);
1777 if (
GEP->getType() != Orig->getType())
1779 Orig->getName() +
Twine(
".cast"));
1784 for (
auto const &E : FrameData.Spills) {
1785 Value *Def = E.first;
1786 auto SpillAlignment =
Align(FrameData.getAlign(Def));
1790 Type *ByValTy =
nullptr;
1791 if (
auto *Arg = dyn_cast<Argument>(Def)) {
1798 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1800 if (Arg->hasByValAttr())
1801 ByValTy = Arg->getParamByValType();
1802 }
else if (
auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1805 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
1807 auto *
I = cast<Instruction>(Def);
1812 }
else if (
auto *II = dyn_cast<InvokeInst>(
I)) {
1816 InsertPt = NewBB->getTerminator()->getIterator();
1817 }
else if (isa<PHINode>(
I)) {
1820 if (
auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->
getTerminator()))
1825 assert(!
I->isTerminator() &&
"unexpected terminator");
1828 InsertPt =
I->getNextNode()->getIterator();
1832 auto Index = FrameData.getFieldIndex(Def);
1846 Value *CurrentReload =
nullptr;
1847 for (
auto *U : E.second) {
1851 if (CurrentBlock != U->getParent()) {
1852 CurrentBlock = U->getParent();
1856 auto *
GEP = GetFramePointer(E.first);
1857 GEP->setName(E.first->getName() +
Twine(
".reload.addr"));
1859 CurrentReload =
GEP;
1863 SpillAlignment, E.first->getName() +
Twine(
".reload"));
1870 if (
F->getSubprogram()) {
1872 while (DIs.
empty() && DVRs.
empty() && isa<LoadInst>(CurDef)) {
1873 auto *LdInst = cast<LoadInst>(CurDef);
1875 if (LdInst->getPointerOperandType() != LdInst->getType())
1877 CurDef = LdInst->getPointerOperand();
1878 if (!isa<AllocaInst, LoadInst>(CurDef))
1885 auto SalvageOne = [&](
auto *DDI) {
1886 bool AllowUnresolved =
false;
1893 DDI->getExpression(), DDI->getDebugLoc(),
1894 DbgVariableRecord::LocationType::Declare);
1899 .insertDeclare(CurrentReload, DDI->getVariable(),
1900 DDI->getExpression(), DDI->getDebugLoc(),
1915 if (
auto *PN = dyn_cast<PHINode>(U)) {
1916 assert(PN->getNumIncomingValues() == 1 &&
1917 "unexpected number of incoming "
1918 "values in the PHINode");
1919 PN->replaceAllUsesWith(CurrentReload);
1920 PN->eraseFromParent();
1926 U->replaceUsesOfWith(Def, CurrentReload);
1930 DVR.replaceVariableLocationOp(Def, CurrentReload,
true);
1942 if (Shape.
ABI == coro::ABI::Retcon || Shape.
ABI == coro::ABI::RetconOnce ||
1943 Shape.
ABI == coro::ABI::Async) {
1946 for (
const auto &
P : FrameData.Allocas) {
1948 auto *
G = GetFramePointer(Alloca);
1952 G->takeName(Alloca);
1967 for (
const auto &
A : FrameData.Allocas) {
1969 UsersToUpdate.
clear();
1971 auto *
I = cast<Instruction>(U);
1975 if (UsersToUpdate.
empty())
1977 auto *
G = GetFramePointer(Alloca);
1983 for (
auto *DVI : DIs)
1984 DVI->replaceUsesOfWith(Alloca,
G);
1985 for (
auto *DVR : DbgVariableRecords)
1986 DVR->replaceVariableLocationOp(Alloca,
G);
1992 if (
I->isLifetimeStartOrEnd()) {
1993 I->eraseFromParent();
1997 I->replaceUsesOfWith(Alloca,
G);
2001 for (
const auto &
A : FrameData.Allocas) {
2003 if (
A.MayWriteBeforeCoroBegin) {
2007 "Coroutines cannot handle copying of array allocas yet");
2009 auto *
G = GetFramePointer(Alloca);
2016 for (
const auto &Alias :
A.Aliases) {
2017 auto *
FramePtr = GetFramePointer(Alloca);
2018 auto &
Value = *Alias.second;
2023 AliasPtr, [&](
Use &U) {
return DT.
dominates(CB, U); });
2035 auto *Inst = dyn_cast<Instruction>(U.getUser());
2036 if (!Inst || DT.dominates(CB, Inst))
2039 if (auto *CI = dyn_cast<CallInst>(Inst)) {
2044 if (CI->onlyReadsMemory() ||
2045 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
2050 return isa<StoreInst>(Inst) ||
2053 isa<GetElementPtrInst>(Inst) ||
2058 isa<BitCastInst>(Inst);
2060 if (HasAccessingPromiseBeforeCB) {
2062 auto *
G = GetFramePointer(PA);
2074 PHINode *UntilPHI =
nullptr) {
2075 auto *PN = cast<PHINode>(&SuccBB->
front());
2077 int Index = PN->getBasicBlockIndex(InsertedBB);
2080 V->getType(), 1, V->getName() +
Twine(
".") + SuccBB->
getName());
2083 PN->setIncomingValue(
Index, InputV);
2084 PN = dyn_cast<PHINode>(PN->getNextNode());
2085 }
while (PN != UntilPHI);
2125 auto *NewCleanupPadBB =
2128 CleanupPadBB->
getParent(), CleanupPadBB);
2131 auto *SetDispatchValuePN =
2135 auto *SwitchOnDispatch = Builder.
CreateSwitch(SetDispatchValuePN, UnreachBB,
2138 int SwitchIndex = 0;
2144 Twine(
".from.") + Pred->getName(),
2145 CleanupPadBB->
getParent(), CleanupPadBB);
2147 CaseBB->setName(CleanupPadBB->
getName() +
Twine(
".from.") +
2157 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
2158 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
2159 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
2166 for (
auto &BB :
F) {
2167 for (
auto &Phi : BB.
phis()) {
2168 if (Phi.getNumIncomingValues() == 1) {
2174 while (!Worklist.
empty()) {
2176 auto *OriginalValue = Phi->getIncomingValue(0);
2177 Phi->replaceAllUsesWith(OriginalValue);
2205 if (
auto *CleanupPad =
2210 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2213 assert(CS->getUnwindDest() == &BB);
2223 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.
getFirstNonPHI()))) {
2238 IncomingBB->setName(BB.
getName() +
Twine(
".from.") + Pred->getName());
2256 if (
auto *PN = dyn_cast<PHINode>(&BB.
front()))
2257 if (PN->getNumIncomingValues() > 1)
2268 return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2269 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));
2275 return isa<CoroIdInst>(&
I) || isa<CoroSaveInst>(&
I) ||
2276 isa<CoroSuspendInst>(&
I);
2298 for (
const auto &E : AllRemats) {
2301 RematGraph *RG = E.second.get();
2309 auto InsertPoint = &*
Use->getParent()->getFirstInsertionPt();
2310 if (isa<AnyCoroSuspendInst>(
Use)) {
2312 Use->getParent()->getSinglePredecessor();
2313 assert(SuspendPredecessorBlock &&
"malformed coro suspend instruction");
2321 for (;
I != RPOT.
end(); ++
I) {
2323 CurrentMaterialization =
D->clone();
2324 CurrentMaterialization->
setName(
D->getName());
2326 InsertPoint = CurrentMaterialization;
2330 for (
auto &
I : InstructionsToProcess)
2331 I->replaceUsesOfWith(
D, CurrentMaterialization);
2336 for (
unsigned i = 0, E =
Use->getNumOperands(); i != E; ++i)
2337 if (
Use->getOperand(i) ==
D)
2339 {
Use,
D, CurrentMaterialization});
2341 InstructionsToProcess.push_back(CurrentMaterialization);
2346 for (
auto &R : FinalInstructionsToProcess) {
2347 if (
auto *PN = dyn_cast<PHINode>(R.Use)) {
2348 assert(PN->getNumIncomingValues() == 1 &&
"unexpected number of incoming "
2349 "values in the PHINode");
2350 PN->replaceAllUsesWith(R.Remat);
2351 PN->eraseFromParent();
2354 R.Use->replaceUsesOfWith(R.Def, R.Remat);
2361 auto *BB =
I->getParent();
2379 return isa<AnyCoroSuspendInst>(BB->
front());
2414 if (
auto FI = dyn_cast<CoroAllocaFreeInst>(
User))
2415 VisitedOrFreeBBs.
insert(FI->getParent());
2424 unsigned depth = 3) {
2427 if (depth == 0)
return false;
2446 for (
auto *U : AI->
users()) {
2447 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2462 for (
auto *AI : LocalAllocas) {
2467 Value *StackSave =
nullptr;
2475 for (
auto *U : AI->
users()) {
2477 if (isa<CoroAllocaGetInst>(U)) {
2478 U->replaceAllUsesWith(Alloca);
2484 auto FI = cast<CoroAllocaFreeInst>(U);
2490 DeadInsts.
push_back(cast<Instruction>(U));
2507 if (isa<CoroAllocaGetInst>(U)) {
2508 U->replaceAllUsesWith(Alloc);
2510 auto FI = cast<CoroAllocaFreeInst>(U);
2514 DeadInsts.
push_back(cast<Instruction>(U));
2521 return cast<Instruction>(Alloc);
2528 auto FnTy = FunctionType::get(ValueTy, {},
false);
2531 auto Call = Builder.
CreateCall(FnTy, Fn, {});
2543 auto FnTy = FunctionType::get(Builder.
getPtrTy(),
2544 {V->getType()},
false);
2547 auto Call = Builder.
CreateCall(FnTy, Fn, { V });
2566 auto ValueBeforeCall = Builder.
CreateLoad(ValueTy, Alloca);
2572 if (isa<CallInst>(Call)) {
2575 auto Invoke = cast<InvokeInst>(Call);
2576 Builder.
SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2594 if (isa<LoadInst>(
User) || isa<StoreInst>(
User))
2598 auto Call = cast<Instruction>(
User);
2617 IRBuilder<> Builder(
F.getEntryBlock().getFirstNonPHIOrDbg());
2619 auto ArgTy = cast<PointerType>(Arg.
getType());
2620 auto ValueTy = PointerType::getUnqual(
F.getContext());
2625 auto Alloca = Builder.
CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2640 auto FinalValue = Builder.
CreateLoad(ValueTy, Alloca);
2655 for (
auto &Arg :
F.args()) {
2656 if (!Arg.hasSwiftErrorAttr())
continue;
2663 for (
auto &Inst :
F.getEntryBlock()) {
2664 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2676 if (!AllocasToPromote.
empty()) {
2685 const FrameDataInfo &FrameData,
2693 for (
auto *Def : FrameData.getAllDefs()) {
2694 for (
User *U : Def->users()) {
2695 auto Inst = cast<Instruction>(U);
2696 if (Inst->getParent() != CoroBegin->
getParent() ||
2704 while (!Worklist.
empty()) {
2706 for (
User *U : Def->users()) {
2707 auto Inst = cast<Instruction>(U);
2732 SuspendCrossingInfo &Checker) {
2740 DomSet.
insert(&
F.getEntryBlock());
2744 "should have split coro.suspend into its own block");
2758 if (
auto* II = dyn_cast<IntrinsicInst>(
I))
2768 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2784 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2787 if (collectLifetimeStart(UI, AI))
2795 if (Valid && Lifetimes.
size() != 0) {
2796 auto *NewLifetime = Lifetimes[0]->clone();
2797 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
2798 NewLifetime->insertBefore(DomBB->getTerminator());
2802 S->eraseFromParent();
2811 const SuspendCrossingInfo &Checker,
2824 if (AI->
hasMetadata(LLVMContext::MD_coro_outside_frame))
2830 bool ShouldUseLifetimeStartInfo =
2831 (Shape.
ABI != coro::ABI::Async && Shape.
ABI != coro::ABI::Retcon &&
2832 Shape.
ABI != coro::ABI::RetconOnce);
2835 ShouldUseLifetimeStartInfo};
2836 Visitor.visitPtr(*AI);
2837 if (!Visitor.getShouldLiveOnFrame())
2840 Visitor.getMayWriteBeforeCoroBegin());
2843static std::optional<std::pair<Value &, DIExpression &>>
2845 bool OptimizeFrame,
bool UseEntryValue,
Function *
F,
2847 bool SkipOutermostLoad) {
2849 auto InsertPt =
F->getEntryBlock().getFirstInsertionPt();
2850 while (isa<IntrinsicInst>(InsertPt))
2854 while (
auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2855 if (
auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2856 Storage = LdInst->getPointerOperand();
2863 if (!SkipOutermostLoad)
2865 }
else if (
auto *StInst = dyn_cast<StoreInst>(Inst)) {
2866 Storage = StInst->getValueOperand();
2873 if (!
Op || !AdditionalValues.
empty()) {
2881 SkipOutermostLoad =
false;
2884 return std::nullopt;
2886 auto *StorageAsArg = dyn_cast<Argument>(Storage);
2887 const bool IsSwiftAsyncArg =
2888 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
2893 if (IsSwiftAsyncArg && UseEntryValue && !Expr->
isEntryValue() &&
2902 if (StorageAsArg && !OptimizeFrame && !IsSwiftAsyncArg) {
2903 auto &Cached = ArgToAllocaMap[StorageAsArg];
2906 Storage->
getName() +
".debug");
2920 return {{*Storage, *Expr}};
2930 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2934 ArgToAllocaMap, OptimizeFrame, UseEntryValue,
F, OriginalStorage,
2939 Value *Storage = &SalvagedInfo->first;
2947 if (isa<DbgDeclareInst>(DVI)) {
2948 std::optional<BasicBlock::iterator> InsertPt;
2949 if (
auto *
I = dyn_cast<Instruction>(Storage)) {
2950 InsertPt =
I->getInsertionPointAfterDef();
2954 if (!OptimizeFrame &&
I->getDebugLoc())
2956 }
else if (isa<Argument>(Storage))
2957 InsertPt =
F->getEntryBlock().begin();
2959 DVI.
moveBefore(*(*InsertPt)->getParent(), *InsertPt);
2974 ArgToAllocaMap, OptimizeFrame, UseEntryValue,
F, OriginalStorage,
2979 Value *Storage = &SalvagedInfo->first;
2987 if (DVR.
getType() == DbgVariableRecord::LocationType::Declare) {
2988 std::optional<BasicBlock::iterator> InsertPt;
2989 if (
auto *
I = dyn_cast<Instruction>(Storage)) {
2990 InsertPt =
I->getInsertionPointAfterDef();
2994 if (!OptimizeFrame &&
I->getDebugLoc())
2996 }
else if (isa<Argument>(Storage))
2997 InsertPt =
F->getEntryBlock().begin();
3000 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
3006 Function &
F, SuspendCrossingInfo &Checker,
3007 const std::function<
bool(
Instruction &)> &MaterializableCallback) {
3017 if (!MaterializableCallback(
I))
3019 for (
User *U :
I.users())
3020 if (Checker.isDefinitionAcrossSuspend(
I, U))
3021 Spills[&
I].push_back(cast<Instruction>(U));
3040 for (
auto &E : Spills) {
3044 if (AllRemats.
count(U))
3049 std::make_unique<RematGraph>(MaterializableCallback, U, Checker);
3053 for (
auto I = RPOT.begin();
I != RPOT.end();
3054 ++
I) { (*I)->Node->dump(); }
dbgs()
3057 AllRemats[U] = std::move(RematUPtr);
3069 const std::function<
bool(
Instruction &)> &MaterializableCallback) {
3074 if (
Shape.
ABI == coro::ABI::Switch &&
3083 if (
auto *Save = CSI->getCoroSave())
3096 if (
auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
3097 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
3098 if (!MustTailCallFn)
3118 SuspendCrossingInfo Checker(
F,
Shape);
3122 FrameDataInfo FrameData;
3126 Shape.
ABI != coro::ABI::RetconOnce)
3131 for (
User *U :
A.users())
3132 if (Checker.isDefinitionAcrossSuspend(
A, U))
3133 FrameData.Spills[&
A].push_back(cast<Instruction>(U));
3143 if (
auto AI = dyn_cast<CoroAllocaAllocInst>(&
I)) {
3157 for (
User *U : Alloc->users()) {
3158 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
3159 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
3165 if (isa<CoroAllocaGetInst>(
I))
3168 if (
auto *AI = dyn_cast<AllocaInst>(&
I)) {
3173 for (
User *U :
I.users())
3174 if (Checker.isDefinitionAcrossSuspend(
I, U)) {
3176 if (
I.getType()->isTokenTy())
3178 "token definition is separated from the use by a suspend point");
3179 FrameData.Spills[&
I].push_back(cast<Instruction>(U));
3189 for (
auto &Iter : FrameData.Spills) {
3190 auto *V = Iter.first;
3195 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
3196 FrameData.Spills[V].push_back(DVI);
3199 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
3200 FrameData.Spills[V].push_back(DVR->Marker->MarkedInstr);
3204 if (
Shape.
ABI == coro::ABI::Retcon ||
Shape.
ABI == coro::ABI::RetconOnce ||
3214 for (
auto *
I : DeadInstructions)
3215 I->eraseFromParent();
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMDGPU Lower Kernel Arguments
Expand Atomic instructions
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
static void cleanupSinglePredPHIs(Function &F)
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
static bool isCoroutineStructureIntrinsic(Instruction &I)
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, coro::Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
static void splitAround(Instruction *I, const Twine &Name)
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
static void rewritePHIs(BasicBlock &BB)
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
static std::optional< std::pair< Value &, DIExpression & > > salvageDebugInfoImpl(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, bool OptimizeFrame, bool UseEntryValue, Function *F, Value *Storage, DIExpression *Expr, bool SkipOutermostLoad)
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
static void sinkSpillUsesAfterCoroBegin(Function &F, const FrameDataInfo &FrameData, CoroBeginInst *CoroBegin)
retcon and retcon.once conventions assume that all spill uses can be sunk after the coro....
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker)
For each local variable that all of its user are only used inside one of suspended region,...
static bool isSuspendBlock(BasicBlock *BB)
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
static void rewriteMaterializableInstructions(const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &AllRemats)
static void dumpAllocas(const SmallVectorImpl< AllocaInfo > &Allocas)
static StringRef solveTypeName(Type *Ty)
Create name for Type.
static void dumpSpills(StringRef Title, const SpillInfo &Spills)
static void doRematerializations(Function &F, SuspendCrossingInfo &Checker, const std::function< bool(Instruction &)> &MaterializableCallback)
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
static void dumpRemats(StringRef Title, const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &RM)
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Given that RA is a live value
static bool isLifetimeStart(const Instruction *Inst)
static MaybeAlign getAlign(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
mir Rename Register Operands
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
llvm::cl::opt< bool > UseNewDbgInfoFormat
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB, BasicBlock::iterator InsertBefore)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This represents the llvm.coro.alloca.alloc instruction.
This class represents the llvm.coro.begin instruction.
This represents the llvm.coro.suspend instruction.
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its add...
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
DILocalScope * getScope() const
Get the local scope for this variable.
Base class for scope-like contexts.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.declare instruction.
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
DILocation * get() const
Get the underlying DILocation.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", bool IsInBounds=false)
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
UnreachableInst * CreateUnreachable()
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
void visitIntrinsicInst(IntrinsicInst &I)
void visitBitCastInst(BitCastInst &I)
void visit(Iterator Start, Iterator End)
void visitPHINode(PHINode &I)
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
void visitSelectInst(SelectInst &I)
void visitGetElementPtrInst(GetElementPtrInst &I)
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
static MDString * get(LLVMContext &Context, StringRef Str)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
size_type count(const KeyT &Key) const
This is the common base class for memset/memcpy/memmove.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
iterator end()
Get an iterator to the end of the SetVector.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
iterator find(ConstPtrType Ptr) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Compute live ranges of allocas.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
std::string str() const
str - Get the contents as an std::string.
TypeSize getElementOffsetInBits(unsigned Idx) const
Class to represent struct types.
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
void setDefaultDest(BasicBlock *DefaultCase)
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A range adaptor for a pair of iterators.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ CE
Windows NT (Windows on ARM)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool OptimizeFrame, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
bool defaultMaterializable(Instruction &V)
Default materializable callback.
void buildCoroutineFrame(Function &F, Shape &Shape, TargetTransformInfo &TTI, const std::function< bool(Instruction &)> &MaterializableCallback)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
bool isCPlusPlus(SourceLanguage S)
NodeAddr< DefNode * > Def
NodeAddr< BlockNode * > Block
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connect the specficed blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly.
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize the amount of space required.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From and To.
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
RematGraph::RematNode * NodeRef
static ChildIteratorType child_end(NodeRef N)
RematGraph::RematNode ** ChildIteratorType
static NodeRef getEntryNode(RematGraph *G)
static ChildIteratorType child_begin(NodeRef N)
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Align getContextAlignment() const
uint64_t ContextHeaderSize
bool IsFrameInlineInStorage
AllocaInst * PromiseAlloca
AsyncLoweringStorage AsyncLowering
AnyCoroIdRetconInst * getRetconCoroId() const
CoroIdInst * getSwitchCoroId() const
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SmallVector< CallInst *, 2 > SwiftErrorOps
AllocaInst * getPromiseAlloca() const
bool OptimizeFrame
This would only be true if optimization are enabled.
SwitchLoweringStorage SwitchLowering
CoroBeginInst * CoroBegin
BasicBlock::iterator getInsertPtAfterFramePtr() const
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
RetconLoweringStorage RetconLowering
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
BasicBlock * AllocaSpillBlock