25#include "llvm/Config/llvm-config.h"
51#define DEBUG_TYPE "coro-suspend-crossing"
57class BlockToIndexMapping {
61 // Number of entries in V, i.e. one per basic block tracked by this
// block <-> index mapping.
size_t size()
const {
return V.size(); }
69 size_t blockToIndex(
BasicBlock const *BB)
const {
71 assert(
I !=
V.end() && *
I == BB &&
"BasicBlockNumberng: Unknown block");
95class SuspendCrossingInfo {
96 BlockToIndexMapping Mapping;
101 bool Suspend =
false;
103 bool KillLoop =
false;
104 bool Changed =
false;
109 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
114 return Block[Mapping.blockToIndex(BB)];
121 template <
bool Initialize = false>
125#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
135 size_t const FromIndex = Mapping.blockToIndex(
From);
136 size_t const ToIndex = Mapping.blockToIndex(To);
137 bool const Result =
Block[ToIndex].Kills[FromIndex];
139 <<
" answer is " << Result <<
"\n");
148 size_t const FromIndex = Mapping.blockToIndex(
From);
149 size_t const ToIndex = Mapping.blockToIndex(To);
153 <<
" answer is " << Result <<
" (path or loop)\n");
157 bool isDefinitionAcrossSuspend(
BasicBlock *DefBB,
User *U)
const {
158 auto *
I = cast<Instruction>(U);
162 if (
auto *PN = dyn_cast<PHINode>(
I))
163 if (PN->getNumIncomingValues() > 1)
171 if (isa<CoroSuspendRetconInst>(
I) || isa<CoroSuspendAsyncInst>(
I)) {
173 assert(UseBB &&
"should have split coro.suspend into its own block");
176 return hasPathCrossingSuspendPoint(DefBB, UseBB);
180 return isDefinitionAcrossSuspend(&
A.getParent()->getEntryBlock(), U);
184 auto *DefBB =
I.getParent();
189 if (isa<AnyCoroSuspendInst>(
I)) {
191 assert(DefBB &&
"should have split coro.suspend into its own block");
194 return isDefinitionAcrossSuspend(DefBB, U);
197 bool isDefinitionAcrossSuspend(
Value &V,
User *U)
const {
198 if (
auto *Arg = dyn_cast<Argument>(&V))
199 return isDefinitionAcrossSuspend(*Arg, U);
200 if (
auto *Inst = dyn_cast<Instruction>(&V))
201 return isDefinitionAcrossSuspend(*Inst, U);
204 "Coroutine could only collect Argument and Instruction now.");
209#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
213 for (
size_t I = 0,
N = BV.
size();
I <
N; ++
I)
215 dbgs() <<
" " << Mapping.indexToBlock(
I)->getName();
220 for (
size_t I = 0,
N =
Block.size();
I <
N; ++
I) {
222 dbgs() <<
B->getName() <<
":\n";
230template <
bool Initialize>
231bool SuspendCrossingInfo::computeBlockData(
233 bool Changed =
false;
236 auto BBNo = Mapping.blockToIndex(BB);
240 if constexpr (!Initialize)
244 return !
Block[Mapping.blockToIndex(BB)].Changed;
252 auto SavedConsumes =
B.Consumes;
253 auto SavedKills =
B.Kills;
256 auto PrevNo = Mapping.blockToIndex(PI);
260 B.Consumes |=
P.Consumes;
266 B.Kills |=
P.Consumes;
272 B.Kills |=
B.Consumes;
282 B.KillLoop |=
B.Kills[BBNo];
286 if constexpr (!Initialize) {
287 B.Changed = (
B.Kills != SavedKills) || (
B.Consumes != SavedConsumes);
288 Changed |=
B.Changed;
297 const size_t N = Mapping.size();
301 for (
size_t I = 0;
I <
N; ++
I) {
303 B.Consumes.resize(
N);
313 getBlockData(
CE->getParent()).End =
true;
321 auto &
B = getBlockData(SuspendBlock);
323 B.Kills |=
B.Consumes;
326 markSuspendBlock(CSI);
327 if (
auto *Save = CSI->getCoroSave())
328 markSuspendBlock(Save);
334 computeBlockData<
true>(RPOT);
335 while (computeBlockData</*Initialize*/ false>(RPOT))
355 RematNode() =
default;
359 RematNode *EntryNode;
364 SuspendCrossingInfo &Checker;
366 RematGraph(
const std::function<
bool(
Instruction &)> &MaterializableCallback,
368 : MaterializableCallback(MaterializableCallback), Checker(Checker) {
369 std::unique_ptr<RematNode> FirstNode = std::make_unique<RematNode>(
I);
370 EntryNode = FirstNode.get();
371 std::deque<std::unique_ptr<RematNode>> WorkList;
372 addNode(std::move(FirstNode), WorkList, cast<User>(
I));
373 while (WorkList.size()) {
374 std::unique_ptr<RematNode>
N = std::move(WorkList.front());
375 WorkList.pop_front();
376 addNode(std::move(
N), WorkList, cast<User>(
I));
380 void addNode(std::unique_ptr<RematNode> NUPtr,
381 std::deque<std::unique_ptr<RematNode>> &WorkList,
383 RematNode *
N = NUPtr.get();
384 if (Remats.count(
N->Node))
388 Remats[
N->Node] = std::move(NUPtr);
389 for (
auto &Def :
N->Node->operands()) {
391 if (!
D || !MaterializableCallback(*
D) ||
392 !Checker.isDefinitionAcrossSuspend(*
D, FirstUse))
395 if (Remats.count(
D)) {
397 N->Operands.push_back(Remats[
D].
get());
402 for (
auto &
I : WorkList) {
405 N->Operands.push_back(
I.get());
411 std::unique_ptr<RematNode> ChildNode = std::make_unique<RematNode>(
D);
412 N->Operands.push_back(ChildNode.get());
413 WorkList.push_back(std::move(ChildNode));
418#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
421 if (EntryNode->Node->getParent()->hasName())
422 dbgs() << EntryNode->Node->getParent()->getName();
424 EntryNode->Node->getParent()->printAsOperand(
dbgs(),
false);
425 dbgs() <<
") : " << *EntryNode->Node <<
"\n";
426 for (
auto &E : Remats) {
427 dbgs() << *(E.first) <<
"\n";
428 for (RematNode *U : E.second->Operands)
429 dbgs() <<
" " << *
U->Node <<
"\n";
444 return N->Operands.begin();
452#define DEBUG_TYPE "coro-frame"
455class FrameTypeBuilder;
461 bool MayWriteBeforeCoroBegin;
464 bool MayWriteBeforeCoroBegin)
465 : Alloca(Alloca), Aliases(std::move(Aliases)),
466 MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
468struct FrameDataInfo {
478 for (
const auto &
P : Spills)
480 for (
const auto &
A : Allocas)
486 auto Itr = FieldIndexMap.find(V);
487 assert(Itr != FieldIndexMap.end() &&
488 "Value does not have a frame field index");
493 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
494 "Cannot set the index for the same field twice.");
495 FieldIndexMap[V] =
Index;
499 auto Iter = FieldAlignMap.find(V);
500 assert(Iter != FieldAlignMap.end());
505 assert(FieldAlignMap.count(V) == 0);
506 FieldAlignMap.insert({V, AL});
510 auto Iter = FieldDynamicAlignMap.find(V);
511 assert(Iter != FieldDynamicAlignMap.end());
516 assert(FieldDynamicAlignMap.count(V) == 0);
517 FieldDynamicAlignMap.insert({V,
Align});
521 auto Iter = FieldOffsetMap.find(V);
522 assert(Iter != FieldOffsetMap.end());
527 assert(FieldOffsetMap.count(V) == 0);
528 FieldOffsetMap.insert({V,
Offset});
532 void updateLayoutIndex(FrameTypeBuilder &
B);
537 bool LayoutIndexUpdateStarted =
false;
554 dbgs() <<
"------------- " << Title <<
"--------------\n";
555 for (
const auto &E : Spills) {
558 for (
auto *
I : E.second)
565 dbgs() <<
"------------- " << Title <<
"--------------\n";
566 for (
const auto &E : RM) {
573 dbgs() <<
"------------- Allocas --------------\n";
574 for (
const auto &
A : Allocas) {
581using FieldIDType = size_t;
586class FrameTypeBuilder {
592 FieldIDType LayoutFieldIndex;
602 bool IsFinished =
false;
604 std::optional<Align> MaxFrameAlignment;
611 std::optional<Align> MaxFrameAlignment)
612 :
DL(
DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
616 [[nodiscard]] FieldIDType addFieldForAlloca(
AllocaInst *AI,
617 bool IsHeader =
false) {
622 if (
auto *CI = dyn_cast<ConstantInt>(AI->
getArraySize()))
623 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
628 return addField(Ty, AI->
getAlign(), IsHeader);
658 void addFieldForAllocas(
const Function &
F, FrameDataInfo &FrameData,
662 [[nodiscard]] FieldIDType addField(
Type *Ty,
MaybeAlign MaybeFieldAlignment,
663 bool IsHeader =
false,
664 bool IsSpillOfValue =
false) {
665 assert(!IsFinished &&
"adding fields to a finished builder");
666 assert(Ty &&
"must provide a type for a field");
673 if (FieldSize == 0) {
681 Align ABIAlign =
DL.getABITypeAlign(Ty);
682 Align TyAlignment = ABIAlign;
683 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
684 TyAlignment = *MaxFrameAlignment;
685 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
691 if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
694 FieldAlignment = *MaxFrameAlignment;
695 FieldSize = FieldSize + DynamicAlignBuffer;
702 StructSize =
Offset + FieldSize;
709 Fields.
push_back({FieldSize,
Offset, Ty, 0, FieldAlignment, TyAlignment,
710 DynamicAlignBuffer});
711 return Fields.
size() - 1;
718 assert(IsFinished &&
"not yet finished!");
722 Align getStructAlign()
const {
723 assert(IsFinished &&
"not yet finished!");
727 FieldIDType getLayoutFieldIndex(FieldIDType Id)
const {
728 assert(IsFinished &&
"not yet finished!");
729 return Fields[
Id].LayoutFieldIndex;
732 Field getLayoutField(FieldIDType Id)
const {
733 assert(IsFinished &&
"not yet finished!");
739void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &
B) {
740 auto Updater = [&](
Value *
I) {
741 auto Field =
B.getLayoutField(getFieldIndex(
I));
742 setFieldIndex(
I,
Field.LayoutFieldIndex);
745 Field.DynamicAlignBuffer
748 setDynamicAlign(
I, dynamicAlign);
751 LayoutIndexUpdateStarted =
true;
752 for (
auto &S : Spills)
754 for (
const auto &
A : Allocas)
756 LayoutIndexUpdateStarted =
false;
759void FrameTypeBuilder::addFieldForAllocas(
const Function &
F,
760 FrameDataInfo &FrameData,
767 for (
auto AllocaList : NonOverlapedAllocas) {
768 auto *LargestAI = *AllocaList.begin();
769 FieldIDType
Id = addFieldForAlloca(LargestAI);
770 for (
auto *Alloca : AllocaList)
778 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
798 if (
auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
799 auto *SWI =
const_cast<SwitchInst *
>(ConstSWI);
800 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
801 SWI->setDefaultDest(SWI->getSuccessor(1));
806 auto ExtractAllocas = [&]() {
807 AllocaSetType Allocas;
810 Allocas.push_back(
A.Alloca);
814 StackLifetime::LivenessType::May);
815 StackLifetimeAnalyzer.run();
817 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
818 StackLifetimeAnalyzer.getLiveRange(AI2));
820 auto GetAllocaSize = [&](
const AllocaInfo &
A) {
821 std::optional<TypeSize> RetSize =
A.Alloca->getAllocationSize(
DL);
822 assert(RetSize &&
"Variable Length Arrays (VLA) are not supported.\n");
823 assert(!RetSize->isScalable() &&
"Scalable vectors are not yet supported");
824 return RetSize->getFixedValue();
830 sort(
FrameData.Allocas, [&](
const auto &Iter1,
const auto &Iter2) {
831 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
839 for (
auto &AllocaSet : NonOverlapedAllocas) {
840 assert(!AllocaSet.empty() &&
"Processing Alloca Set is not empty.\n");
841 bool NoInference =
none_of(AllocaSet, [&](
auto Iter) {
842 return IsAllocaInferenre(Alloca, Iter);
850 bool Alignable = [&]() ->
bool {
851 auto *LargestAlloca = *AllocaSet.begin();
852 return LargestAlloca->getAlign().value() % Alloca->
getAlign().
value() ==
855 bool CouldMerge = NoInference && Alignable;
858 AllocaSet.push_back(Alloca);
863 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
868 for (
auto SwitchAndDefaultDest : DefaultSuspendDest) {
870 BasicBlock *DestBB = SwitchAndDefaultDest.second;
875 : NonOverlapedAllocas) {
876 if (AllocaSet.size() > 1) {
877 dbgs() <<
"In Function:" << F.getName() <<
"\n";
878 dbgs() <<
"Find Union Set "
880 dbgs() <<
"\tAllocas are \n";
881 for (auto Alloca : AllocaSet)
882 dbgs() <<
"\t\t" << *Alloca <<
"\n";
887void FrameTypeBuilder::finish(
StructType *Ty) {
888 assert(!IsFinished &&
"already finished!");
894 for (
auto &
Field : Fields) {
901 StructSize = SizeAndAlign.first;
902 StructAlign = SizeAndAlign.second;
905 return *
static_cast<Field *
>(
const_cast<void*
>(LayoutField.Id));
911 for (
auto &LayoutField : LayoutFields) {
912 auto &
F = getField(LayoutField);
913 if (!
isAligned(
F.TyAlignment, LayoutField.Offset))
921 FieldTypes.
reserve(LayoutFields.size() * 3 / 2);
923 for (
auto &LayoutField : LayoutFields) {
924 auto &
F = getField(LayoutField);
926 auto Offset = LayoutField.Offset;
932 if (
Offset != LastOffset) {
939 F.LayoutFieldIndex = FieldTypes.
size();
942 if (
F.DynamicAlignBuffer) {
949 Ty->
setBody(FieldTypes, Packed);
953 auto Layout =
DL.getStructLayout(Ty);
954 for (
auto &
F : Fields) {
956 assert(Layout->getElementOffset(
F.LayoutFieldIndex) ==
F.Offset);
965 for (
auto *V : FrameData.getAllDefs()) {
969 auto CacheIt = [&DIVarCache, V](
const auto &Container) {
973 if (
I != Container.end())
974 DIVarCache.
insert({V, (*I)->getVariable()});
988 OS <<
"__int_" << cast<IntegerType>(Ty)->getBitWidth();
990 return MDName->getString();
998 return "__floating_type_";
1002 return "PointerType";
1005 if (!cast<StructType>(Ty)->hasName())
1006 return "__LiteralStructType_";
1011 for (
auto &Iter : Buffer)
1012 if (Iter ==
'.' || Iter ==
':')
1015 return MDName->getString();
1018 return "UnknownType";
1030 DIType *RetType =
nullptr;
1033 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
1035 llvm::DINode::FlagArtificial);
1038 dwarf::DW_ATE_float,
1039 llvm::DINode::FlagArtificial);
1051 std::nullopt,
Name);
1056 llvm::DINode::FlagArtificial,
nullptr, llvm::DINodeArray());
1058 auto *StructTy = cast<StructType>(Ty);
1060 for (
unsigned I = 0;
I < StructTy->getNumElements();
I++) {
1062 Scope, LineNum, DITypeCache);
1065 Scope, DITy->
getName(), Scope->getFile(), LineNum,
1068 llvm::DINode::FlagArtificial, DITy));
1078 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
1081 RetType = CharSizeType;
1092 DITypeCache.
insert({Ty, RetType});
1109 FrameDataInfo &FrameData) {
1114 if (!DIS || !DIS->getUnit() ||
1119 assert(Shape.
ABI == coro::ABI::Switch &&
1120 "We could only build debug infomation for C++ coroutine now.\n");
1126 "Coroutine with switch ABI should own Promise alloca");
1137 }
else if (!DVRs.
empty()) {
1147 unsigned LineNum = PromiseDIVariable->
getLine();
1150 DIS->getUnit(),
Twine(
F.getName() +
".coro_frame_ty").
str(),
1153 llvm::DINodeArray());
1166 NameCache.
insert({ResumeIndex,
"__resume_fn"});
1167 NameCache.
insert({DestroyIndex,
"__destroy_fn"});
1168 NameCache.
insert({IndexIndex,
"__coro_index"});
1189 dwarf::DW_ATE_unsigned_char)});
1191 for (
auto *V : FrameData.getAllDefs()) {
1195 auto Index = FrameData.getFieldIndex(V);
1197 NameCache.
insert({
Index, DIVarCache[V]->getName()});
1198 TyCache.
insert({
Index, DIVarCache[V]->getType()});
1204 OffsetCache.
insert({ResumeIndex, {8, 0}});
1205 OffsetCache.
insert({DestroyIndex, {8, 8}});
1210 for (
auto *V : FrameData.getAllDefs()) {
1211 auto Index = FrameData.getFieldIndex(V);
1214 {
Index, {FrameData.getAlign(V).
value(), FrameData.getOffset(V)}});
1222 unsigned UnknownTypeNum = 0;
1234 assert(Ty->
isSized() &&
"We can't handle type which is not sized.\n");
1236 AlignInBits = OffsetCache[
Index].first * 8;
1237 OffsetInBits = OffsetCache[
Index].second * 8;
1241 DITy = TyCache[
Index];
1243 DITy =
solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1244 assert(DITy &&
"SolveDIType shouldn't return nullptr.\n");
1246 Name +=
"_" + std::to_string(UnknownTypeNum);
1251 FrameDITy,
Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1252 llvm::DINode::FlagArtificial, DITy));
1258 DFile, LineNum, FrameDITy,
1259 true, DINode::FlagArtificial);
1260 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
1269 if (
auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1270 auto RetainedNodes = SubProgram->getRetainedNodes();
1272 RetainedNodes.end());
1274 SubProgram->replaceOperandWith(
1282 DbgVariableRecord::LocationType::Declare);
1284 It->getParent()->insertDbgRecordBefore(NewDVR, It);
1286 DBuilder.insertDeclare(Shape.
FramePtr, FrameDIVar,
1301 FrameDataInfo &FrameData) {
1306 Name.append(
".Frame");
1311 std::optional<Align> MaxFrameAlignment;
1312 if (Shape.
ABI == coro::ABI::Async)
1314 FrameTypeBuilder
B(
C,
DL, MaxFrameAlignment);
1317 std::optional<FieldIDType> SwitchIndexFieldId;
1319 if (Shape.
ABI == coro::ABI::Switch) {
1320 auto *FnPtrTy = PointerType::getUnqual(
C);
1324 (void)
B.addField(FnPtrTy, std::nullopt,
true);
1325 (void)
B.addField(FnPtrTy, std::nullopt,
true);
1331 FrameData.setFieldIndex(
1332 PromiseAlloca,
B.addFieldForAlloca(PromiseAlloca,
true));
1339 SwitchIndexFieldId =
B.addField(IndexType, std::nullopt);
1341 assert(PromiseAlloca ==
nullptr &&
"lowering doesn't support promises");
1346 B.addFieldForAllocas(
F, FrameData, Shape);
1351 if (Shape.
ABI == coro::ABI::Switch && PromiseAlloca)
1354 FrameData.Allocas.emplace_back(
1357 for (
auto &S : FrameData.Spills) {
1358 Type *FieldType = S.first->getType();
1361 if (
const Argument *
A = dyn_cast<Argument>(S.first))
1362 if (
A->hasByValAttr())
1363 FieldType =
A->getParamByValType();
1364 FieldIDType Id =
B.addField(FieldType, std::nullopt,
false ,
1366 FrameData.setFieldIndex(S.first, Id);
1370 FrameData.updateLayoutIndex(
B);
1374 switch (Shape.
ABI) {
1375 case coro::ABI::Switch: {
1377 auto IndexField =
B.getLayoutField(*SwitchIndexFieldId);
1389 case coro::ABI::Retcon:
1390 case coro::ABI::RetconOnce: {
1393 = (
B.getStructSize() <= Id->getStorageSize() &&
1394 B.getStructAlign() <= Id->getStorageAlignment());
1397 case coro::ABI::Async: {
1407 "The alignment requirment of frame variables cannot be higher than "
1408 "the alignment of the async function context");
1447 const SuspendCrossingInfo &Checker,
1448 bool ShouldUseLifetimeStartInfo)
1450 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
1452 CoroSuspendBBs.insert(SuspendInst->
getParent());
1460 if (PI.isEscaped() &&
1462 MayWriteBeforeCoroBegin =
true;
1484 if (
SI.getValueOperand() !=
U->get())
1497 auto IsSimpleStoreThenLoad = [&]() {
1498 auto *AI = dyn_cast<AllocaInst>(
SI.getPointerOperand());
1506 while (!StoreAliases.
empty()) {
1508 for (
User *U :
I->users()) {
1511 if (
auto *LI = dyn_cast<LoadInst>(U)) {
1518 if (
auto *S = dyn_cast<StoreInst>(U))
1519 if (S->getPointerOperand() ==
I)
1521 if (
auto *
II = dyn_cast<IntrinsicInst>(U))
1522 if (
II->isLifetimeStartOrEnd())
1526 if (
auto *BI = dyn_cast<BitCastInst>(U)) {
1537 if (!IsSimpleStoreThenLoad())
1564 if (!IsOffsetKnown || !
Offset.isZero())
1566 switch (
II.getIntrinsicID()) {
1569 case Intrinsic::lifetime_start:
1570 LifetimeStarts.insert(&
II);
1571 LifetimeStartBBs.push_back(
II.getParent());
1573 case Intrinsic::lifetime_end:
1574 LifetimeEndBBs.insert(
II.getParent());
1580 for (
unsigned Op = 0, OpCount = CB.
arg_size();
Op < OpCount; ++
Op)
1586 bool getShouldLiveOnFrame()
const {
1587 if (!ShouldLiveOnFrame)
1588 ShouldLiveOnFrame = computeShouldLiveOnFrame();
1589 return *ShouldLiveOnFrame;
1592 // True if the visitor observed a (possible) write to the analyzed alloca
// before coro.begin; callers use this to decide whether the alloca's
// pre-coro.begin contents must be copied into the coroutine frame.
bool getMayWriteBeforeCoroBegin()
const {
return MayWriteBeforeCoroBegin; }
1595 assert(getShouldLiveOnFrame() &&
"This method should only be called if the "
1596 "alloca needs to live on the frame.");
1597 for (
const auto &
P : AliasOffetMap)
1600 "created before CoroBegin.");
1601 return AliasOffetMap;
1607 const SuspendCrossingInfo &Checker;
1617 bool MayWriteBeforeCoroBegin{
false};
1618 bool ShouldUseLifetimeStartInfo{
true};
1620 mutable std::optional<bool> ShouldLiveOnFrame{};
1622 bool computeShouldLiveOnFrame()
const {
1627 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1630 if (LifetimeEndBBs.empty())
1638 &LifetimeEndBBs, &DT))
1645 if (PI.isEscaped()) {
1646 for (
auto *
A : LifetimeStarts) {
1647 for (
auto *
B : LifetimeStarts) {
1648 if (Checker.hasPathOrLoopCrossingSuspendPoint(
A->getParent(),
1669 for (
auto *U1 : Users)
1670 for (
auto *U2 : Users)
1671 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1679 MayWriteBeforeCoroBegin =
true;
1683 for (
auto &U :
I.uses())
1696 if (!IsOffsetKnown) {
1697 AliasOffetMap[&
I].reset();
1699 auto Itr = AliasOffetMap.find(&
I);
1700 if (Itr == AliasOffetMap.end()) {
1702 }
else if (Itr->second && *Itr->second !=
Offset) {
1705 AliasOffetMap[&
I].reset();
1767 auto GetFramePointer = [&](
Value *Orig) ->
Value * {
1768 FieldIDType
Index = FrameData.getFieldIndex(Orig);
1774 if (
auto *AI = dyn_cast<AllocaInst>(Orig)) {
1775 if (
auto *CI = dyn_cast<ConstantInt>(AI->
getArraySize())) {
1776 auto Count = CI->getValue().getZExtValue();
1785 auto GEP = cast<GetElementPtrInst>(
1787 if (
auto *AI = dyn_cast<AllocaInst>(Orig)) {
1788 if (FrameData.getDynamicAlign(Orig) != 0) {
1791 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->
getType());
1795 PtrValue = Builder.
CreateAdd(PtrValue, AlignMask);
1806 if (
GEP->getType() != Orig->getType())
1808 Orig->getName() +
Twine(
".cast"));
1813 for (
auto const &E : FrameData.Spills) {
1814 Value *Def = E.first;
1815 auto SpillAlignment =
Align(FrameData.getAlign(Def));
1819 Type *ByValTy =
nullptr;
1820 if (
auto *Arg = dyn_cast<Argument>(Def)) {
1827 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1829 if (Arg->hasByValAttr())
1830 ByValTy = Arg->getParamByValType();
1831 }
else if (
auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1834 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
1836 auto *
I = cast<Instruction>(Def);
1841 }
else if (
auto *
II = dyn_cast<InvokeInst>(
I)) {
1844 auto *NewBB =
SplitEdge(
II->getParent(),
II->getNormalDest());
1845 InsertPt = NewBB->getTerminator()->getIterator();
1846 }
else if (isa<PHINode>(
I)) {
1849 if (
auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->
getTerminator()))
1854 assert(!
I->isTerminator() &&
"unexpected terminator");
1857 InsertPt =
I->getNextNode()->getIterator();
1861 auto Index = FrameData.getFieldIndex(Def);
1875 Value *CurrentReload =
nullptr;
1876 for (
auto *U : E.second) {
1880 if (CurrentBlock != U->getParent()) {
1881 CurrentBlock = U->getParent();
1885 auto *
GEP = GetFramePointer(E.first);
1886 GEP->setName(E.first->getName() +
Twine(
".reload.addr"));
1888 CurrentReload =
GEP;
1892 SpillAlignment, E.first->getName() +
Twine(
".reload"));
1899 if (
F->getSubprogram()) {
1901 while (DIs.
empty() && DVRs.
empty() && isa<LoadInst>(CurDef)) {
1902 auto *LdInst = cast<LoadInst>(CurDef);
1904 if (LdInst->getPointerOperandType() != LdInst->getType())
1906 CurDef = LdInst->getPointerOperand();
1907 if (!isa<AllocaInst, LoadInst>(CurDef))
1914 auto SalvageOne = [&](
auto *DDI) {
1915 bool AllowUnresolved =
false;
1922 DDI->getExpression(), DDI->getDebugLoc(),
1923 DbgVariableRecord::LocationType::Declare);
1928 .insertDeclare(CurrentReload, DDI->getVariable(),
1929 DDI->getExpression(), DDI->getDebugLoc(),
1944 if (
auto *PN = dyn_cast<PHINode>(U)) {
1945 assert(PN->getNumIncomingValues() == 1 &&
1946 "unexpected number of incoming "
1947 "values in the PHINode");
1948 PN->replaceAllUsesWith(CurrentReload);
1949 PN->eraseFromParent();
1955 U->replaceUsesOfWith(Def, CurrentReload);
1959 DVR.replaceVariableLocationOp(Def, CurrentReload,
true);
1971 if (Shape.
ABI == coro::ABI::Retcon || Shape.
ABI == coro::ABI::RetconOnce ||
1972 Shape.
ABI == coro::ABI::Async) {
1975 for (
const auto &
P : FrameData.Allocas) {
1977 auto *
G = GetFramePointer(Alloca);
1981 G->takeName(Alloca);
1996 for (
const auto &
A : FrameData.Allocas) {
1998 UsersToUpdate.
clear();
2000 auto *
I = cast<Instruction>(U);
2004 if (UsersToUpdate.
empty())
2006 auto *
G = GetFramePointer(Alloca);
2012 for (
auto *DVI : DIs)
2013 DVI->replaceUsesOfWith(Alloca,
G);
2014 for (
auto *DVR : DbgVariableRecords)
2015 DVR->replaceVariableLocationOp(Alloca,
G);
2021 if (
I->isLifetimeStartOrEnd()) {
2022 I->eraseFromParent();
2026 I->replaceUsesOfWith(Alloca,
G);
2030 for (
const auto &
A : FrameData.Allocas) {
2032 if (
A.MayWriteBeforeCoroBegin) {
2036 "Coroutines cannot handle copying of array allocas yet");
2038 auto *
G = GetFramePointer(Alloca);
2045 for (
const auto &Alias :
A.Aliases) {
2046 auto *
FramePtr = GetFramePointer(Alloca);
2047 auto &
Value = *Alias.second;
2052 AliasPtr, [&](
Use &U) {
return DT.
dominates(CB, U); });
2064 auto *Inst = dyn_cast<Instruction>(U.getUser());
2065 if (!Inst || DT.dominates(CB, Inst))
2068 if (auto *CI = dyn_cast<CallInst>(Inst)) {
2073 if (CI->onlyReadsMemory() ||
2074 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
2079 return isa<StoreInst>(Inst) ||
2082 isa<GetElementPtrInst>(Inst) ||
2087 isa<BitCastInst>(Inst);
2089 if (HasAccessingPromiseBeforeCB) {
2091 auto *
G = GetFramePointer(PA);
2103 PHINode *UntilPHI =
nullptr) {
2104 auto *PN = cast<PHINode>(&SuccBB->
front());
2106 int Index = PN->getBasicBlockIndex(InsertedBB);
2109 V->getType(), 1, V->getName() +
Twine(
".") + SuccBB->
getName());
2112 PN->setIncomingValue(
Index, InputV);
2113 PN = dyn_cast<PHINode>(PN->getNextNode());
2114 }
while (PN != UntilPHI);
2154 auto *NewCleanupPadBB =
2157 CleanupPadBB->
getParent(), CleanupPadBB);
2160 auto *SetDispatchValuePN =
2164 auto *SwitchOnDispatch = Builder.
CreateSwitch(SetDispatchValuePN, UnreachBB,
2167 int SwitchIndex = 0;
2173 Twine(
".from.") + Pred->getName(),
2174 CleanupPadBB->
getParent(), CleanupPadBB);
2176 CaseBB->setName(CleanupPadBB->
getName() +
Twine(
".from.") +
2186 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
2187 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
2188 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
2195 for (
auto &BB :
F) {
2196 for (
auto &Phi : BB.
phis()) {
2197 if (Phi.getNumIncomingValues() == 1) {
2203 while (!Worklist.
empty()) {
2205 auto *OriginalValue = Phi->getIncomingValue(0);
2206 Phi->replaceAllUsesWith(OriginalValue);
2234 if (
auto *CleanupPad =
2239 dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
2242 assert(CS->getUnwindDest() == &BB);
2252 if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.
getFirstNonPHI()))) {
2267 IncomingBB->setName(BB.
getName() +
Twine(
".from.") + Pred->getName());
2285 if (
auto *PN = dyn_cast<PHINode>(&BB.
front()))
2286 if (PN->getNumIncomingValues() > 1)
2297 return (isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
2298 isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V));
2304 return isa<CoroIdInst>(&
I) || isa<CoroSaveInst>(&
I) ||
2305 isa<CoroSuspendInst>(&
I);
2327 for (
const auto &E : AllRemats) {
2330 RematGraph *RG = E.second.get();
2338 auto InsertPoint = &*
Use->getParent()->getFirstInsertionPt();
2339 if (isa<AnyCoroSuspendInst>(
Use)) {
2341 Use->getParent()->getSinglePredecessor();
2342 assert(SuspendPredecessorBlock &&
"malformed coro suspend instruction");
2350 for (;
I != RPOT.
end(); ++
I) {
2352 CurrentMaterialization =
D->clone();
2353 CurrentMaterialization->
setName(
D->getName());
2355 InsertPoint = CurrentMaterialization;
2359 for (
auto &
I : InstructionsToProcess)
2360 I->replaceUsesOfWith(
D, CurrentMaterialization);
2365 for (
unsigned i = 0, E =
Use->getNumOperands(); i != E; ++i)
2366 if (
Use->getOperand(i) ==
D)
2368 {
Use,
D, CurrentMaterialization});
2370 InstructionsToProcess.push_back(CurrentMaterialization);
2375 for (
auto &R : FinalInstructionsToProcess) {
2376 if (
auto *PN = dyn_cast<PHINode>(R.Use)) {
2377 assert(PN->getNumIncomingValues() == 1 &&
"unexpected number of incoming "
2378 "values in the PHINode");
2379 PN->replaceAllUsesWith(R.Remat);
2380 PN->eraseFromParent();
2383 R.Use->replaceUsesOfWith(R.Def, R.Remat);
2390 auto *BB =
I->getParent();
2408 return isa<AnyCoroSuspendInst>(BB->
front());
2443 if (
auto FI = dyn_cast<CoroAllocaFreeInst>(
User))
2444 VisitedOrFreeBBs.
insert(FI->getParent());
2453 unsigned depth = 3) {
2456 if (depth == 0)
return false;
2475 for (
auto *U : AI->
users()) {
2476 auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2491 for (
auto *AI : LocalAllocas) {
2496 Value *StackSave =
nullptr;
2504 for (
auto *U : AI->
users()) {
2506 if (isa<CoroAllocaGetInst>(U)) {
2507 U->replaceAllUsesWith(Alloca);
2513 auto FI = cast<CoroAllocaFreeInst>(U);
2519 DeadInsts.
push_back(cast<Instruction>(U));
2536 if (isa<CoroAllocaGetInst>(U)) {
2537 U->replaceAllUsesWith(Alloc);
2539 auto FI = cast<CoroAllocaFreeInst>(U);
2543 DeadInsts.
push_back(cast<Instruction>(U));
2550 return cast<Instruction>(Alloc);
2557 auto FnTy = FunctionType::get(ValueTy, {},
false);
2560 auto Call = Builder.
CreateCall(FnTy, Fn, {});
2572 auto FnTy = FunctionType::get(Builder.
getPtrTy(),
2573 {V->getType()},
false);
2576 auto Call = Builder.
CreateCall(FnTy, Fn, { V });
2595 auto ValueBeforeCall = Builder.
CreateLoad(ValueTy, Alloca);
2601 if (isa<CallInst>(Call)) {
2604 auto Invoke = cast<InvokeInst>(Call);
2605 Builder.
SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2623 if (isa<LoadInst>(
User) || isa<StoreInst>(
User))
2627 auto Call = cast<Instruction>(
User);
2646 IRBuilder<> Builder(
F.getEntryBlock().getFirstNonPHIOrDbg());
2648 auto ArgTy = cast<PointerType>(Arg.
getType());
2649 auto ValueTy = PointerType::getUnqual(
F.getContext());
2654 auto Alloca = Builder.
CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2669 auto FinalValue = Builder.
CreateLoad(ValueTy, Alloca);
2684 for (
auto &Arg :
F.args()) {
2685 if (!Arg.hasSwiftErrorAttr())
continue;
2692 for (
auto &Inst :
F.getEntryBlock()) {
2693 auto Alloca = dyn_cast<AllocaInst>(&Inst);
2705 if (!AllocasToPromote.
empty()) {
2714 const FrameDataInfo &FrameData,
2722 for (
auto *Def : FrameData.getAllDefs()) {
2723 for (
User *U : Def->users()) {
2724 auto Inst = cast<Instruction>(U);
2725 if (Inst->getParent() != CoroBegin->
getParent() ||
2733 while (!Worklist.
empty()) {
2735 for (
User *U : Def->users()) {
2736 auto Inst = cast<Instruction>(U);
2761 SuspendCrossingInfo &Checker,
2768 DomSet.
insert(&
F.getEntryBlock());
2772 "should have split coro.suspend into its own block");
2786 if (
auto*
II = dyn_cast<IntrinsicInst>(
I))
2787 return II->getIntrinsicID() == Intrinsic::lifetime_start;
2796 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2812 Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2815 if (collectLifetimeStart(UI, AI))
2823 if (Valid && Lifetimes.
size() != 0) {
2824 auto *NewLifetime = Lifetimes[0]->clone();
2825 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
2826 NewLifetime->insertBefore(DomBB->getTerminator());
2830 S->eraseFromParent();
2839 const SuspendCrossingInfo &Checker,
2852 if (AI->
hasMetadata(LLVMContext::MD_coro_outside_frame))
2858 bool ShouldUseLifetimeStartInfo =
2859 (Shape.
ABI != coro::ABI::Async && Shape.
ABI != coro::ABI::Retcon &&
2860 Shape.
ABI != coro::ABI::RetconOnce);
2861 AllocaUseVisitor Visitor{AI->
getDataLayout(), DT, Shape, Checker,
2862 ShouldUseLifetimeStartInfo};
2863 Visitor.visitPtr(*AI);
2864 if (!Visitor.getShouldLiveOnFrame())
2867 Visitor.getMayWriteBeforeCoroBegin());
2870static std::optional<std::pair<Value &, DIExpression &>>
2872 bool OptimizeFrame,
bool UseEntryValue,
Function *
F,
2874 bool SkipOutermostLoad) {
2876 auto InsertPt =
F->getEntryBlock().getFirstInsertionPt();
2877 while (isa<IntrinsicInst>(InsertPt))
2881 while (
auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2882 if (
auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2883 Storage = LdInst->getPointerOperand();
2890 if (!SkipOutermostLoad)
2892 }
else if (
auto *StInst = dyn_cast<StoreInst>(Inst)) {
2893 Storage = StInst->getValueOperand();
2900 if (!
Op || !AdditionalValues.
empty()) {
2908 SkipOutermostLoad =
false;
2911 return std::nullopt;
2913 auto *StorageAsArg = dyn_cast<Argument>(Storage);
2914 const bool IsSwiftAsyncArg =
2915 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
2920 if (IsSwiftAsyncArg && UseEntryValue && !Expr->
isEntryValue() &&
2929 if (StorageAsArg && !OptimizeFrame && !IsSwiftAsyncArg) {
2930 auto &Cached = ArgToAllocaMap[StorageAsArg];
2933 Storage->
getName() +
".debug");
2947 return {{*Storage, *Expr}};
2957 bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2961 ArgToAllocaMap, OptimizeFrame, UseEntryValue,
F, OriginalStorage,
2966 Value *Storage = &SalvagedInfo->first;
2974 if (isa<DbgDeclareInst>(DVI)) {
2975 std::optional<BasicBlock::iterator> InsertPt;
2976 if (
auto *
I = dyn_cast<Instruction>(Storage)) {
2977 InsertPt =
I->getInsertionPointAfterDef();
2981 if (ILoc && DVILoc &&
2982 DVILoc->getScope()->getSubprogram() ==
2983 ILoc->getScope()->getSubprogram())
2985 }
else if (isa<Argument>(Storage))
2986 InsertPt =
F->getEntryBlock().begin();
2988 DVI.
moveBefore(*(*InsertPt)->getParent(), *InsertPt);
3003 ArgToAllocaMap, OptimizeFrame, UseEntryValue,
F, OriginalStorage,
3008 Value *Storage = &SalvagedInfo->first;
3016 if (DVR.
getType() == DbgVariableRecord::LocationType::Declare) {
3017 std::optional<BasicBlock::iterator> InsertPt;
3018 if (
auto *
I = dyn_cast<Instruction>(Storage)) {
3019 InsertPt =
I->getInsertionPointAfterDef();
3023 if (ILoc && DVRLoc &&
3024 DVRLoc->getScope()->getSubprogram() ==
3025 ILoc->getScope()->getSubprogram())
3027 }
else if (isa<Argument>(Storage))
3028 InsertPt =
F->getEntryBlock().begin();
3031 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
3037 Function &
F, SuspendCrossingInfo &Checker,
3038 const std::function<
bool(
Instruction &)> &MaterializableCallback) {
3048 if (!MaterializableCallback(
I))
3050 for (
User *U :
I.users())
3051 if (Checker.isDefinitionAcrossSuspend(
I, U))
3052 Spills[&
I].push_back(cast<Instruction>(U));
3071 for (
auto &E : Spills) {
3075 if (AllRemats.
count(U))
3080 std::make_unique<RematGraph>(MaterializableCallback, U, Checker);
3084 for (
auto I = RPOT.begin();
I != RPOT.end();
3085 ++
I) { (*I)->Node->dump(); }
dbgs()
3088 AllRemats[U] = std::move(RematUPtr);
3100 const std::function<
bool(
Instruction &)> &MaterializableCallback) {
3105 if (
Shape.
ABI == coro::ABI::Switch &&
3114 if (
auto *Save = CSI->getCoroSave())
3127 if (
auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
3128 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
3129 if (!MustTailCallFn)
3149 SuspendCrossingInfo Checker(
F,
Shape);
3154 FrameDataInfo FrameData;
3158 Shape.
ABI != coro::ABI::RetconOnce)
3163 for (
User *U :
A.users())
3164 if (Checker.isDefinitionAcrossSuspend(
A, U))
3165 FrameData.Spills[&
A].push_back(cast<Instruction>(U));
3174 if (
auto AI = dyn_cast<CoroAllocaAllocInst>(&
I)) {
3188 for (
User *U : Alloc->users()) {
3189 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
3190 FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
3196 if (isa<CoroAllocaGetInst>(
I))
3199 if (
auto *AI = dyn_cast<AllocaInst>(&
I)) {
3204 for (
User *U :
I.users())
3205 if (Checker.isDefinitionAcrossSuspend(
I, U)) {
3207 if (
I.getType()->isTokenTy())
3209 "token definition is separated from the use by a suspend point");
3210 FrameData.Spills[&
I].push_back(cast<Instruction>(U));
3220 for (
auto &Iter : FrameData.Spills) {
3221 auto *V = Iter.first;
3226 if (Checker.isDefinitionAcrossSuspend(*V, DVI))
3227 FrameData.Spills[V].push_back(DVI);
3230 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
3231 FrameData.Spills[V].push_back(DVR->Marker->MarkedInstr);
3235 if (
Shape.
ABI == coro::ABI::Retcon ||
Shape.
ABI == coro::ABI::RetconOnce ||
3245 for (
auto *
I : DeadInstructions)
3246 I->eraseFromParent();
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
static void cleanupSinglePredPHIs(Function &F)
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
static bool isCoroutineStructureIntrinsic(Instruction &I)
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, coro::Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
static void eliminateSwiftError(Function &F, coro::Shape &Shape)
Eliminate all problematic uses of swifterror arguments and allocas from the function.
static void lowerLocalAllocas(ArrayRef< CoroAllocaAllocInst * > LocalAllocas, SmallVectorImpl< Instruction * > &DeadInsts)
Turn each of the given local allocas into a normal (dynamic) alloca instruction.
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
static Value * emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V, coro::Shape &Shape)
Set the given value as the current swifterror value.
static Value * emitSetAndGetSwiftErrorValueAround(Instruction *Call, AllocaInst *Alloca, coro::Shape &Shape)
Set the swifterror value from the given alloca before a call, then put in back in the alloca afterwar...
static void cacheDIVar(FrameDataInfo &FrameData, DenseMap< Value *, DILocalVariable * > &DIVarCache)
static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI)
static void splitAround(Instruction *I, const Twine &Name)
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca, coro::Shape &Shape)
Eliminate a formerly-swifterror alloca by inserting the get/set intrinsics and attempting to MemToReg...
static void rewritePHIs(BasicBlock &BB)
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB, BasicBlock *InsertedBB, BasicBlock *PredBB, PHINode *UntilPHI=nullptr)
static std::optional< std::pair< Value &, DIExpression & > > salvageDebugInfoImpl(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, bool OptimizeFrame, bool UseEntryValue, Function *F, Value *Storage, DIExpression *Expr, bool SkipOutermostLoad)
static DIType * solveDIType(DIBuilder &Builder, Type *Ty, const DataLayout &Layout, DIScope *Scope, unsigned LineNum, DenseMap< Type *, DIType * > &DITypeCache)
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB, unsigned depth=3)
After we split the coroutine, will the given basic block be along an obvious exit path for the resump...
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg, coro::Shape &Shape, SmallVectorImpl< AllocaInst * > &AllocasToPromote)
"Eliminate" a swifterror argument by reducing it to the alloca case and then loading and storing in t...
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
Build artificial debug info for C++ coroutine frames to allow users to inspect the contents of the fr...
static StructType * buildFrameType(Function &F, coro::Shape &Shape, FrameDataInfo &FrameData)
static BasicBlock * splitBlockIfNotFirst(Instruction *I, const Twine &Name)
static void sinkSpillUsesAfterCoroBegin(Function &F, const FrameDataInfo &FrameData, CoroBeginInst *CoroBegin)
retcon and retcon.once conventions assume that all spill uses can be sunk after the coro....
static bool isSuspendBlock(BasicBlock *BB)
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB, CleanupPadInst *CleanupPad)
static void rewriteMaterializableInstructions(const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &AllRemats)
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape, SuspendCrossingInfo &Checker, const DominatorTree &DT)
For each local variable that all of its user are only used inside one of suspended region,...
static void dumpAllocas(const SmallVectorImpl< AllocaInfo > &Allocas)
static StringRef solveTypeName(Type *Ty)
Create name for Type.
static void dumpSpills(StringRef Title, const SpillInfo &Spills)
static void doRematerializations(Function &F, SuspendCrossingInfo &Checker, const std::function< bool(Instruction &)> &MaterializableCallback)
static Value * emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy, coro::Shape &Shape)
Get the current swifterror value.
static void dumpRemats(StringRef Title, const SmallMapVector< Instruction *, std::unique_ptr< RematGraph >, 8 > &RM)
cl::opt< bool > UseNewDbgInfoFormat
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape)
Given that RA is a live value
static bool isLifetimeStart(const Instruction *Inst)
static MaybeAlign getAlign(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
mir Rename Register Operands
uint64_t IntrinsicInst * II
This file provides an interface for laying out a sequence of fields as a struct in a way that attempt...
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallString class.
static const unsigned FramePtr
This class represents a conversion between pointers from one address space to another.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const Instruction & front() const
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class represents a no-op cast from one type to another.
size_type size() const
size - Returns the number of bits in this bitvector.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args=std::nullopt, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This represents the llvm.coro.alloca.alloc instruction.
This class represents the llvm.coro.begin instruction.
This represents the llvm.coro.suspend instruction.
DICompositeType * createStructType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang=0, DIType *VTableHolder=nullptr, StringRef UniqueIdentifier="")
Create debugging information entry for a struct.
DIDerivedType * createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint32_t AlignInBits=0, std::optional< unsigned > DWARFAddressSpace=std::nullopt, StringRef Name="", DINodeArray Annotations=nullptr)
Create debugging information entry for a pointer.
DIExpression * createExpression(ArrayRef< uint64_t > Addr=std::nullopt)
Create a new descriptor for the specified variable which has a complex address expression for its add...
DISubrange * getOrCreateSubrange(int64_t Lo, int64_t Count)
Create a descriptor for a value range.
DICompositeType * createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts, PointerUnion< DIExpression *, DIVariable * > DataLocation=nullptr, PointerUnion< DIExpression *, DIVariable * > Associated=nullptr, PointerUnion< DIExpression *, DIVariable * > Allocated=nullptr, PointerUnion< DIExpression *, DIVariable * > Rank=nullptr)
Create debugging information entry for an array.
DIBasicType * createBasicType(StringRef Name, uint64_t SizeInBits, unsigned Encoding, DINode::DIFlags Flags=DINode::FlagZero)
Create debugging information entry for a basic type.
DINodeArray getOrCreateArray(ArrayRef< Metadata * > Elements)
Get a DINodeArray, create one if required.
DIDerivedType * createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations=nullptr)
Create debugging information entry for a member.
DILocalVariable * createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve=false, DINode::DIFlags Flags=DINode::FlagZero, uint32_t AlignInBits=0)
Create a new descriptor for an auto variable.
void replaceArrays(DICompositeType *&T, DINodeArray Elements, DINodeArray TParams=DINodeArray())
Replace arrays on a composite type.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
bool isSingleLocationExpression() const
Return whether the evaluated expression makes use of a single location at the start of the expression...
DILocalScope * getScope() const
Get the local scope for this variable.
Base class for scope-like contexts.
StringRef getName() const
uint64_t getSizeInBits() const
uint32_t getAlignInBits() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.declare instruction.
DebugLoc getDebugLoc() const
void setDebugLoc(DebugLoc Loc)
This represents the llvm.dbg.value instruction.
This is the common base class for debug info intrinsics for variables.
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
Value * getVariableLocationOp(unsigned OpIdx) const
void setExpression(DIExpression *NewExpr)
DILocalVariable * getVariable() const
DIExpression * getExpression() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
void setExpression(DIExpression *NewExpr)
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
void replaceVariableLocationOp(Value *OldValue, Value *NewValue, bool AllowEmpty=false)
DILocation * get() const
Get the underlying DILocation.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
UnreachableInst * CreateUnreachable()
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
SwitchInst * CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases=10, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a switch instruction with the specified value, default dest, and with a hint for the number of...
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
void visitIntrinsicInst(IntrinsicInst &I)
void visitBitCastInst(BitCastInst &I)
void visit(Iterator Start, Iterator End)
void visitPHINode(PHINode &I)
void visitAddrSpaceCastInst(AddrSpaceCastInst &I)
void visitSelectInst(SelectInst &I)
void visitGetElementPtrInst(GetElementPtrInst &I)
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void insertAfter(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately after the specified instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
static MDString * get(LLVMContext &Context, StringRef Str)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
size_type count(const KeyT &Key) const
This is the common base class for memset/memcpy/memmove.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
A base class for visitors over the uses of a pointer value.
void visitCallBase(CallBase &CB)
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitBitCastInst(BitCastInst &BC)
void visitStoreInst(StoreInst &SI)
void visitIntrinsicInst(IntrinsicInst &II)
void visitMemIntrinsic(MemIntrinsic &I)
This class represents the LLVM 'select' instruction.
iterator end()
Get an iterator to the end of the SetVector.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
StringRef str() const
Explicit conversion to StringRef.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Compute live ranges of allocas.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
std::string str() const
str - Get the contents as an std::string.
TypeSize getElementOffsetInBits(unsigned Idx) const
Class to represent struct types.
void setBody(ArrayRef< Type * > Elements, bool isPacked=false)
Specify a body for an opaque identified type.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
unsigned getNumElements() const
Random access to the elements.
Type * getElementType(unsigned N) const
void setDefaultDest(BasicBlock *DefaultCase)
TinyPtrVector - This class is specialized for cases where there are normally 0 or 1 element in a vect...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
StringRef getStructName() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
APInt Offset
The constant offset of the use if that is known.
void enqueueUsers(Instruction &I)
Enqueue the users of this instruction in the visit worklist.
SmallVector< UseToVisit, 8 > Worklist
The worklist of to-visit uses.
constexpr ScalarTy getFixedValue() const
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A range adaptor for a pair of iterators.
A raw_ostream that writes to an SmallVector or SmallString.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
@ CE
Windows NT (Windows on ARM)
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool OptimizeFrame, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer,...
bool defaultMaterializable(Instruction &V)
Default materializable callback.
void buildCoroutineFrame(Function &F, Shape &Shape, TargetTransformInfo &TTI, const std::function< bool(Instruction &)> &MaterializableCallback)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
bool isCPlusPlus(SourceLanguage S)
NodeAddr< DefNode * > Def
NodeAddr< BlockNode * > Block
This is an optimization pass for GlobalISel generic memory operations.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
TinyPtrVector< DbgDeclareInst * > findDbgDeclares(Value *V)
Finds dbg.declare intrinsics declaring local variables as living in the memory that 'V' points to.
void PromoteMemToReg(ArrayRef< AllocaInst * > Allocas, DominatorTree &DT, AssumptionCache *AC=nullptr)
Promote the specified list of alloca instructions into scalar registers, inserting PHI nodes as appro...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
auto successors(const MachineBasicBlock *BB)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
void findDbgValues(SmallVectorImpl< DbgValueInst * > &DbgValues, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the llvm.dbg.value intrinsics describing a value.
bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is a potentially a path from at least one block in 'Worklist' to at least one...
BasicBlock * ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ, LandingPadInst *OriginalPad=nullptr, PHINode *LandingPadReplacement=nullptr, const CriticalEdgeSplittingOptions &Options=CriticalEdgeSplittingOptions(), const Twine &BBName="")
Split the edge connect the specficed blocks in the case that Succ is an Exception Handling Block.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
uint64_t offsetToAlignment(uint64_t Value, Align Alignment)
Returns the offset to the next integer (mod 2**64) that is greater than or equal to Value and is a mu...
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
std::pair< uint64_t, Align > performOptimizedStructLayout(MutableArrayRef< OptimizedStructLayoutField > Fields)
Compute a layout for a struct containing the given fields, making a best-effort attempt to minimize t...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred, BasicBlock *NewPred, PHINode *Until=nullptr)
Replaces all uses of OldPred with the NewPred block in all PHINodes in a block.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
TinyPtrVector< DbgVariableRecord * > findDVRDeclares(Value *V)
As above, for DVRDeclares.
auto predecessors(const MachineBasicBlock *BB)
BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ)
Sets the unwind edge of an instruction to a particular successor.
unsigned pred_size(const MachineBasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
RematGraph::RematNode * NodeRef
static ChildIteratorType child_end(NodeRef N)
RematGraph::RematNode ** ChildIteratorType
static NodeRef getEntryNode(RematGraph *G)
static ChildIteratorType child_begin(NodeRef N)
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align Alignment
The required alignment of this field.
uint64_t Offset
The offset of this field in the final layout.
uint64_t Size
The required size of this field in bytes.
static constexpr uint64_t FlexibleOffset
A special value for Offset indicating that the field can be moved anywhere.
A MapVector that performs no allocations if smaller than a certain size.
Align getContextAlignment() const
uint64_t ContextHeaderSize
bool IsFrameInlineInStorage
AllocaInst * PromiseAlloca
AsyncLoweringStorage AsyncLowering
AnyCoroIdRetconInst * getRetconCoroId() const
CoroIdInst * getSwitchCoroId() const
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SmallVector< CallInst *, 2 > SwiftErrorOps
AllocaInst * getPromiseAlloca() const
bool OptimizeFrame
This would only be true if optimizations are enabled.
SwitchLoweringStorage SwitchLowering
CoroBeginInst * CoroBegin
BasicBlock::iterator getInsertPtAfterFramePtr() const
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
RetconLoweringStorage RetconLowering
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
BasicBlock * AllocaSpillBlock