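// Excerpts from LLVM's llvm/lib/Transforms/Utils/InlineFunction.cpp, which
// implements inlining of a single call site (llvm::InlineFunction), including
// exception-handling rewriting, scoped-alias-metadata cloning, and profile
// updates.
//
// Command-line knobs that control how attribute and metadata information is
// propagated during inlining: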
static cl::opt<bool>
    EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                            cl::Hidden,
                            cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::ZeroOrMore, cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

static cl::opt<bool> PreserveAlignmentAssumptions(
    "preserve-alignment-assumptions-during-inlining", cl::init(false),
    cl::Hidden,
    cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));
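// These are ordinary cl::opt flags, so they can be toggled when running the
// inliner through opt; a hypothetical invocation (flag values chosen purely
// for illustration):
//
//   opt -passes='cgscc(inline)' -enable-noalias-to-md-conversion=false \
//       -max-inst-checked-for-throw-during-inlining=8 in.ll -S -o out.ll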
namespace {
/// A class for recording information about inlining a landing pad.
class LandingPadInliningInfo {
  /// Destination of the invoke's unwind.
  BasicBlock *OuterResumeDest;

  /// Destination for the callee's resume.
  BasicBlock *InnerResumeDest = nullptr;

  /// LandingPadInst associated with the invoke.
  LandingPadInst *CallerLPad = nullptr;

  /// PHI for EH values from landingpad insts.
  PHINode *InnerEHValuesPHI = nullptr;

  SmallVector<Value *, 8> UnwindDestPHIValues;

public:
  LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()) {
    // If there are PHI nodes in the unwind destination block, we need to keep
    // track of which values came into them from the invoke before removing
    // the edge from this block.
    BasicBlock *InvokeBB = II->getParent();
    BasicBlock::iterator I = OuterResumeDest->begin();
    for (; isa<PHINode>(I); ++I) {
      // Save the value to use for this edge.
      PHINode *PHI = cast<PHINode>(I);
      UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
    }

    CallerLPad = cast<LandingPadInst>(I);
  }

  /// The outer unwind destination is the target of unwind edges introduced
  /// for calls within the inlined function.
  BasicBlock *getOuterResumeDest() const {
    return OuterResumeDest;
  }

  // ...

  /// Add incoming-PHI values to the unwind destination block for the given
  /// basic block, using the values for the original invoke's source block.
  void addIncomingPHIValuesFor(BasicBlock *BB) const {
    addIncomingPHIValuesForInto(BB, OuterResumeDest);
  }

  void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
    BasicBlock::iterator I = dest->begin();
    for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
      PHINode *phi = cast<PHINode>(I);
      phi->addIncoming(UnwindDestPHIValues[i], src);
    }
  }
};
} // end anonymous namespace
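/// Get or create a target for the branch from ResumeInsts.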
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
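/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.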
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}
/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
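/// Helper for getUnwindDestToken that does the descendant-ward part of the
/// search.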
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // ...
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Consult the memo map for the pads nested under each handler.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // ...
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // Only ConstantTokenNone ("unwind to caller") tells us the
            // catchswitch's own unwind dest.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of this cleanuppad.
          continue;
        }
        // ...
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued
    // its children; move on to the next pad in the worklist.
    if (!UnwindDestToken)
      continue;

    // Record the unwind dest for CurrentPad and for any of its ancestor pads
    // that the same unwind exits.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}
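/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction; if it unwinds to caller, return
/// ConstantTokenNone; if no definitive answer can be found, return nullptr.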
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind the same way as their switches.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants, so search up the chain for a funclet with information.  Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // We should have recorded the lack of information for every descendant
    // we came through, so never expect a null entry for an ancestor here.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // Memoize UnwindDestToken for the no-information pads below LastUselessPad,
  // so that later searches do not repeat the work.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // This pad turned out to have real information; leave it alone.
      continue;
    }
    // ...
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert((!isa<InvokeInst>(U) ||
                  (getParentPad(cast<InvokeInst>(U)
                                    ->getUnwindDest()
                                    ->getFirstNonPHI()) == CatchPad)) &&
                 "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(cast<InvokeInst>(U)
                                  ->getUnwindDest()
                                  ->getFirstNonPHI()) == UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}
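/// When we inline a basic block into an invoke, we have to turn all of the
/// calls that can throw into invokes. This function analyzes BB to see if
/// there are any calls, and if so, it rewrites them to be invokes that jump
/// to UnwindEdge.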
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    CallInst *CI = dyn_cast<CallInst>(&I);
    if (!CI || CI->doesNotThrow())
      continue;

    // Calls to @llvm.experimental.deoptimize/guard are never converted to
    // invokes.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // If the funclet containing this call has an unwind destination within
      // the inlinee, rewriting the call would give its parent funclet
      // multiple unwind destinations; leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}
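/// If we inlined an invoke site, we need to convert calls in the body of the
/// inlined function into invokes. II is the invoke instruction being inlined;
/// FirstNewBlock is the first block of the inlined code.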
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  // Scan the inlined code, which now sits at the end of the function,
  // rewriting what needs to be rewritten.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad onto the inlined ones.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // There is now a new entry in the PHI nodes of the exceptional block.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // The PHI nodes in the exception destination block still have entries due
  // to the original invoke instruction; eliminate them now.
  InvokeDest->removePredecessor(II->getParent());
}
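/// Variant of the above for callees using Windows-style (funclet-based) EH:
/// convert calls in the inlined body into invokes and rewrite funclet pads,
/// cleanuprets and catchswitches that unwind to the caller.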
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // Save the PHI values coming from the invoke's block before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (PHINode &PHI : UnwindDest->phis())
    UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));

  // Add incoming-PHI values to the unwind destination block for the given
  // basic block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // Rewrite instructions which 'unwind to caller' to unwind to the invoke
  // destination instead.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // A cleanuppad without an unwind dest is "unwind to caller"; record
        // that so later getUnwindDestToken queries are not confused.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // If the parent funclet unwinds within the inlinee, leave this
          // catchswitch alone; rewriting it would give the parent funclet
          // multiple unwind destinations.
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        UpdatePHINodes(NewBB);

  // The PHI nodes in the unwind destination still have entries due to the
  // original invoke instruction; eliminate them now.
  UnwindDest->removePredecessor(InvokeBB);
}
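/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.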
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group,
                      uniteAccessGroups(
                          I.getMetadata(LLVMContext::MD_access_group),
                          AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope,
                      MDNode::concatenate(
                          I.getMetadata(LLVMContext::MD_alias_scope),
                          AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias,
                      MDNode::concatenate(
                          I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}
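/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions, so the metadata must be deep-cloned rather
/// than shared between caller and callee.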
namespace {
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}
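/// If the inlined function has noalias arguments, then add new alias scopes
/// for each noalias argument, tag the mapped noalias parameters with noalias
/// metadata specifying the new scope, and tag all non-derived loads, stores
/// and memory intrinsics with the new alias scopes.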
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) &&
        !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function &>(*CalledFunc));

  // Add a new scope domain for this function and a scope for each noalias
  // argument.
  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, we don't need to
        // add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
          // We'll retain this knowledge without additional metadata.
          if (AAResults::onlyAccessesInaccessibleMem(MRB))
            continue;
          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // Only pointer arguments are relevant for an argument-memory-only
          // call.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with aliasing metadata.  However, if this is a call, it might
      // just alias with none of the noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // Collect the underlying objects of all pointer arguments.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object, then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value *>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, figure out the scopes with which this instruction definitely
      // does not alias.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) &&
            (!CanDeriveViaCapture ||
             !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
                                         /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, figure out the scopes to which this instruction might belong:
      // only possible if every underlying object is a noalias argument.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs)
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
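// Check whether the instructions between Begin and End may throw or otherwise
// exit the function, scanning at most a small, flag-controlled window
// (InlinerAttributeWindow) of instructions before conservatively answering
// "yes".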
static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {
  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      Begin->getIterator(), End->getIterator(), InlinerAttributeWindow + 1);
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
  if (!AB.hasAttributes())
    return AB;
  AttrBuilder Valid;
  // Only allow these white listed attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes that are not valid on the return value.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;
  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (!Valid.hasAttributes())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Check that the cloned RetVal exists and is a call, otherwise we cannot
    // add the attributes on the cloned RetVal.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // The attributes are only valid if the return value is in the same basic
    // block as the call and no throwing or exiting instruction sits between
    // them.
    if (RetVal->getParent() != RI->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // ...
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL = AL.addRetAttributes(Context, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}
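/// If the inlined function has non-byval align arguments, then add
/// @llvm.assume-based alignment assumptions to preserve this information.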
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, check for existing ones in the
  // caller; that may require a dominator tree of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CB.getCaller());
        DTCalculated = true;
      }
      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp =
          IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(cast<AssumeInst>(NewAsmp));
    }
  }
}
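/// Once we have cloned code over from a callee into the caller, update the
/// specified callgraph to reflect the changes we made.  Note that it's
/// possible that not all code was copied over, so only some edges of the
/// callgraph may remain.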
static void UpdateCallGraphAfterInlining(CallBase &CB,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CB.getCaller();
  const Function *Callee = CB.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case of callee == caller: iterating over a copy avoids
  // invalidating the iterators as new edges are added.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    // Skip 'reference' call records.
    if (!I->first)
      continue;

    const Value *OrigCall = *I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    auto *NewCall = dyn_cast<CallBase>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    if (NewCall->getCalledFunction() &&
        NewCall->getCalledFunction()->isIntrinsic())
      continue;

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If that
    // happens, set the callee of the new call site to a more precise
    // destination.
    if (!I->second->getFunction())
      if (Function *F = NewCall->getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(NewCall, CG[F]);
        continue;
      }

    CallerNode->addCalledFunction(NewCall, I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We
  // must do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
}

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
                                    Module *M, BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size =
      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

  // Always generate a memcpy of alignment 1 here because we don't know the
  // alignment of the src pointer.  Whatever the alignment is, we promise that
  // the copied-to memory element is as aligned.
  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                       /*SrcAlign*/ Align(1), Size);
}
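/// When inlining a call site that has a byval argument, we have to make the
/// implicit memcpy explicit by adding it.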
static Value *HandleByValArgument(Type *ByValType, Value *Arg,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  assert(cast<PointerType>(Arg->getType())
             ->isOpaqueOrPointeeTypeMatches(ByValType));
  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy
  // and temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is less than the
    // desired alignment of the copy, then making the copy is slower than just
    // using the originally-passed in variable.
    if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we
    // can round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
                                   AC) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment, which is
    // bad as we might be creating a stack frame where none existed before.
  }

  // Create the alloca, using the preferred alignment but never less than the
  // alignment required by the byval attribute.
  Align Alignment(DL.getPrefTypeAlignment(ByValType));
  if (ByValAlignment > 0)
    Alignment = std::max(Alignment, Align(ByValAlignment));

  Value *NewAlloca =
      new AllocaInst(ByValType, DL.getAllocaAddrSpace(), nullptr, Alignment,
                     Arg->getName(), &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca is already used by lifetime.start or
// lifetime.end intrinsics, possibly through casts to i8*.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy =
      Type::getInt8PtrTy(Ty->getContext(), Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy)
      continue;
    if (U->stripPointerCasts() != AI)
      continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}
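/// Update inlined instructions' line numbers to encode location where these
/// instructions are inlined.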
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from
  // the same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this every instruction's inlined-at chain would become distinct.
  DenseMap<const MDNode *, MDNode *> IANodes;

  // Check if we are not generating inline line tables and want to use the
  // call site location instead.
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
         ++BI) {
      // Loop metadata needs to be updated so that the start and end locs
      // reference inlined-at locations.
      auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
                                &IANodes](Metadata *MD) -> Metadata * {
        if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
          return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
        return MD;
      };
      updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);

      if (!NoInlineLineTables)
        if (DebugLoc DL = BI->getDebugLoc()) {
          DebugLoc IDL =
              inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
          BI->setDebugLoc(IDL);
          continue;
        }

      if (CalleeHasDebugInfo && !NoInlineLineTables)
        continue;

      // If the inlined instruction has no line number, or if inline info is
      // not being generated, make it look as if it originates from the call
      // location.  Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }

    // Remove debug info intrinsics if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        if (isa<DbgInfoIntrinsic>(BI)) {
          BI = BI->eraseFromParent();
          continue;
        }
        ++BI;
      }
    }
  }
}
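/// Update the block frequencies of the caller after a callee has been
/// inlined: each cloned block gets a frequency derived from its frequency in
/// the callee, scaled by the frequency of the call site.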
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it; use the maximum
      // frequency in that case.
      uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
      ClonedBBs);
}
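/// Updates profile information by adjusting the entry count by adding
/// EntryDelta, then scaling callsite information by the new count divided by
/// the old count.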
void llvm::updateProfileCallee(
    Function *Callee, int64_t EntryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap) {
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount.hasValue())
    return;

  const uint64_t PriorEntryCount = CalleeCount->getCount();

  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count and has to be set to 0 to guard against underflow.
  const uint64_t NewEntryCount =
      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
          ? 0
          : PriorEntryCount + EntryDelta;

  // During inlining, scale the profile weights of the cloned call sites.
  if (VMap) {
    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
    for (auto Entry : *VMap)
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
  }

  if (EntryDelta) {
    Callee->setEntryCount(NewEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB)
          if (CallInst *CI = dyn_cast<CallInst>(&I))
            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
  }
}
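/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
/// result is implicitly consumed by a call to retainRV or claimRV immediately
/// after the call.  This function inlines the retainRV/claimRV calls.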
static void
inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
                           const SmallVectorImpl<ReturnInst *> &Returns) {
  Module *Mod = CB.getModule();
  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
       IsUnsafeClaimRV = !IsRetainRV;

  for (auto *RI : Returns) {
    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
    bool InsertRetainCall = IsRetainRV;
    IRBuilder<> Builder(RI->getContext());

    // Walk backwards through the basic block looking for either a matching
    // autoreleaseRV call or an unannotated call.
    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
                                      RI->getParent()->rend());
    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
      // Ignore casts.
      if (isa<CastInst>(I))
        continue;

      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
            !II->hasNUses(0) ||
            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
          break;

        // We've found a matching autoreleaseRV call.  If claimRV is attached
        // to the call, insert a call to objc_release before erasing the
        // autoreleaseRV call; if retainRV is attached, just erase it.
        if (IsUnsafeClaimRV) {
          Builder.SetInsertPoint(II);
          Function *IFn =
              Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
          Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
          Builder.CreateCall(IFn, BC, "");
        }
        II->eraseFromParent();
        InsertRetainCall = false;
        break;
      }

      auto *CI = dyn_cast<CallInst>(&I);
      if (!CI)
        break;

      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
          objcarc::hasAttachedCallOpBundle(CI))
        break;

      // We've found an unannotated call that defines RetOpnd; add a
      // "clang.arc.attachedcall" operand bundle.
      Function *IFn = objcarc::getAttachedARCFunction(&CB).getValue();
      OperandBundleDef OB("clang.arc.attachedcall", IFn);
      auto *NewCall = CallBase::addOperandBundle(
          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
      NewCall->copyMetadata(*CI);
      CI->replaceAllUsesWith(NewCall);
      CI->eraseFromParent();
      InsertRetainCall = false;
      break;
    }

    if (InsertRetainCall) {
      // The retainRV is attached to the call and we've failed to find a
      // matching autoreleaseRV or an annotated call in the callee; emit a
      // call to objc_retain.
      Builder.SetInsertPoint(RI);
      Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
      Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
      Builder.CreateCall(IFn, BC, "");
    }
  }
}
/// This function inlines the called function into the basic block of the
/// caller.  This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C'
/// now exists in the instruction stream.
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  // FIXME: we don't inline callbr yet.
  if (isa<CallBrInst>(CB))
    return InlineResult::failure("We don't inline callbr yet");

  Function *CalledFunc = CB.getCalledFunction();
  Function *Caller = CB.getCaller();
  // ...

  // Refuse to inline a strictfp function into a non-strictfp caller; that
  // would require converting the inlined operations to constrained
  // intrinsics, which is not yet implemented.
  if (CalledFunc->getAttributes().hasFnAttr(Attribute::StrictFP) &&
      !Caller->getAttributes().hasFnAttr(Attribute::StrictFP)) {
    return InlineResult::failure("incompatible strictfp attributes");
  }

  // GC poses two hazards to inlining, which only occur when the callee has
  // GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return InlineResult::failure("incompatible GC");
  }

  // Find the personality function used by the landing pads of the caller.  If
  // it exists, then check that it matches the personality used in the callee.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the inlining.
    // Otherwise, we can't inline.
    else if (CalledPersonality != CallerPersonality)
      return InlineResult::failure("incompatible personality");
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isScopedEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CB.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal.  What about the callee?
      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return InlineResult::failure("catch in cleanup funclet");
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant, there may not be any sort of
          // exceptional funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return InlineResult::failure("SEH in cleanup funclet");
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && isa<CallInst>(CB)) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }
  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  BasicBlock *OrigBB = CB.getParent();
  Function::iterator LastBlock = --Caller->end();

  SmallVector<ReturnInst *, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    struct ByValInit {
      Value *Dst;
      Value *Src;
      Type *Ty;
    };
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<ByValInit, 4> ByValInits;

    // ...
    auto &DL = Caller->getParent()->getDataLayout();

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    auto AI = CB.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
                                      E = CalledFunc->arg_end();
         I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CB.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
                                        &CB, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo));
        if (ActualArg != *AI)
          ByValInits.push_back(
              {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
      }

      VMap[&*I] = ActualArg;
    }

    // TODO: Remove this when users have been updated to the assume bundles.
    // Add alignment assumptions if necessary.
    AddAlignmentAssumptions(CB, IFI);

    // ...

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining (which
    // can happen, e.g., because an argument was constant), but we'll be happy
    // with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the caller's block frequency info.
    if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
      updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                      CalledFunc->front());

    // ...

    // Inject byval arguments initialization.
    for (ByValInit &Init : ByValInits)
      HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
                              &*FirstNewBlock, IFI);
    // If the call site had deopt state, merge it into the deopt bundles on
    // the cloned call sites: the parent's deoptimization continuation is
    // prepended to each inlined call's continuation.
    Optional<OperandBundleUse> ParentDeopt =
        CB.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
        if (!ICS)
          continue; // instruction was DCE'd or RAUW'ed to undef
        for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles();
             COBi < COBe; ++COBi) {
          auto ChildOB = ICS->getOperandBundleAt(COBi);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt)
            continue;
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());
          llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
          llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
          // ... (rebuild ICS with the merged "deopt" bundle)
        }
      }
    }

    // ...
    // Now clone the inlined-function's noalias scope metadata and remap the
    // cloned instructions to use it.
    SAMetadataCloner.clone();
    SAMetadataCloner.remap(FirstNewBlock, Caller->end());

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);

    // Clone return attributes on the callsite into the calls within the
    // inlined function which feed into its return value.
    AddReturnAttributes(CB, VMap);

    // Propagate metadata on the callsite if necessary.
    PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<AssumeInst>(&I))
            IFI.GetAssumptionCache(*Caller).registerAssumption(II);
  } // End of VMap scope.

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
                              E = FirstNewBlock->end();
         I != E;) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI)
        continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             !cast<AllocaInst>(I)->use_empty() &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means that
      // the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
  }
  SmallVector<Value *, 4> VarArgsToForward;
  SmallVector<AttributeSet, 4> VarArgsAttrs;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CB.arg_size(); i++) {
    VarArgsToForward.push_back(CB.getArgOperand(i));
    VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(&CB))
      CallSiteTailKind = CI->getTailCallKind();

    // For inlining purposes, the "notail" marker is the same as no marker.
    if (CallSiteTailKind == CallInst::TCK_NoTail)
      CallSiteTailKind = CallInst::TCK_None;

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : llvm::make_early_inc_range(*BB)) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // Forward varargs from the inlined call site to calls to the
        // ForwardVarArgsTo function, if requested, and to musttail calls.
        if (!VarArgsToForward.empty() &&
            ((ForwardVarArgsTo &&
              CI->getCalledFunction() == ForwardVarArgsTo) ||
             CI->isMustTailCall())) {
          // Collect attributes for non-vararg parameters.
          AttributeList Attrs = CI->getAttributes();
          SmallVector<AttributeSet, 8> ArgAttrs;
          if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
            for (unsigned ArgNo = 0;
                 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
              ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
          }

          // Add VarArg attributes, then the VarArgs themselves.
          ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                     Attrs.getRetAttrs(), ArgAttrs);
          SmallVector<Value *, 6> Params(CI->args());
          Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
          CallInst *NewCI = CallInst::Create(
              CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
          NewCI->setDebugLoc(CI->getDebugLoc());
          NewCI->setAttributes(Attrs);
          NewCI->setCallingConv(CI->getCallingConv());
          CI->replaceAllUsesWith(NewCI);
          CI->eraseFromParent();
          CI = NewCI;
        }

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls.  For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // ...
      }
    }
  }
  // Insert lifetime intrinsics for the static allocas we just inlined.
  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
      !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas.  They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that the array size doesn't saturate uint64_t and doesn't
        // overflow when multiplied by the type size.
        if (!AllocaTypeSize.isScalable() &&
            AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize.getFixedSize()) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or
        // deoptimize call and a return.  The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
    // ...
  }
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.  This is sensitive to
  // which funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.  Anything
  // that had 'none' as its parent is now nested inside the callsite's EHPad.
  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to inlined call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (Instruction &II : llvm::make_early_inc_range(*BB)) {
        CallBase *I = dyn_cast<CallBase>(&II);
        if (!I)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (I->getOperandBundle(LLVMContext::OB_funclet))
          continue;

        I->getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst = CallBase::Create(I, OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but
      // into a funclet which does unwind.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }
  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return
    // set, so that the control flow from those returns does not get merged
    // into the caller (and terminate it with a "ret").
    if (Caller->getReturnType() == CB.getType()) {
      llvm::erase_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // All @llvm.experimental.deoptimize declarations in a well-formed
        // module must have the same calling convention.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->args());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        auto DeoptAttributes = DeoptCall->getAttributes();
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        NewDeoptCall->setAttributes(DeoptAttributes);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

    // Handle the returns preceded by musttail calls specially.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }
  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites.  We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead.
  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto *CB = dyn_cast<CallBase>(&I))
          IFI.InlinedCallSites.push_back(CB);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly
  // into the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(CB.getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!CB.use_empty()) {
      ReturnInst *R = Returns[0];
      if (&CB == R->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    CB.eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return InlineResult::success();
  }
  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.  Clone the rest of the callee into the hole
  // between the "starter" and "ender" blocks.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
    // Add an unconditional branch to make this look like the CallInst case.
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");
  } else { // It's a call: split the basic block the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy original BB's block frequency to AfterCallBB.
    IFI.CallerBFI->setBlockFreq(
        AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  Instruction *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Move the basic blocks inserted at the end of the function into the space
  // made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());
  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!CB.use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      CB.replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // The debug location of the new branches may be nonsensical, but at least
    // they will be associated with the right function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!CB.use_empty()) {
      if (&CB == Returns[0]->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will
    // return to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!CB.use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  CB.eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block.
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes.
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch, then the now-empty CalleeEntry block.
  OrigBB->getInstList().erase(Br);
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value; if so,
  // substitute it and delete the phi.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return InlineResult::success();
}