LLVM 23.0.0git
FunctionAttrs.cpp
Go to the documentation of this file.
1//===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements interprocedural passes which walk the
11/// call-graph deducing and/or propagating function attributes.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SetVector.h"
24#include "llvm/ADT/Statistic.h"
27#include "llvm/Analysis/CFG.h"
35#include "llvm/IR/Argument.h"
36#include "llvm/IR/Attributes.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constant.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/Function.h"
43#include "llvm/IR/InstrTypes.h"
44#include "llvm/IR/Instruction.h"
47#include "llvm/IR/Metadata.h"
49#include "llvm/IR/PassManager.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Use.h"
53#include "llvm/IR/User.h"
54#include "llvm/IR/Value.h"
58#include "llvm/Support/Debug.h"
62#include "llvm/Transforms/IPO.h"
64#include <cassert>
65#include <iterator>
66#include <map>
67#include <optional>
68#include <vector>
69
70using namespace llvm;
71using namespace llvm::PatternMatch;
72
73#define DEBUG_TYPE "function-attrs"
74
// Statistics counting how many functions/arguments/returns each inference in
// this pass managed to improve.
75STATISTIC(NumMemoryAttr, "Number of functions with improved memory attribute");
76STATISTIC(NumCapturesNone, "Number of arguments marked captures(none)");
77STATISTIC(NumCapturesPartial, "Number of arguments marked with captures "
78 "attribute other than captures(none)");
79STATISTIC(NumReturned, "Number of arguments marked returned");
80STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
81STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
82STATISTIC(NumWriteOnlyArg, "Number of arguments marked writeonly");
83STATISTIC(NumNoAlias, "Number of function returns marked noalias");
84STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
85STATISTIC(NumNoUndefReturn, "Number of function returns marked noundef");
86STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
87STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");
88STATISTIC(NumNoFree, "Number of functions marked as nofree");
89STATISTIC(NumWillReturn, "Number of functions marked as willreturn");
90STATISTIC(NumNoSync, "Number of functions marked as nosync");
91STATISTIC(NumCold, "Number of functions marked as cold");
92
// Counters specific to the ThinLTO thin-link propagation entry point below.
93STATISTIC(NumThinLinkNoRecurse,
94 "Number of functions marked as norecurse during thinlink");
95STATISTIC(NumThinLinkNoUnwind,
96 "Number of functions marked as nounwind during thinlink");
// Command-line knobs controlling the pass.
// NOTE(review): the `static cl::opt<bool> Name(` declarator line of each
// option (original lines 98/103/107/111) was dropped by the extraction;
// only the argument lists remain — confirm names against upstream.
99 "enable-poison-arg-attr-prop", cl::init(true), cl::Hidden,
100 cl::desc("Try to propagate nonnull and nofpclass argument attributes from "
101 "callsites to caller functions."));
102
// Escape hatch: disables the nounwind inference performed later in this file.
104 "disable-nounwind-inference", cl::Hidden,
105 cl::desc("Stop inferring nounwind attribute during function-attrs pass"));
106
// Escape hatch: disables the nofree inference.
108 "disable-nofree-inference", cl::Hidden,
109 cl::desc("Stop inferring nofree attribute during function-attrs pass"));
110
// Default-on kill switch for the ThinLTO thin-link propagation below.
112 "disable-thinlto-funcattrs", cl::init(true), cl::Hidden,
113 cl::desc("Don't propagate function-attrs in thinLTO"));
114
// Bump the appropriate captures(...) statistic for one inferred capture info:
// NumCapturesNone for a full captures(none), NumCapturesPartial otherwise.
// NOTE(review): the function signature (original line 115) was dropped by the
// extraction — presumably `static void ...(CaptureInfo CI)`; confirm upstream.
116 if (capturesNothing(CI))
117 ++NumCapturesNone;
118 else
119 ++NumCapturesPartial;
120}
121
122namespace {
123
// The set of functions in the SCC currently being processed; insertion order
// is preserved so iteration is deterministic.
124using SCCNodeSet = SmallSetVector<Function *, 8>;
125
126} // end anonymous namespace
127
// Fold the access (Loc, MR) into the running memory-effects summary.
// NOTE(review): the first signature line (original line 128) was dropped by
// the extraction — presumably `static void addLocAccess(MemoryEffects &ME,
// const MemoryLocation &Loc,`; confirm upstream.
129 ModRefInfo MR, AAResults &AAR) {
130 // Ignore accesses to known-invariant or local memory.
131 MR &= AAR.getModRefInfoMask(Loc, /*IgnoreLocal=*/true);
132 if (isNoModRef(MR))
133 return;
134
135 const Value *UO = getUnderlyingObjectAggressive(Loc.Ptr);
// Accesses to function-local allocas never escape the function's effects.
136 if (isa<AllocaInst>(UO))
137 return;
138 if (isa<Argument>(UO)) {
// NOTE(review): the argmem update statement (original line 139) was dropped
// by the extraction here — confirm against upstream.
140 return;
141 }
142
143 // If it's not an identified object, it might be an argument.
144 if (!isIdentifiedObject(UO))
// NOTE(review): the body of this if (original lines 145-147) was dropped by
// the extraction — confirm against upstream.
148}
149
150static void addArgLocs(MemoryEffects &ME, const CallBase *Call,
151 ModRefInfo ArgMR, AAResults &AAR) {
152 for (const Value *Arg : Call->args()) {
153 if (!Arg->getType()->isPtrOrPtrVectorTy())
154 continue;
155
156 addLocAccess(ME,
157 MemoryLocation::getBeforeOrAfter(Arg, Call->getAAMetadata()),
158 ArgMR, AAR);
159 }
160}
161
162/// Returns the memory access attribute for function F using AAR for AA results,
163/// where SCCNodes is the current SCC.
164///
165/// If ThisBody is true, this function may examine the function body and will
166/// return a result pertaining to this copy of the function. If it is false, the
167/// result will be based only on AA results for the function declaration; it
168/// will be assumed that some other (perhaps less optimized) version of the
169/// function may be selected at link time.
170///
171/// The return value is split into two parts: Memory effects that always apply,
172/// and additional memory effects that apply if any of the functions in the SCC
173/// can access argmem.
174static std::pair<MemoryEffects, MemoryEffects>
// NOTE(review): the declarator line with the function name and leading
// parameters (original line 175) was dropped by the extraction.
176 const SCCNodeSet &SCCNodes) {
177 MemoryEffects OrigME = AAR.getMemoryEffects(&F);
178 if (OrigME.doesNotAccessMemory())
179 // Already perfect!
180 return {OrigME, MemoryEffects::none()};
181
// Without the body, AA's declaration-level answer is all we may report.
182 if (!ThisBody)
183 return {OrigME, MemoryEffects::none()};
184
// NOTE(review): the declaration/initialization of the accumulator `ME`
// (original line 185) was dropped by the extraction.
186 // Additional locations accessed if the SCC accesses argmem.
187 MemoryEffects RecursiveArgME = MemoryEffects::none();
188
189 // Inalloca and preallocated arguments are always clobbered by the call.
190 if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
191 F.getAttributes().hasAttrSomewhere(Attribute::Preallocated))
// NOTE(review): the then-statement of this if (original line 192) was
// dropped by the extraction — confirm against upstream.
193
194 // Scan the function body for instructions that may read or write memory.
195 for (Instruction &I : instructions(F)) {
196 // Some instructions can be ignored even if they read or write memory.
197 // Detect these now, skipping to the next instruction if one is found.
198 if (auto *Call = dyn_cast<CallBase>(&I)) {
199 // We can optimistically ignore calls to functions in the same SCC, with
200 // two caveats:
201 // * Calls with operand bundles may have additional effects.
202 // * Argument memory accesses may imply additional effects depending on
203 // what the argument location is.
204 if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
205 SCCNodes.count(Call->getCalledFunction())) {
206 // Keep track of which additional locations are accessed if the SCC
207 // turns out to access argmem.
208 addArgLocs(RecursiveArgME, Call, ModRefInfo::ModRef, AAR);
209 continue;
210 }
211
212 MemoryEffects CallME = AAR.getMemoryEffects(Call);
213
214 // If the call doesn't access memory, we're done.
215 if (CallME.doesNotAccessMemory())
216 continue;
217
218 // A pseudo probe call shouldn't change any function attribute since it
219 // doesn't translate to a real instruction. It comes with a memory access
220 // tag to prevent itself being removed by optimizations and not block
221 // other instructions being optimized.
// NOTE(review): the pseudo-probe condition this `continue` belongs to
// (original line 222) was dropped by the extraction.
223 continue;
224
225 // Merge callee's memory effects into caller's ones, including
226 // inaccessible and errno memory, but excluding argument memory, which is
227 // handled separately.
// NOTE(review): the merge statement described above (original line 228) was
// dropped by the extraction.
229
230 // If the call accesses captured memory (currently part of "other") and
231 // an argument is captured (currently not tracked), then it may also
232 // access argument memory.
233 ModRefInfo OtherMR = CallME.getModRef(IRMemLocation::Other);
234 ME |= MemoryEffects::argMemOnly(OtherMR);
235
236 // Check whether all pointer arguments point to local memory, and
237 // ignore calls that only access local memory.
// NOTE(review): the declaration of `ArgMR` (original line 238) was dropped
// by the extraction.
239 if (ArgMR != ModRefInfo::NoModRef)
240 addArgLocs(ME, Call, ArgMR, AAR);
241 continue;
242 }
243
// NOTE(review): the declaration of `MR` (original line 244) was dropped by
// the extraction.
245 if (I.mayWriteToMemory())
246 MR |= ModRefInfo::Mod;
247 if (I.mayReadFromMemory())
248 MR |= ModRefInfo::Ref;
249 if (MR == ModRefInfo::NoModRef)
250 continue;
251
252 std::optional<MemoryLocation> Loc = MemoryLocation::getOrNone(&I);
253 if (!Loc) {
254 // If no location is known, conservatively assume anything can be
255 // accessed.
256 ME |= MemoryEffects(MR);
257 continue;
258 }
259
260 // Volatile operations may access inaccessible memory.
261 if (I.isVolatile())
// NOTE(review): the then-statement of the volatile check (original line 262)
// was dropped by the extraction.
263
264 addLocAccess(ME, *Loc, MR, AAR);
265 }
266
// Never report better effects than AA already proved for the declaration.
267 return {OrigME & ME, RecursiveArgME};
268}
269
// Convenience wrapper: compute this body's always-applicable memory effects,
// with an empty SCC and discarding the recursive-argmem part.
// NOTE(review): the signature line (original line 270) was dropped by the
// extraction — presumably `MemoryEffects llvm::computeFunctionBodyMemoryAccess(
// Function &F,`; confirm upstream.
271 AAResults &AAR) {
272 return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {}).first;
273}
274
275/// Deduce readonly/readnone/writeonly attributes for the SCC.
276template <typename AARGetterT>
277static void addMemoryAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
// NOTE(review): the trailing parameter list (original line 278, the `Changed`
// set) and the initialization of the accumulator `ME` (line 279) were dropped
// by the extraction.
280 MemoryEffects RecursiveArgME = MemoryEffects::none();
281 for (Function *F : SCCNodes) {
282 // Call the callable parameter to look up AA results for this function.
283 AAResults &AAR = AARGetter(*F);
284 // Non-exact function definitions may not be selected at link time, and an
285 // alternative version that writes to memory may be selected. See the
286 // comment on GlobalValue::isDefinitionExact for more details.
287 auto [FnME, FnRecursiveArgME] =
288 checkFunctionMemoryAccess(*F, F->hasExactDefinition(), AAR, SCCNodes);
289 ME |= FnME;
290 RecursiveArgME |= FnRecursiveArgME;
291 // Reached bottom of the lattice, we will not be able to improve the result.
292 if (ME == MemoryEffects::unknown())
293 return;
294 }
295
296 // If the SCC accesses argmem, add recursive accesses resulting from that.
// NOTE(review): the declaration of `ArgMR` (original line 297) was dropped by
// the extraction.
298 if (ArgMR != ModRefInfo::NoModRef)
299 ME |= RecursiveArgME & MemoryEffects(ArgMR);
300
// Apply the SCC-wide result to every member, tightening but never loosening
// each function's existing memory effects.
301 for (Function *F : SCCNodes) {
302 MemoryEffects OldME = F->getMemoryEffects();
303 MemoryEffects NewME = ME & OldME;
304 if (NewME != OldME) {
305 ++NumMemoryAttr;
306 F->setMemoryEffects(NewME);
307 // Remove conflicting writable attributes.
// NOTE(review): the guard condition for this removal (original line 308) was
// dropped by the extraction — confirm against upstream.
309 for (Argument &A : F->args())
310 A.removeAttr(Attribute::Writable);
311 Changed.insert(F);
312 }
313 }
314}
315
316// Compute definitive function attributes for a function taking into account
317// prevailing definitions and linkage types
// NOTE(review): the signature line (original line 318) was dropped by the
// extraction — presumably `static FunctionSummary *calculatePrevailingSummary(`;
// confirm upstream.
319 ValueInfo VI,
320 DenseMap<ValueInfo, FunctionSummary *> &CachedPrevailingSummary,
// NOTE(review): the callback parameter type (original line 321) was dropped by
// the extraction.
322 IsPrevailing) {
323
// Memoize: a cached entry (possibly nullptr = "conservative") is returned
// as-is.
324 auto [It, Inserted] = CachedPrevailingSummary.try_emplace(VI);
325 if (!Inserted)
326 return It->second;
327
328 /// At this point, prevailing symbols have been resolved. The following leads
329 /// to returning a conservative result:
330 /// - Multiple instances with local linkage. Normally local linkage would be
331 /// unique per module
332 /// as the GUID includes the module path. We could have a guid alias if
333 /// there wasn't any distinguishing path when each file was compiled, but
334 /// that should be rare so we'll punt on those.
335
336 /// These next 2 cases should not happen and will assert:
337 /// - Multiple instances with external linkage. This should be caught in
338 /// symbol resolution
339 /// - Non-existent FunctionSummary for Aliasee. This presents a hole in our
340 /// knowledge meaning we have to go conservative.
341
342 /// Otherwise, we calculate attributes for a function as:
343 /// 1. If we have a local linkage, take its attributes. If there's somehow
344 /// multiple, bail and go conservative.
345 /// 2. If we have an external/WeakODR/LinkOnceODR linkage check that it is
346 /// prevailing, take its attributes.
347 /// 3. If we have a Weak/LinkOnce linkage the copies can have semantic
348 /// differences. However, if the prevailing copy is known it will be used
349 /// so take its attributes. If the prevailing copy is in a native file
350 /// all IR copies will be dead and propagation will go conservative.
351 /// 4. AvailableExternally summaries without a prevailing copy are known to
352 /// occur in a couple of circumstances:
353 /// a. An internal function gets imported due to its caller getting
354 /// imported, it becomes AvailableExternally but no prevailing
355 /// definition exists. Because it has to get imported along with its
356 /// caller the attributes will be captured by propagating on its
357 /// caller.
358 /// b. C++11 [temp.explicit]p10 can generate AvailableExternally
359 /// definitions of explicitly instanced template declarations
360 /// for inlining which are ultimately dropped from the TU. Since this
361 /// is localized to the TU the attributes will have already made it to
362 /// the callers.
363 /// These are edge cases and already captured by their callers so we
364 /// ignore these for now. If they become relevant to optimize in the
365 /// future this can be revisited.
366 /// 5. Otherwise, go conservative.
367
368 FunctionSummary *Local = nullptr;
369 FunctionSummary *Prevailing = nullptr;
370
371 for (const auto &GVS : VI.getSummaryList()) {
372 if (!GVS->isLive())
373 continue;
374
375 FunctionSummary *FS = dyn_cast<FunctionSummary>(GVS->getBaseObject());
376 // Virtual and Unknown (e.g. indirect) calls require going conservative
377 if (!FS || FS->fflags().HasUnknownCall)
378 return nullptr;
379
380 const auto &Linkage = GVS->linkage();
// NOTE(review): the `switch (Linkage)` skeleton and its case labels (original
// lines 381, 383, 392, 396-399, 404) were dropped by the extraction; the
// blocks below correspond to the local-linkage, ODR, and interposable cases
// described in the big comment above — confirm against upstream.
382 if (Local) {
// NOTE(review): the opening `LLVM_DEBUG(` of this debug print (original line
// 383) was also dropped here.
384 dbgs()
385 << "ThinLTO FunctionAttrs: Multiple Local Linkage, bailing on "
386 "function "
387 << VI.name() << " from " << FS->modulePath() << ". Previous module "
388 << Local->modulePath() << "\n");
389 return nullptr;
390 }
391 Local = FS;
393 assert(IsPrevailing(VI.getGUID(), GVS.get()) || GVS->wasPromoted());
394 Prevailing = FS;
395 break;
400 if (IsPrevailing(VI.getGUID(), GVS.get())) {
401 Prevailing = FS;
402 break;
403 }
405 // TODO: Handle these cases if they become meaningful
406 continue;
407 }
408 }
409
// Commit whichever copy we settled on to the cache (nullptr if neither —
// i.e. fully conservative).
410 auto &CPS = CachedPrevailingSummary[VI];
411 if (Local) {
412 assert(!Prevailing);
413 CPS = Local;
414 } else if (Prevailing) {
415 assert(!Local);
416 CPS = Prevailing;
417 }
418
419 return CPS;
420}
421
// Propagate norecurse/nounwind summary flags bottom-up over the index's call
// graph SCCs during the ThinLTO thin link.
// NOTE(review): the return type + name line (original line 422) and the
// callback parameter type (line 424) were dropped by the extraction —
// presumably `bool llvm::thinLTOPropagateFunctionAttrs(`; confirm upstream.
423 ModuleSummaryIndex &Index,
425 IsPrevailing) {
426 // TODO: implement addNoAliasAttrs once
427 // there's more information about the return type in the summary
// NOTE(review): the guard condition for this early return (original line 428,
// presumably testing the disable-thinlto-funcattrs option above) was dropped
// by the extraction.
429 return false;
430
431 DenseMap<ValueInfo, FunctionSummary *> CachedPrevailingSummary;
432 bool Changed = false;
433
// Processes one SCC: infer flags across all members, then stamp them onto
// every member's summaries. Bails (no propagation) on any missing summary.
434 auto PropagateAttributes = [&](std::vector<ValueInfo> &SCCNodes) {
435 // Assume we can propagate unless we discover otherwise
436 FunctionSummary::FFlags InferredFlags;
437 InferredFlags.NoRecurse = (SCCNodes.size() == 1);
438 InferredFlags.NoUnwind = true;
439
440 for (auto &V : SCCNodes) {
441 FunctionSummary *CallerSummary =
442 calculatePrevailingSummary(V, CachedPrevailingSummary, IsPrevailing);
443
444 // Function summaries can fail to contain information such as declarations
445 if (!CallerSummary)
446 return;
447
448 if (CallerSummary->fflags().MayThrow)
449 InferredFlags.NoUnwind = false;
450
451 for (const auto &Callee : CallerSummary->calls()) {
// NOTE(review): the declaration `FunctionSummary *CalleeSummary =
// calculatePrevailingSummary(` (original line 452) was dropped by the
// extraction.
453 Callee.first, CachedPrevailingSummary, IsPrevailing);
454
455 if (!CalleeSummary)
456 return;
457
458 if (!CalleeSummary->fflags().NoRecurse)
459 InferredFlags.NoRecurse = false;
460
461 if (!CalleeSummary->fflags().NoUnwind)
462 InferredFlags.NoUnwind = false;
463
// Both flags already lost — nothing further to learn from more callees.
464 if (!InferredFlags.NoUnwind && !InferredFlags.NoRecurse)
465 break;
466 }
467 }
468
469 if (InferredFlags.NoUnwind || InferredFlags.NoRecurse) {
470 Changed = true;
471 for (auto &V : SCCNodes) {
472 if (InferredFlags.NoRecurse) {
473 LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoRecurse to "
474 << V.name() << "\n");
475 ++NumThinLinkNoRecurse;
476 }
477
478 if (InferredFlags.NoUnwind) {
479 LLVM_DEBUG(dbgs() << "ThinLTO FunctionAttrs: Propagated NoUnwind to "
480 << V.name() << "\n");
481 ++NumThinLinkNoUnwind;
482 }
483
484 for (const auto &S : V.getSummaryList()) {
485 if (auto *FS = dyn_cast<FunctionSummary>(S.get())) {
486 if (InferredFlags.NoRecurse)
487 FS->setNoRecurse();
488
489 if (InferredFlags.NoUnwind)
490 FS->setNoUnwind();
491 }
492 }
493 }
494 }
495 };
496
497 // Call propagation functions on each SCC in the Index
498 for (scc_iterator<ModuleSummaryIndex *> I = scc_begin(&Index); !I.isAtEnd();
499 ++I) {
500 std::vector<ValueInfo> Nodes(*I);
501 PropagateAttributes(Nodes);
502 }
503 return Changed;
504}
505
506namespace {
507
508/// For a given pointer Argument, this retains a list of Arguments of functions
509/// in the same SCC that the pointer data flows into. We use this to build an
510/// SCC of the arguments.
511struct ArgumentGraphNode {
// The argument this node represents; null for ArgumentGraph's synthetic root.
512 Argument *Definition;
513 /// CaptureComponents for this argument, excluding captures via Uses.
514 /// We don't distinguish between other/return captures here.
// NOTE(review): the member declarations (original lines 515-516 — the capture
// info field documented above and the `Uses` vector referenced by
// ArgumentGraph/GraphTraits below) were dropped by the extraction.
517};
518
519class ArgumentGraph {
520 // We store pointers to ArgumentGraphNode objects, so it's important that
521 // that they not move around upon insert.
522 using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;
523
524 ArgumentMapTy ArgumentMap;
525
526 // There is no root node for the argument graph, in fact:
527 // void f(int *x, int *y) { if (...) f(x, y); }
528 // is an example where the graph is disconnected. The SCCIterator requires a
529 // single entry point, so we maintain a fake ("synthetic") root node that
530 // uses every node. Because the graph is directed and nothing points into
531 // the root, it will not participate in any SCCs (except for its own).
532 ArgumentGraphNode SyntheticRoot;
533
534public:
535 ArgumentGraph() { SyntheticRoot.Definition = nullptr; }
536
// NOTE(review): the `iterator` type alias used by begin()/end() below
// (original line 537) was dropped by the extraction.
538
539 iterator begin() { return SyntheticRoot.Uses.begin(); }
540 iterator end() { return SyntheticRoot.Uses.end(); }
541 ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }
542
// Look up (creating on first use) the node for argument A and register it
// with the synthetic root so iteration reaches every node.
543 ArgumentGraphNode *operator[](Argument *A) {
544 ArgumentGraphNode &Node = ArgumentMap[A];
545 Node.Definition = A;
546 SyntheticRoot.Uses.push_back(&Node);
547 return &Node;
548 }
549};
550
551/// This tracker checks whether callees are in the SCC, and if so it does not
552/// consider that a capture, instead adding it to the "Uses" list and
553/// continuing with the analysis.
554struct ArgumentUsesTracker : public CaptureTracker {
555 ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}
556
// Too many uses to analyze: go fully conservative.
557 void tooManyUses() override { CI = CaptureInfo::all(); }
558
559 Action captured(const Use *U, UseCaptureInfo UseCI) override {
560 if (updateCaptureInfo(U, UseCI.UseCC)) {
561 // Don't bother continuing if we already capture everything.
562 if (capturesAll(CI.getOtherComponents()))
563 return Stop;
564 return Continue;
565 }
566
567 // For SCC argument tracking, we're not going to analyze other/ret
568 // components separately, so don't follow the return value.
569 return ContinueIgnoringReturn;
570 }
571
// Record the capture for use U. Returns true if the use was folded into CI
// (a real capture), false if it was deferred to Uses (SCC-internal flow).
572 bool updateCaptureInfo(const Use *U, CaptureComponents CC) {
573 CallBase *CB = dyn_cast<CallBase>(U->getUser());
574 if (!CB) {
575 if (isa<ReturnInst>(U->getUser()))
576 CI |= CaptureInfo::retOnly(CC);
577 else
578 // Conservatively assume that the captured value might make its way
579 // into the return value as well. This could be made more precise.
580 CI |= CaptureInfo(CC);
581 return true;
582 }
583
// NOTE(review): the declaration of `F` (original line 584, presumably
// `Function *F = CB->getCalledFunction();`) was dropped by the extraction.
585 if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
586 CI |= CaptureInfo(CC);
587 return true;
588 }
589
590 assert(!CB->isCallee(U) && "callee operand reported captured?");
591 const unsigned UseIndex = CB->getDataOperandNo(U);
592 if (UseIndex >= CB->arg_size()) {
593 // Data operand, but not a argument operand -- must be a bundle operand
594 assert(CB->hasOperandBundles() && "Must be!");
595
596 // CaptureTracking told us that we're being captured by an operand bundle
597 // use. In this case it does not matter if the callee is within our SCC
598 // or not -- we've been captured in some unknown way, and we have to be
599 // conservative.
600 CI |= CaptureInfo(CC);
601 return true;
602 }
603
// A use passed through the variadic tail has no matching formal argument.
604 if (UseIndex >= F->arg_size()) {
605 assert(F->isVarArg() && "More params than args in non-varargs call");
606 CI |= CaptureInfo(CC);
607 return true;
608 }
609
610 // TODO(captures): Could improve precision by remembering maximum
611 // capture components for the argument.
612 Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
613 return false;
614 }
615
616 // Does not include potential captures via Uses in the SCC.
617 CaptureInfo CI = CaptureInfo::none();
618
619 // Uses within our SCC.
// NOTE(review): the declaration of the `Uses` vector (original line 620) was
// dropped by the extraction.
621
622 const SCCNodeSet &SCCNodes;
623};
624
625/// A struct of argument use: a Use and the offset it accesses. This struct
626/// is to track uses inside function via GEP. If GEP has a non-constant index,
627/// the Offset field is nullopt.
628struct ArgumentUse {
// The use being tracked (possibly reached through a chain of GEPs).
629 Use *U;
// Byte offset from the argument base; nullopt once a non-constant GEP index
// makes it unknown.
630 std::optional<int64_t> Offset;
631};
632
633/// A struct of argument access info. "Unknown" accesses are the cases like
634/// unrecognized instructions, instructions that have more than one use of
635/// the argument, or volatile memory accesses. "WriteWithSideEffect" are call
636/// instructions that not only write an argument but also capture it.
637struct ArgumentAccessInfo {
638 enum class AccessType : uint8_t { Write, WriteWithSideEffect, Read, Unknown };
// The kind of access performed by the instruction.
639 AccessType ArgAccessType;
// Constant byte ranges (relative to the argument) touched by the access;
// empty when the extent is not a known constant.
640 ConstantRangeList AccessRanges;
641};
642
643/// A struct to wrap the argument use info per block.
644struct UsesPerBlockInfo {
// Access info for each instruction in the block that uses the argument.
645 SmallDenseMap<Instruction *, ArgumentAccessInfo, 4> Insts;
// True if any instruction in the block writes the argument.
646 bool HasWrites = false;
// True if any instruction's access could not be classified (AccessType::Unknown).
647 bool HasUnknownAccess = false;
648};
649
650/// A struct to summarize the argument use info in a function.
651struct ArgumentUsesSummary {
// True if any block writes the argument.
652 bool HasAnyWrite = false;
// True if a write occurs outside the entry block.
653 bool HasWriteOutsideEntryBB = false;
// Per-basic-block breakdown of the argument's uses.
654 SmallDenseMap<const BasicBlock *, UsesPerBlockInfo, 16> UsesPerBlock;
655};
656
// Classify how instruction I accesses the argument use ArgUse (read, write,
// write-with-capture, or unknown) and compute the constant byte ranges it
// touches, when those are known.
657ArgumentAccessInfo getArgumentAccessInfo(const Instruction *I,
658 const ArgumentUse &ArgUse,
659 const DataLayout &DL) {
// Range [Offset, Offset + store-size) for a fixed-size type; nullopt for
// scalable types, unknown offsets, or signed-64-bit overflow.
660 auto GetTypeAccessRange =
661 [&DL](Type *Ty,
662 std::optional<int64_t> Offset) -> std::optional<ConstantRange> {
663 auto TypeSize = DL.getTypeStoreSize(Ty);
664 if (!TypeSize.isScalable() && Offset) {
665 int64_t Size = TypeSize.getFixedValue();
666 APInt Low(64, *Offset, true);
667 bool Overflow;
668 APInt High = Low.sadd_ov(APInt(64, Size, true), Overflow);
669 // Bail if the range overflows signed 64-bit int.
670 if (Overflow)
671 return std::nullopt;
672 return ConstantRange(Low, High);
673 }
674 return std::nullopt;
675 };
// Same, but for an explicit length operand (memset/memcpy style): requires a
// positive constant length and a known offset.
676 auto GetConstantIntRange =
677 [](Value *Length,
678 std::optional<int64_t> Offset) -> std::optional<ConstantRange> {
679 auto *ConstantLength = dyn_cast<ConstantInt>(Length);
680 if (ConstantLength && Offset) {
681 int64_t Len = ConstantLength->getSExtValue();
682
683 // Reject zero or negative lengths
684 if (Len <= 0)
685 return std::nullopt;
686
687 APInt Low(64, *Offset, true);
688 bool Overflow;
689 APInt High = Low.sadd_ov(APInt(64, Len, true), Overflow);
690 if (Overflow)
691 return std::nullopt;
692
693 return ConstantRange(Low, High);
694 }
695 return std::nullopt;
696 };
697
698 if (auto *SI = dyn_cast<StoreInst>(I)) {
699 if (SI->isSimple() && &SI->getOperandUse(1) == ArgUse.U) {
700 // Get the fixed type size of "SI". Since the access range of a write
701 // will be unioned, if "SI" doesn't have a fixed type size, we just set
702 // the access range to empty.
703 ConstantRangeList AccessRanges;
704 if (auto TypeAccessRange =
705 GetTypeAccessRange(SI->getAccessType(), ArgUse.Offset))
706 AccessRanges.insert(*TypeAccessRange);
707 return {ArgumentAccessInfo::AccessType::Write, std::move(AccessRanges)};
708 }
709 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
710 if (LI->isSimple()) {
711 assert(&LI->getOperandUse(0) == ArgUse.U);
712 // Get the fixed type size of "LI". Different from Write, if "LI"
713 // doesn't have a fixed type size, we conservatively set as a clobber
714 // with an empty access range.
715 if (auto TypeAccessRange =
716 GetTypeAccessRange(LI->getAccessType(), ArgUse.Offset))
717 return {ArgumentAccessInfo::AccessType::Read, {*TypeAccessRange}};
718 }
719 } else if (auto *MemSet = dyn_cast<MemSetInst>(I)) {
720 if (!MemSet->isVolatile()) {
721 ConstantRangeList AccessRanges;
722 if (auto AccessRange =
723 GetConstantIntRange(MemSet->getLength(), ArgUse.Offset))
724 AccessRanges.insert(*AccessRange);
725 return {ArgumentAccessInfo::AccessType::Write, AccessRanges};
726 }
727 } else if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
728 if (!MTI->isVolatile()) {
// Operand 0 is the destination (write), operand 1 the source (read).
729 if (&MTI->getOperandUse(0) == ArgUse.U) {
730 ConstantRangeList AccessRanges;
731 if (auto AccessRange =
732 GetConstantIntRange(MTI->getLength(), ArgUse.Offset))
733 AccessRanges.insert(*AccessRange);
734 return {ArgumentAccessInfo::AccessType::Write, AccessRanges};
735 } else if (&MTI->getOperandUse(1) == ArgUse.U) {
736 if (auto AccessRange =
737 GetConstantIntRange(MTI->getLength(), ArgUse.Offset))
738 return {ArgumentAccessInfo::AccessType::Read, {*AccessRange}};
739 }
740 }
741 } else if (auto *CB = dyn_cast<CallBase>(I)) {
742 if (CB->isArgOperand(ArgUse.U) &&
743 !CB->isByValArgument(CB->getArgOperandNo(ArgUse.U))) {
744 unsigned ArgNo = CB->getArgOperandNo(ArgUse.U);
745 bool IsInitialize = CB->paramHasAttr(ArgNo, Attribute::Initializes);
746 if (IsInitialize && ArgUse.Offset) {
747 // Argument is a Write when parameter is writeonly/readnone
748 // and nocapture. Otherwise, it's a WriteWithSideEffect.
749 auto Access = CB->onlyWritesMemory(ArgNo) && CB->doesNotCapture(ArgNo)
750 ? ArgumentAccessInfo::AccessType::Write
751 : ArgumentAccessInfo::AccessType::WriteWithSideEffect;
752 ConstantRangeList AccessRanges;
753 Attribute Attr = CB->getParamAttr(ArgNo, Attribute::Initializes);
// NOTE(review): the declaration of `CBCRL` (original line 754, presumably
// extracting the initializes ranges from `Attr`) was dropped by the
// extraction.
755 for (ConstantRange &CR : CBCRL)
756 AccessRanges.insert(ConstantRange(CR.getLower() + *ArgUse.Offset,
757 CR.getUpper() + *ArgUse.Offset));
758 return {Access, AccessRanges};
759 }
760 }
761 }
762 // Other unrecognized instructions are considered as unknown.
763 return {ArgumentAccessInfo::AccessType::Unknown, {}};
764}
765
766// Collect the uses of argument "A" in "F".
767ArgumentUsesSummary collectArgumentUsesPerBlock(Argument &A, Function &F) {
768 auto &DL = F.getParent()->getDataLayout();
769 unsigned PointerSize =
770 DL.getIndexSizeInBits(A.getType()->getPointerAddressSpace());
771 ArgumentUsesSummary Result;
772
773 BasicBlock &EntryBB = F.getEntryBlock();
// NOTE(review): the declaration of `Worklist` (original line 774, a
// SmallVector of ArgumentUse) was dropped by the extraction.
// Seed the worklist with the argument's direct uses, all at offset 0.
775 for (Use &U : A.uses())
776 Worklist.push_back({&U, 0});
777
778 // Update "UsesPerBlock" with the block of "I" as key and "Info" as value.
779 // Return true if the block of "I" has write accesses after updating.
780 auto UpdateUseInfo = [&Result](Instruction *I, ArgumentAccessInfo Info) {
781 auto *BB = I->getParent();
782 auto &BBInfo = Result.UsesPerBlock[BB];
783 auto [It, Inserted] = BBInfo.Insts.try_emplace(I);
784 auto &IInfo = It->second;
785
786 // Instructions that have more than one use of the argument are considered
787 // as clobbers.
788 if (!Inserted) {
789 IInfo = {ArgumentAccessInfo::AccessType::Unknown, {}};
790 BBInfo.HasUnknownAccess = true;
791 return false;
792 }
793
794 IInfo = std::move(Info);
795 BBInfo.HasUnknownAccess |=
796 IInfo.ArgAccessType == ArgumentAccessInfo::AccessType::Unknown;
// A write only "counts" if we know at least one concrete byte range.
797 bool InfoHasWrites =
798 (IInfo.ArgAccessType == ArgumentAccessInfo::AccessType::Write ||
799 IInfo.ArgAccessType ==
800 ArgumentAccessInfo::AccessType::WriteWithSideEffect) &&
801 !IInfo.AccessRanges.empty();
802 BBInfo.HasWrites |= InfoHasWrites;
803 return InfoHasWrites;
804 };
805
806 // No need for a visited set because we don't look through phis, so there are
807 // no cycles.
808 while (!Worklist.empty()) {
809 ArgumentUse ArgUse = Worklist.pop_back_val();
810 User *U = ArgUse.U->getUser();
811 // Add GEP uses to worklist.
812 // If the GEP is not a constant GEP, set the ArgumentUse::Offset to nullopt.
813 if (auto *GEP = dyn_cast<GEPOperator>(U)) {
814 std::optional<int64_t> NewOffset = std::nullopt;
815 if (ArgUse.Offset) {
816 APInt Offset(PointerSize, 0);
817 if (GEP->accumulateConstantOffset(DL, Offset))
818 NewOffset = *ArgUse.Offset + Offset.getSExtValue();
819 }
820 for (Use &U : GEP->uses())
821 Worklist.push_back({&U, NewOffset});
822 continue;
823 }
824
825 auto *I = cast<Instruction>(U);
826 bool HasWrite = UpdateUseInfo(I, getArgumentAccessInfo(I, ArgUse, DL));
827
828 Result.HasAnyWrite |= HasWrite;
829
830 if (HasWrite && I->getParent() != &EntryBB)
831 Result.HasWriteOutsideEntryBB = true;
832 }
833 return Result;
834}
835
836} // end anonymous namespace
837
838namespace llvm {
839
// GraphTraits specialization so a node's children are the argument nodes its
// pointer data flows into (the Uses list).
840template <> struct GraphTraits<ArgumentGraphNode *> {
841 using NodeRef = ArgumentGraphNode *;
// NOTE(review): the `ChildIteratorType` alias used below (original line 842)
// was dropped by the extraction.
843
844 static NodeRef getEntryNode(NodeRef A) { return A; }
845 static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
846 static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
847};
848
// GraphTraits specialization for the whole graph: the synthetic root is the
// entry node, so the SCC iterator can reach every argument node (see the
// comment in ArgumentGraph above).
849template <>
850struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
851 static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }
852
853 static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
854 return AG->begin();
855 }
856
857 static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
858};
859
860} // end namespace llvm
861
862/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
// NOTE(review): the signature lines (original lines 863-864) were dropped by
// the extraction — presumably `static Attribute::AttrKind
// determinePointerAccessAttrs(Argument *A,`; confirm upstream.
865 const SmallPtrSet<Argument *, 8> &SCCNodes) {
866 SmallVector<Use *, 32> Worklist;
// NOTE(review): the declaration of the `Visited` set used below (original
// line 867) was dropped by the extraction.
868
869 // inalloca arguments are always clobbered by the call.
870 if (A->hasInAllocaAttr() || A->hasPreallocatedAttr())
871 return Attribute::None;
872
873 bool IsRead = false;
874 bool IsWrite = false;
875
// Seed the traversal with the argument's direct uses.
876 for (Use &U : A->uses()) {
877 Visited.insert(&U);
878 Worklist.push_back(&U);
879 }
880
881 while (!Worklist.empty()) {
882 if (IsWrite && IsRead)
883 // No point in searching further..
884 return Attribute::None;
885
886 Use *U = Worklist.pop_back_val();
887 Instruction *I = cast<Instruction>(U->getUser());
888
889 switch (I->getOpcode()) {
890 case Instruction::BitCast:
891 case Instruction::GetElementPtr:
892 case Instruction::PHI:
893 case Instruction::Select:
894 case Instruction::AddrSpaceCast:
895 // The original value is not read/written via this if the new value isn't.
896 for (Use &UU : I->uses())
897 if (Visited.insert(&UU).second)
898 Worklist.push_back(&UU);
899 break;
900
901 case Instruction::Call:
902 case Instruction::Invoke: {
903 CallBase &CB = cast<CallBase>(*I);
904 if (CB.isCallee(U)) {
905 IsRead = true;
906 // Note that indirect calls do not capture, see comment in
907 // CaptureTracking for context
908 continue;
909 }
910
911 // Given we've explicitly handled the callee operand above, what's left
912 // must be a data operand (e.g. argument or operand bundle)
913 const unsigned UseIndex = CB.getDataOperandNo(U);
914
915 // Some intrinsics (for instance ptrmask) do not capture their results,
916 // but return results thas alias their pointer argument, and thus should
917 // be handled like GEP or addrspacecast above.
// NOTE(review): the opening of this condition (original line 918, the
// intrinsic-aliasing predicate described above) was dropped by the
// extraction.
919 &CB, /*MustPreserveNullness=*/false)) {
920 for (Use &UU : CB.uses())
921 if (Visited.insert(&UU).second)
922 Worklist.push_back(&UU);
923 } else if (capturesAnyProvenance(CB.getCaptureInfo(UseIndex))) {
924 if (!CB.onlyReadsMemory())
925 // If the callee can save a copy into other memory, then simply
926 // scanning uses of the call is insufficient. We have no way
927 // of tracking copies of the pointer through memory to see
928 // if a reloaded copy is written to, thus we must give up.
929 return Attribute::None;
930 // Push users for processing once we finish this one
931 if (!I->getType()->isVoidTy())
932 for (Use &UU : I->uses())
933 if (Visited.insert(&UU).second)
934 Worklist.push_back(&UU);
935 }
936
// NOTE(review): the declaration of `ArgMR` (original line 937) was dropped by
// the extraction.
938 if (isNoModRef(ArgMR))
939 continue;
940
941 if (Function *F = CB.getCalledFunction())
942 if (CB.isArgOperand(U) && UseIndex < F->arg_size() &&
943 SCCNodes.count(F->getArg(UseIndex)))
944 // This is an argument which is part of the speculative SCC. Note
945 // that only operands corresponding to formal arguments of the callee
946 // can participate in the speculation.
947 break;
948
949 // The accessors used on call site here do the right thing for calls and
950 // invokes with operand bundles.
951 if (CB.doesNotAccessMemory(UseIndex)) {
952 /* nop */
953 } else if (!isModSet(ArgMR) || CB.onlyReadsMemory(UseIndex)) {
954 IsRead = true;
955 } else if (!isRefSet(ArgMR) ||
956 CB.dataOperandHasImpliedAttr(UseIndex, Attribute::WriteOnly)) {
957 IsWrite = true;
958 } else {
959 return Attribute::None;
960 }
961 break;
962 }
963
964 case Instruction::Load:
965 // A volatile load has side effects beyond what readonly can be relied
966 // upon.
967 if (cast<LoadInst>(I)->isVolatile())
968 return Attribute::None;
969
970 IsRead = true;
971 break;
972
973 case Instruction::Store:
974 if (cast<StoreInst>(I)->getValueOperand() == *U)
975 // untrackable capture
976 return Attribute::None;
977
978 // A volatile store has side effects beyond what writeonly can be relied
979 // upon.
980 if (cast<StoreInst>(I)->isVolatile())
981 return Attribute::None;
982
983 IsWrite = true;
984 break;
985
986 case Instruction::ICmp:
987 case Instruction::Ret:
988 break;
989
990 default:
991 return Attribute::None;
992 }
993 }
994
// Fold the observations into a single attribute kind.
995 if (IsWrite && IsRead)
996 return Attribute::None;
997 else if (IsRead)
998 return Attribute::ReadOnly;
999 else if (IsWrite)
1000 return Attribute::WriteOnly;
1001 else
1002 return Attribute::ReadNone;
1003}
1004
/// Deduce returned attributes for the SCC.
///
/// For each function in \p SCCNodes with an exact definition and a non-void
/// return type, scan every return instruction; if all of them return the same
/// formal argument (looking through pointer casts), mark that argument
/// 'returned'. Functions whose attributes change are recorded via
/// Changed.insert().
static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
  // Check each function in turn, determining if an argument is always returned.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    // 'returned' is only meaningful when something is actually returned.
    if (F->getReturnType()->isVoidTy())
      continue;

    // There is nothing to do if an argument is already marked as 'returned'.
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned))
      continue;

    // Returns the unique argument returned by every return instruction in F,
    // or nullptr if the returns disagree or return a non-argument value.
    auto FindRetArg = [&]() -> Argument * {
      Argument *RetArg = nullptr;
      for (BasicBlock &BB : *F)
        if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
          // Note that stripPointerCasts should look through functions with
          // returned arguments.
          auto *RetVal =
              dyn_cast<Argument>(Ret->getReturnValue()->stripPointerCasts());
          // The type must match exactly; a cast-through return would change
          // the observed value type.
          if (!RetVal || RetVal->getType() != F->getReturnType())
            return nullptr;

          if (!RetArg)
            RetArg = RetVal;
          else if (RetArg != RetVal)
            return nullptr;
        }

      return RetArg;
    };

    if (Argument *RetArg = FindRetArg()) {
      RetArg->addAttr(Attribute::Returned);
      ++NumReturned;
      Changed.insert(F);
    }
  }
}
1050
/// If a callsite has arguments that are also arguments to the parent function,
/// try to propagate attributes from the callsite's arguments to the parent's
/// arguments. This may be important because inlining can cause information loss
/// when attribute knowledge disappears with the inlined call.
///
/// Returns true if any attribute of the parent function's arguments changed.
    return false;

  bool Changed = false;

  // For an argument attribute to transfer from a callsite to the parent, the
  // call must be guaranteed to execute every time the parent is called.
  // Conservatively, just check for calls in the entry block that are guaranteed
  // to execute.
  // TODO: This could be enhanced by testing if the callsite post-dominates the
  // entry block or by doing simple forward walks or backward walks to the
  // callsite.
  BasicBlock &Entry = F.getEntryBlock();
  for (Instruction &I : Entry) {
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (auto *CalledFunc = CB->getCalledFunction()) {
        for (auto &CSArg : CalledFunc->args()) {
          unsigned ArgNo = CSArg.getArgNo();
          // Only interesting if the operand passed at this callsite is itself
          // one of the caller's formal arguments.
          auto *FArg = dyn_cast<Argument>(CB->getArgOperand(ArgNo));
          if (!FArg)
            continue;

          if (CSArg.hasNonNullAttr(/*AllowUndefOrPoison=*/false)) {
            // If the non-null callsite argument operand is an argument to 'F'
            // (the caller) and the call is guaranteed to execute, then the
            // value must be non-null throughout 'F'.
            if (!FArg->hasNonNullAttr()) {
              FArg->addAttr(Attribute::NonNull);
              Changed = true;
            }
          } else if (FPClassTest CSNoFPClass = CB->getParamNoFPClass(ArgNo);
                     CSNoFPClass != fcNone &&
                     CB->paramHasAttr(ArgNo, Attribute::NoUndef)) {
            // Similarly, nofpclass knowledge at a guaranteed-to-execute
            // callsite (with noundef) widens the caller argument's nofpclass.
            FPClassTest ArgNoFPClass = FArg->getNoFPClass();

            if ((CSNoFPClass | ArgNoFPClass) != ArgNoFPClass) {
              FArg->addAttr(Attribute::getWithNoFPClass(
                  FArg->getContext(), CSNoFPClass | ArgNoFPClass));
              Changed = true;
            }
          }
        }
      }
    }
      // Stop scanning once execution is no longer guaranteed to continue.
      break;
  }

  return Changed;
}
1106
  // Applies access attribute R (readnone/readonly/writeonly) to argument A,
  // removing any conflicting access attributes first. Returns true if the
  // argument's attributes actually changed; updates statistics accordingly.
  assert((R == Attribute::ReadOnly || R == Attribute::ReadNone ||
          R == Attribute::WriteOnly)
         && "Must be an access attribute.");
  assert(A && "Argument must not be null.");

  // If the argument already has the attribute, nothing needs to be done.
  if (A->hasAttribute(R))
    return false;

  // Otherwise, remove potentially conflicting attribute, add the new one,
  // and update statistics.
  A->removeAttr(Attribute::WriteOnly);
  A->removeAttr(Attribute::ReadOnly);
  A->removeAttr(Attribute::ReadNone);
  // Remove conflicting writable attribute.
  if (R == Attribute::ReadNone || R == Attribute::ReadOnly)
    A->removeAttr(Attribute::Writable);
  A->addAttr(R);
  if (R == Attribute::ReadOnly)
    ++NumReadOnlyArg;
  else if (R == Attribute::WriteOnly)
    ++NumWriteOnlyArg;
  else
    ++NumReadNoneArg;
  return true;
}
1134
  // Infers the 'initializes' attribute for argument A of function F: the set
  // of byte ranges of the pointed-to memory that are guaranteed to be written
  // before being read on every path from the entry block. Returns true if the
  // attribute was added or widened.
  auto ArgumentUses = collectArgumentUsesPerBlock(A, F);
  // No write anywhere in the function, bail.
  if (!ArgumentUses.HasAnyWrite)
    return false;

  auto &UsesPerBlock = ArgumentUses.UsesPerBlock;
  BasicBlock &EntryBB = F.getEntryBlock();
  // A map to store the argument ranges initialized by a BasicBlock (including
  // its successors).
  // Visit the successors of "BB" block and the instructions in BB (post-order)
  // to get the argument ranges initialized by "BB" (including its successors).
  // The result will be cached in "Initialized".
  auto VisitBlock = [&](const BasicBlock *BB) -> ConstantRangeList {
    auto UPB = UsesPerBlock.find(BB);

    // Start with intersection of successors.
    // If this block has any clobbering use, we're going to clear out the
    // ranges at some point in this block anyway, so don't bother looking at
    // successors.
    if (UPB == UsesPerBlock.end() || !UPB->second.HasUnknownAccess) {
      bool HasAddedSuccessor = false;
      for (auto *Succ : successors(BB)) {
        if (auto SuccI = Initialized.find(Succ); SuccI != Initialized.end()) {
          if (HasAddedSuccessor) {
            CRL = CRL.intersectWith(SuccI->second);
          } else {
            CRL = SuccI->second;
            HasAddedSuccessor = true;
          }
        } else {
          // A successor with no known initialized ranges forces the
          // intersection to empty.
          CRL = ConstantRangeList();
          break;
        }
      }
    }

    if (UPB != UsesPerBlock.end()) {
      // Sort uses in this block by instruction order.
      append_range(Insts, UPB->second.Insts);
      sort(Insts, [](std::pair<Instruction *, ArgumentAccessInfo> &LHS,
                     std::pair<Instruction *, ArgumentAccessInfo> &RHS) {
        return LHS.first->comesBefore(RHS.first);
      });

      // From the end of the block to the beginning of the block, set
      // initializes ranges.
      for (auto &[_, Info] : reverse(Insts)) {
        // An unknown access (or a write with side effects) invalidates
        // everything known from later instructions/successors.
        if (Info.ArgAccessType == ArgumentAccessInfo::AccessType::Unknown ||
            Info.ArgAccessType ==
                ArgumentAccessInfo::AccessType::WriteWithSideEffect)
          CRL = ConstantRangeList();
        if (!Info.AccessRanges.empty()) {
          if (Info.ArgAccessType == ArgumentAccessInfo::AccessType::Write ||
              Info.ArgAccessType ==
                  ArgumentAccessInfo::AccessType::WriteWithSideEffect) {
            CRL = CRL.unionWith(Info.AccessRanges);
          } else {
            // A read of a range before the (later) write means that range is
            // not "initialized before use" — remove it.
            assert(Info.ArgAccessType == ArgumentAccessInfo::AccessType::Read);
            for (const auto &ReadRange : Info.AccessRanges)
              CRL.subtract(ReadRange);
          }
        }
      }
    }
    return CRL;
  };

  ConstantRangeList EntryCRL;
  // If all write instructions are in the EntryBB, or if the EntryBB has
  // a clobbering use, we only need to look at EntryBB.
  bool OnlyScanEntryBlock = !ArgumentUses.HasWriteOutsideEntryBB;
  if (!OnlyScanEntryBlock)
    if (auto EntryUPB = UsesPerBlock.find(&EntryBB);
        EntryUPB != UsesPerBlock.end())
      OnlyScanEntryBlock = EntryUPB->second.HasUnknownAccess;
  if (OnlyScanEntryBlock) {
    EntryCRL = VisitBlock(&EntryBB);
    if (EntryCRL.empty())
      return false;
  } else {
    // Now we have to go through CFG to get the initialized argument ranges
    // across blocks. With dominance and post-dominance, the initialized ranges
    // by a block include both accesses inside this block and accesses in its
    // (transitive) successors. So visit successors before predecessors with a
    // post-order walk of the blocks and memorize the results in "Initialized".
    for (const BasicBlock *BB : post_order(&F)) {
      ConstantRangeList CRL = VisitBlock(BB);
      if (!CRL.empty())
        Initialized[BB] = CRL;
    }

    auto EntryCRLI = Initialized.find(&EntryBB);
    if (EntryCRLI == Initialized.end())
      return false;

    EntryCRL = EntryCRLI->second;
  }

  assert(!EntryCRL.empty() &&
         "should have bailed already if EntryCRL is empty");

  if (A.hasAttribute(Attribute::Initializes)) {
    // Widen (never shrink) any pre-existing initializes attribute.
    ConstantRangeList PreviousCRL =
        A.getAttribute(Attribute::Initializes).getValueAsConstantRangeList();
    if (PreviousCRL == EntryCRL)
      return false;
    EntryCRL = EntryCRL.unionWith(PreviousCRL);
  }

  A.addAttr(Attribute::get(A.getContext(), Attribute::Initializes,
                           EntryCRL.rangesRef()));

  return true;
}
1253
/// Deduce nocapture attributes for the SCC.
///
/// Also deduces per-argument access attributes (readnone/readonly/writeonly)
/// and, unless \p SkipInitializes, the 'initializes' attribute. Arguments
/// whose capture status depends on other functions in the SCC are deferred to
/// an Argument-SCC fixed-point analysis over the ArgumentGraph.
static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
                             bool SkipInitializes) {
  ArgumentGraph AG;

  // Determines and applies the access attribute for a single argument,
  // treating only the argument itself as "in SCC".
  auto DetermineAccessAttrsForSingleton = [](Argument *A) {
    Self.insert(A);
    if (R != Attribute::None)
      return addAccessAttr(A, R);
    return false;
  };

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

      Changed.insert(F);

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() && F->willReturn() &&
        F->getReturnType()->isVoidTy()) {
      for (Argument &A : F->args()) {
        if (A.getType()->isPointerTy() && !A.hasNoCaptureAttr()) {
          A.addAttr(Attribute::getWithCaptureInfo(A.getContext(),
          ++NumCapturesNone;
          Changed.insert(F);
        }
      }
      continue;
    }

    for (Argument &A : F->args()) {
      if (!A.getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      CaptureInfo OrigCI = A.getAttributes().getCaptureInfo();
      if (!capturesNothing(OrigCI)) {
        // Run capture tracking; Tracker.Uses collects SCC-internal call
        // operands whose capture status is not yet decidable.
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&A, &Tracker);
        CaptureInfo NewCI = Tracker.CI & OrigCI;
        if (NewCI != OrigCI) {
          if (Tracker.Uses.empty()) {
            // If the information is complete, add the attribute now.
            A.addAttr(Attribute::getWithCaptureInfo(A.getContext(), NewCI));
            addCapturesStat(NewCI);
            Changed.insert(F);
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&A];
            Node->CC = CaptureComponents(NewCI);
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A.onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone/writeonly without doing
        // an SCC? Note that we don't allow any calls at all here, or else our
        // result will be dependent on the iteration order through the
        // functions in the SCC.
        if (DetermineAccessAttrsForSingleton(&A))
          Changed.insert(F);
      }
      if (!SkipInitializes && !A.onlyReadsMemory()) {
        if (inferInitializes(A, *F))
          Changed.insert(F);
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // eg. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        CaptureInfo OrigCI = A->getAttributes().getCaptureInfo();
        CaptureInfo NewCI = CaptureInfo(ArgumentSCC[0]->CC) & OrigCI;
        if (NewCI != OrigCI) {
          A->addAttr(Attribute::getWithCaptureInfo(A->getContext(), NewCI));
          addCapturesStat(NewCI);
          Changed.insert(A->getParent());
        }

        // Infer the access attributes given the new captures one
        if (DetermineAccessAttrsForSingleton(A))
          Changed.insert(A->getParent());
      }
      continue;
    }

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    // At the SCC level, only track merged CaptureComponents. We're not
    // currently prepared to handle propagation of return-only captures across
    // the SCC.
    for (ArgumentGraphNode *N : ArgumentSCC) {
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (ArgumentSCCNodes.count(A))
          CC |= Use->CC;
        else
          CC |= CaptureComponents(A->getAttributes().getCaptureInfo());
        break;
      }
      if (capturesAll(CC))
        break;
    }

    if (!capturesAll(CC)) {
      for (ArgumentGraphNode *N : ArgumentSCC) {
        Argument *A = N->Definition;
        CaptureInfo OrigCI = A->getAttributes().getCaptureInfo();
        CaptureInfo NewCI = CaptureInfo(N->CC | CC) & OrigCI;
        if (NewCI != OrigCI) {
          A->addAttr(Attribute::getWithCaptureInfo(A->getContext(), NewCI));
          addCapturesStat(NewCI);
          Changed.insert(A->getParent());
        }
      }
    }

    if (capturesAnyProvenance(CC)) {
      // As the pointer provenance may be captured, determine the pointer
      // attributes looking at each argument individually.
      for (ArgumentGraphNode *N : ArgumentSCC) {
        if (DetermineAccessAttrsForSingleton(N->Definition))
          Changed.insert(N->Definition->getParent());
      }
      continue;
    }

    // We also want to compute readonly/readnone/writeonly. With a small number
    // of false negatives, we can assume that any pointer which is captured
    // isn't going to be provably readonly or readnone, since by definition
    // we can't analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    // Lattice meet of two access attributes; ReadNone is the identity,
    // mismatched ReadOnly/WriteOnly meet to None.
    auto meetAccessAttr = [](Attribute::AttrKind A, Attribute::AttrKind B) {
      if (A == B)
        return A;
      if (A == Attribute::ReadNone)
        return B;
      if (B == Attribute::ReadNone)
        return A;
      return Attribute::None;
    };

    Attribute::AttrKind AccessAttr = Attribute::ReadNone;
    for (ArgumentGraphNode *N : ArgumentSCC) {
      Argument *A = N->Definition;
      Attribute::AttrKind K = determinePointerAccessAttrs(A, ArgumentSCCNodes);
      AccessAttr = meetAccessAttr(AccessAttr, K);
      if (AccessAttr == Attribute::None)
        break;
    }

    if (AccessAttr != Attribute::None) {
      for (ArgumentGraphNode *N : ArgumentSCC) {
        Argument *A = N->Definition;
        if (addAccessAttr(A, AccessAttr))
          Changed.insert(A->getParent());
      }
    }
  }
}
1459
/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
///
/// Works by a worklist walk backwards from each return value; every value
/// flowing to a return must bottom out in null/undef, an alloca, or a noalias
/// call (calls within \p SCCNodes are speculatively assumed noalias).
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  // Note: iterating by index because the set grows during the walk.
  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      // Only null and undef constants are compatible with "malloc-like".
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    // Returning an argument means returning a pointer the caller already has.
    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        FlowsToReturn.insert_range(PN->incoming_values());
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallBase &CB = cast<CallBase>(*RVI);
        if (CB.hasRetAttr(Attribute::NoAlias))
          break;
        // Speculatively assume calls into our own SCC are malloc-like too.
        if (CB.getCalledFunction() && SCCNodes.count(CB.getCalledFunction()))
          break;
        [[fallthrough]];
      }
      default:
        return false; // Did not come from an allocation.
      }

    // Even a fresh allocation is not noalias if it escapes elsewhere.
    if (PointerMayBeCaptured(RetVal, /*ReturnCaptures=*/false))
      return false;
  }

  return true;
}
1525
/// Deduce noalias attributes for the SCC.
///
/// All-or-nothing over the SCC: if any function is not provably malloc-like
/// (or lacks an exact definition), no function in the SCC is marked. This is
/// required because the malloc-like check speculates on SCC-internal calls.
static void addNoAliasAttrs(const SCCNodeSet &SCCNodes,
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->returnDoesNotAlias())
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    // Note: 'return' (not 'continue') — one inexact definition invalidates the
    // speculative conclusion for the whole SCC.
    if (!F->hasExactDefinition())
      return;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return;
  }

  // Every candidate passed; commit the attribute across the SCC.
  for (Function *F : SCCNodes) {
    if (F->returnDoesNotAlias() ||
        !F->getReturnType()->isPointerTy())
      continue;

    F->setReturnDoesNotAlias();
    ++NumNoAlias;
    Changed.insert(F);
  }
}
1561
/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return a null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  auto &DL = F->getDataLayout();

  // Worklist walk; the set grows as we look through casts/phis/selects.
  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good
    if (isKnownNonZero(RetVal, DL))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::GetElementPtr:
      // Only an inbounds GEP of a non-null base is itself non-null.
      if (cast<GEPOperator>(RVI)->isInBounds()) {
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      }
      return false;
    case Instruction::Select: {
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallBase &CB = cast<CallBase>(*RVI);
      Function *Callee = CB.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null
    };
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}
1638
/// Deduce nonnull attributes for the SCC.
///
/// Non-speculative conclusions are committed eagerly per-function; the
/// speculative ones (depending on SCC-internal calls) are committed only if
/// every function in the SCC turns out to return nonnull.
static void addNonNullAttrs(const SCCNodeSet &SCCNodes,
  // Speculative that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasRetAttr(Attribute::NonNull))
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    // Note: 'return' (not 'continue') — an inexact definition invalidates the
    // speculative conclusion for the whole SCC.
    if (!F->hasExactDefinition())
      return;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC
        LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
                          << " as nonnull\n");
        F->addRetAttr(Attribute::NonNull);
        ++NumNonNullReturn;
        Changed.insert(F);
      }
      continue;
    }
    // At least one function returns something which could be null, can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasRetAttr(Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addRetAttr(Attribute::NonNull);
      ++NumNonNullReturn;
      Changed.insert(F);
    }
  }
}
1695
/// Deduce noundef attributes for the SCC.
///
/// A function gets a noundef return if every return value is provably not
/// undef/poison AND re-proves all other existing return attributes (nonnull,
/// align, range, nofpclass), since violating those would turn the value into
/// poison once noundef is attached.
static void addNoUndefAttrs(const SCCNodeSet &SCCNodes,
  // Check each function in turn, determining which functions return noundef
  // values.
  for (Function *F : SCCNodes) {
    // Already noundef.
    AttributeList Attrs = F->getAttributes();
    if (Attrs.hasRetAttr(Attribute::NoUndef))
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return;

    // MemorySanitizer assumes that the definition and declaration of a
    // function will be consistent. A function with sanitize_memory attribute
    // should be skipped from inference.
    if (F->hasFnAttribute(Attribute::SanitizeMemory))
      continue;

    if (F->getReturnType()->isVoidTy())
      continue;

    const DataLayout &DL = F->getDataLayout();
    if (all_of(*F, [&](BasicBlock &BB) {
          if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
            // TODO: perform context-sensitive analysis?
            Value *RetVal = Ret->getReturnValue();
              return false;

            // We know the original return value is not poison now, but it
            // could still be converted to poison by another return attribute.
            // Try to explicitly re-prove the relevant attributes.
            if (Attrs.hasRetAttr(Attribute::NonNull) &&
                !isKnownNonZero(RetVal, DL))
              return false;

            if (MaybeAlign Align = Attrs.getRetAlignment())
              if (RetVal->getPointerAlignment(DL) < *Align)
                return false;

            Attribute Attr = Attrs.getRetAttr(Attribute::Range);
            if (Attr.isValid() &&
                !Attr.getRange().contains(
                    computeConstantRange(RetVal, /*ForSigned=*/false,
                                         SimplifyQuery(F->getDataLayout()))))
              return false;

            FPClassTest AttrFPClass = Attrs.getRetNoFPClass();
            if (AttrFPClass != fcNone) {
              KnownFPClass ComputedFPClass = computeKnownFPClass(RetVal, DL);
              if (!ComputedFPClass.isKnownNever(AttrFPClass))
                return false;
            }
          }
          return true;
        })) {
      F->addRetAttr(Attribute::NoUndef);
      ++NumNoUndefReturn;
      Changed.insert(F);
    }
  }
}
1763
1764namespace {
1765
1766/// Collects a set of attribute inference requests and performs them all in one
1767/// go on a single SCC Node. Inference involves scanning function bodies
1768/// looking for instructions that violate attribute assumptions.
1769/// As soon as all the bodies are fine we are free to set the attribute.
1770/// Customization of inference for individual attributes is performed by
1771/// providing a handful of predicates for each attribute.
1772class AttributeInferer {
1773public:
1774 /// Describes a request for inference of a single attribute.
1775 struct InferenceDescriptor {
1776
1777 /// Returns true if this function does not have to be handled.
1778 /// General intent for this predicate is to provide an optimization
1779 /// for functions that do not need this attribute inference at all
1780 /// (say, for functions that already have the attribute).
1781 std::function<bool(const Function &)> SkipFunction;
1782
1783 /// Returns true if this instruction violates attribute assumptions.
1784 std::function<bool(Instruction &)> InstrBreaksAttribute;
1785
1786 /// Sets the inferred attribute for this function.
1787 std::function<void(Function &)> SetAttribute;
1788
1789 /// Attribute we derive.
1790 Attribute::AttrKind AKind;
1791
1792 /// If true, only "exact" definitions can be used to infer this attribute.
1793 /// See GlobalValue::isDefinitionExact.
1794 bool RequiresExactDefinition;
1795
1796 InferenceDescriptor(Attribute::AttrKind AK,
1797 std::function<bool(const Function &)> SkipFunc,
1798 std::function<bool(Instruction &)> InstrScan,
1799 std::function<void(Function &)> SetAttr,
1800 bool ReqExactDef)
1801 : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
1802 SetAttribute(SetAttr), AKind(AK),
1803 RequiresExactDefinition(ReqExactDef) {}
1804 };
1805
1806private:
1807 SmallVector<InferenceDescriptor, 4> InferenceDescriptors;
1808
1809public:
1810 void registerAttrInference(InferenceDescriptor AttrInference) {
1811 InferenceDescriptors.push_back(AttrInference);
1812 }
1813
1814 void run(const SCCNodeSet &SCCNodes, SmallPtrSet<Function *, 8> &Changed);
1815};
1816
/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
///
/// An attribute is granted to every non-skipped function in the SCC iff no
/// instruction anywhere in the SCC violates its assumptions; a single
/// violating instruction removes the attribute from consideration for the
/// entire SCC.
void AttributeInferer::run(const SCCNodeSet &SCCNodes,
                           SmallPtrSet<Function *, 8> &Changed) {
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      // All candidate attributes for this function refuted; stop scanning.
      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return;

  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only functions that were either:
    //   - explicitly skipped from scan/inference, or
    //   - verified to have no instructions that break attribute assumptions.
    // Hence we just go and force the attribute for all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed.insert(F);
      ID.SetAttribute(*F);
    }
}
1886
/// Result of collecting the set of functions in an SCC that are eligible for
/// attribute inference.
struct SCCNodesResult {
  SCCNodeSet SCCNodes;
};
1890
1891} // end anonymous namespace
1892
/// Helper for non-Convergent inference predicate InstrBreaksAttribute.
///
/// Returns true if \p I prevents removing 'convergent' from the enclosing
/// SCC's functions.
                                     const SCCNodeSet &SCCNodes) {
  const CallBase *CB = dyn_cast<CallBase>(&I);
  // Breaks non-convergent assumption if CS is a convergent call to a function
  // not in the SCC.
  // Note: an indirect convergent call also breaks it, since getCalledFunction()
  // returns null, which is never in SCCNodes.
  return CB && CB->isConvergent() &&
         !SCCNodes.contains(CB->getCalledFunction());
}
1902
1903/// Helper for NoUnwind inference predicate InstrBreaksAttribute.
1904static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
1905 if (!I.mayThrow(/* IncludePhaseOneUnwind */ true))
1906 return false;
1907 if (const auto *CI = dyn_cast<CallInst>(&I)) {
1908 if (Function *Callee = CI->getCalledFunction()) {
1909 // I is a may-throw call to a function inside our SCC. This doesn't
1910 // invalidate our current working assumption that the SCC is no-throw; we
1911 // just have to scan that other function.
1912 if (SCCNodes.contains(Callee))
1913 return false;
1914 }
1915 }
1916 return true;
1917}
1918
1919/// Helper for NoFree inference predicate InstrBreaksAttribute.
1920static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
1922 if (!CB)
1923 return false;
1924
1925 if (CB->hasFnAttr(Attribute::NoFree))
1926 return false;
1927
1928 // Speculatively assume in SCC.
1929 if (Function *Callee = CB->getCalledFunction())
1930 if (SCCNodes.contains(Callee))
1931 return false;
1932
1933 return true;
1934}
1935
1936// Return true if this is an atomic which has an ordering stronger than
1937// unordered. Note that this is different than the predicate we use in
1938// Attributor. Here we chose to be conservative and consider monotonic
1939// operations potentially synchronizing. We generally don't do much with
1940// monotonic operations, so this is simply risk reduction.
1942 if (!I->isAtomic())
1943 return false;
1944
1945 if (auto *FI = dyn_cast<FenceInst>(I))
1946 // All legal orderings for fence are stronger than monotonic.
1947 return FI->getSyncScopeID() != SyncScope::SingleThread;
1949 return true;
1950 else if (auto *SI = dyn_cast<StoreInst>(I))
1951 return !SI->isUnordered();
1952 else if (auto *LI = dyn_cast<LoadInst>(I))
1953 return !LI->isUnordered();
1954 else {
1955 llvm_unreachable("unknown atomic instruction?");
1956 }
1957}
1958
1959static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
1960 // Volatile may synchronize
1961 if (I.isVolatile())
1962 return true;
1963
1964 // An ordered atomic may synchronize. (See comment about on monotonic.)
1965 if (isOrderedAtomic(&I))
1966 return true;
1967
1968 auto *CB = dyn_cast<CallBase>(&I);
1969 if (!CB)
1970 // Non call site cases covered by the two checks above
1971 return false;
1972
1973 if (CB->hasFnAttr(Attribute::NoSync))
1974 return false;
1975
1976 // Non volatile memset/memcpy/memmoves are nosync
1977 // NOTE: Only intrinsics with volatile flags should be handled here. All
1978 // others should be marked in Intrinsics.td.
1979 if (auto *MI = dyn_cast<MemIntrinsic>(&I))
1980 if (!MI->isVolatile())
1981 return false;
1982
1983 // Speculatively assume in SCC.
1984 if (Function *Callee = CB->getCalledFunction())
1985 if (SCCNodes.contains(Callee))
1986 return false;
1987
1988 return true;
1989}
1990
1991/// Attempt to remove convergent function attribute when possible.
1992///
1993/// Returns true if any changes to function attributes were made.
1994static void inferConvergent(const SCCNodeSet &SCCNodes,
1996 AttributeInferer AI;
1997
1998 // Request to remove the convergent attribute from all functions in the SCC
1999 // if every callsite within the SCC is not convergent (except for calls
2000 // to functions within the SCC).
2001 // Note: Removal of the attr from the callsites will happen in
2002 // InstCombineCalls separately.
2003 AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
2004 Attribute::Convergent,
2005 // Skip non-convergent functions.
2006 [](const Function &F) { return !F.isConvergent(); },
2007 // Instructions that break non-convergent assumption.
2008 [SCCNodes](Instruction &I) {
2009 return InstrBreaksNonConvergent(I, SCCNodes);
2010 },
2011 [](Function &F) {
2012 LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
2013 << "\n");
2014 F.setNotConvergent();
2015 },
2016 /* RequiresExactDefinition= */ false});
2017 // Perform all the requested attribute inference actions.
2018 AI.run(SCCNodes, Changed);
2019}
2020
2021/// Infer attributes from all functions in the SCC by scanning every
2022/// instruction for compliance to the attribute assumptions.
2023///
2024/// Returns true if any changes to function attributes were made.
2025static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
2027 AttributeInferer AI;
2028
2030 // Request to infer nounwind attribute for all the functions in the SCC if
2031 // every callsite within the SCC is not throwing (except for calls to
2032 // functions within the SCC). Note that nounwind attribute suffers from
2033 // derefinement - results may change depending on how functions are
2034 // optimized. Thus it can be inferred only from exact definitions.
2035 AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
2036 Attribute::NoUnwind,
2037 // Skip non-throwing functions.
2038 [](const Function &F) { return F.doesNotThrow(); },
2039 // Instructions that break non-throwing assumption.
2040 [&SCCNodes](Instruction &I) {
2041 return InstrBreaksNonThrowing(I, SCCNodes);
2042 },
2043 [](Function &F) {
2045 << "Adding nounwind attr to fn " << F.getName() << "\n");
2046 F.setDoesNotThrow();
2047 ++NumNoUnwind;
2048 },
2049 /* RequiresExactDefinition= */ true});
2050
2052 // Request to infer nofree attribute for all the functions in the SCC if
2053 // every callsite within the SCC does not directly or indirectly free
2054 // memory (except for calls to functions within the SCC). Note that nofree
2055 // attribute suffers from derefinement - results may change depending on
2056 // how functions are optimized. Thus it can be inferred only from exact
2057 // definitions.
2058 AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
2059 Attribute::NoFree,
2060 // Skip functions known not to free memory.
2061 [](const Function &F) { return F.doesNotFreeMemory(); },
2062 // Instructions that break non-deallocating assumption.
2063 [&SCCNodes](Instruction &I) {
2064 return InstrBreaksNoFree(I, SCCNodes);
2065 },
2066 [](Function &F) {
2068 << "Adding nofree attr to fn " << F.getName() << "\n");
2069 F.setDoesNotFreeMemory();
2070 ++NumNoFree;
2071 },
2072 /* RequiresExactDefinition= */ true});
2073
2074 AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
2075 Attribute::NoSync,
2076 // Skip already marked functions.
2077 [](const Function &F) { return F.hasNoSync(); },
2078 // Instructions that break nosync assumption.
2079 [&SCCNodes](Instruction &I) {
2080 return InstrBreaksNoSync(I, SCCNodes);
2081 },
2082 [](Function &F) {
2084 << "Adding nosync attr to fn " << F.getName() << "\n");
2085 F.setNoSync();
2086 ++NumNoSync;
2087 },
2088 /* RequiresExactDefinition= */ true});
2089
2090 // Perform all the requested attribute inference actions.
2091 AI.run(SCCNodes, Changed);
2092}
2093
2094// Determines if the function 'F' can be marked 'norecurse'.
2095// It returns true if any call within 'F' could lead to a recursive
2096// call back to 'F', and false otherwise.
2097// The 'AnyFunctionsAddressIsTaken' parameter is a module-wide flag
2098// that is true if any function's address is taken, or if any function
2099// has external linkage. This is used to determine the safety of
2100// external/library calls.
2102 bool AnyFunctionsAddressIsTaken = true) {
2103 for (const auto &BB : F) {
2104 for (const auto &I : BB) {
2105 if (const auto *CB = dyn_cast<CallBase>(&I)) {
2106 const Function *Callee = CB->getCalledFunction();
2107 if (!Callee || Callee == &F)
2108 return true;
2109
2110 if (Callee->doesNotRecurse())
2111 continue;
2112
2113 if (!AnyFunctionsAddressIsTaken ||
2114 (Callee->isDeclaration() &&
2115 Callee->hasFnAttribute(Attribute::NoCallback)))
2116 continue;
2117 return true;
2118 }
2119 }
2120 }
2121 return false;
2122}
2123
2124static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes,
2126 // Try and identify functions that do not recurse.
2127
2128 // If the SCC contains multiple nodes we know for sure there is recursion.
2129 if (SCCNodes.size() != 1)
2130 return;
2131
2132 Function *F = *SCCNodes.begin();
2133 if (!F || !F->hasExactDefinition() || F->doesNotRecurse())
2134 return;
2135 if (!mayHaveRecursiveCallee(*F)) {
2136 // Every call was to a non-recursive function other than this function, and
2137 // we have no indirect recursion as the SCC size is one. This function
2138 // cannot recurse.
2139 F->setDoesNotRecurse();
2140 ++NumNoRecurse;
2141 Changed.insert(F);
2142 }
2143}
2144
2145// Set the noreturn function attribute if possible.
2146static void addNoReturnAttrs(const SCCNodeSet &SCCNodes,
2148 for (Function *F : SCCNodes) {
2149 if (!F || !F->hasExactDefinition() || F->hasFnAttribute(Attribute::Naked) ||
2150 F->doesNotReturn())
2151 continue;
2152
2153 if (!canReturn(*F)) {
2154 F->setDoesNotReturn();
2155 Changed.insert(F);
2156 }
2157 }
2158}
2159
2162 ColdPaths[&F.front()] = false;
2164 Jobs.push_back(&F.front());
2165
2166 while (!Jobs.empty()) {
2167 BasicBlock *BB = Jobs.pop_back_val();
2168
2169 // If block contains a cold callsite this path through the CG is cold.
2170 // Ignore whether the instructions actually are guaranteed to transfer
2171 // execution. Divergent behavior is considered unlikely.
2172 if (any_of(*BB, [](Instruction &I) {
2173 if (auto *CB = dyn_cast<CallBase>(&I))
2174 return CB->hasFnAttr(Attribute::Cold);
2175 return false;
2176 })) {
2177 ColdPaths[BB] = true;
2178 continue;
2179 }
2180
2181 auto Succs = successors(BB);
2182 // We found a path that doesn't go through any cold callsite.
2183 if (Succs.empty())
2184 return false;
2185
2186 // We didn't find a cold callsite in this BB, so check that all successors
2187 // contain a cold callsite (or that their successors do).
2188 // Potential TODO: We could use static branch hints to assume certain
2189 // successor paths are inherently cold, irrespective of if they contain a
2190 // cold callsite.
2191 for (BasicBlock *Succ : Succs) {
2192 // Start with false, this is necessary to ensure we don't turn loops into
2193 // cold.
2194 auto [Iter, Inserted] = ColdPaths.try_emplace(Succ, false);
2195 if (!Inserted) {
2196 if (Iter->second)
2197 continue;
2198 return false;
2199 }
2200 Jobs.push_back(Succ);
2201 }
2202 }
2203 return true;
2204}
2205
2206// Set the cold function attribute if possible.
2207static void addColdAttrs(const SCCNodeSet &SCCNodes,
2209 for (Function *F : SCCNodes) {
2210 if (!F || !F->hasExactDefinition() || F->hasFnAttribute(Attribute::Naked) ||
2211 F->hasFnAttribute(Attribute::Cold) || F->hasFnAttribute(Attribute::Hot))
2212 continue;
2213
2214 // Potential TODO: We could add attribute `cold` on functions with `coldcc`.
2215 if (allPathsGoThroughCold(*F)) {
2216 F->addFnAttr(Attribute::Cold);
2217 ++NumCold;
2218 Changed.insert(F);
2219 continue;
2220 }
2221 }
2222}
2223
2224static bool functionWillReturn(const Function &F) {
2225 // We can infer and propagate function attributes only when we know that the
2226 // definition we'll get at link time is *exactly* the definition we see now.
2227 // For more details, see GlobalValue::mayBeDerefined.
2228 if (!F.hasExactDefinition())
2229 return false;
2230
2231 // Must-progress function without side-effects must return.
2232 if (F.mustProgress() && F.onlyReadsMemory())
2233 return true;
2234
2235 // Can only analyze functions with a definition.
2236 if (F.isDeclaration())
2237 return false;
2238
2239 // Functions with loops require more sophisticated analysis, as the loop
2240 // may be infinite. For now, don't try to handle them.
2242 FindFunctionBackedges(F, Backedges);
2243 if (!Backedges.empty())
2244 return false;
2245
2246 // If there are no loops, then the function is willreturn if all calls in
2247 // it are willreturn.
2248 return all_of(instructions(F), [](const Instruction &I) {
2249 return I.willReturn();
2250 });
2251}
2252
2253// Set the willreturn function attribute if possible.
2254static void addWillReturn(const SCCNodeSet &SCCNodes,
2256 for (Function *F : SCCNodes) {
2257 if (!F || F->willReturn() || !functionWillReturn(*F))
2258 continue;
2259
2260 F->setWillReturn();
2261 NumWillReturn++;
2262 Changed.insert(F);
2263 }
2264}
2265
2266static SCCNodesResult createSCCNodeSet(ArrayRef<Function *> Functions) {
2267 SCCNodesResult Res;
2268 for (Function *F : Functions) {
2269 if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked) ||
2270 F->isPresplitCoroutine()) {
2271 // Omit any functions we're trying not to optimize from the set.
2272 continue;
2273 }
2274
2275 Res.SCCNodes.insert(F);
2276 }
2277 return Res;
2278}
2279
2280template <typename AARGetterT>
2282deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter,
2283 bool ArgAttrsOnly) {
2284 SCCNodesResult Nodes = createSCCNodeSet(Functions);
2285
2286 // Bail if the SCC only contains optnone functions.
2287 if (Nodes.SCCNodes.empty())
2288 return {};
2289
2291 if (ArgAttrsOnly) {
2292 // ArgAttrsOnly means to only infer attributes that may aid optimizations
2293 // on the *current* function. "initializes" attribute is to aid
2294 // optimizations (like DSE) on the callers, so skip "initializes" here.
2295 addArgumentAttrs(Nodes.SCCNodes, Changed, /*SkipInitializes=*/true);
2296 return Changed;
2297 }
2298
2299 addArgumentReturnedAttrs(Nodes.SCCNodes, Changed);
2300 addMemoryAttrs(Nodes.SCCNodes, AARGetter, Changed);
2301 addArgumentAttrs(Nodes.SCCNodes, Changed, /*SkipInitializes=*/false);
2302 inferConvergent(Nodes.SCCNodes, Changed);
2303 addNoReturnAttrs(Nodes.SCCNodes, Changed);
2304 addColdAttrs(Nodes.SCCNodes, Changed);
2305 addWillReturn(Nodes.SCCNodes, Changed);
2306 addNoUndefAttrs(Nodes.SCCNodes, Changed);
2307 addNoAliasAttrs(Nodes.SCCNodes, Changed);
2308 addNonNullAttrs(Nodes.SCCNodes, Changed);
2309 inferAttrsFromFunctionBodies(Nodes.SCCNodes, Changed);
2310 addNoRecurseAttrs(Nodes.SCCNodes, Changed);
2311
2312 // Finally, infer the maximal set of attributes from the ones we've inferred
2313 // above. This is handling the cases where one attribute on a signature
2314 // implies another, but for implementation reasons the inference rule for
2315 // the later is missing (or simply less sophisticated).
2316 for (Function *F : Nodes.SCCNodes)
2317 if (F)
2319 Changed.insert(F);
2320
2321 return Changed;
2322}
2323
2326 LazyCallGraph &CG,
2328 // Skip non-recursive functions if requested.
2329 // Only infer argument attributes for non-recursive functions, because
2330 // it can affect optimization behavior in conjunction with noalias.
2331 bool ArgAttrsOnly = false;
2332 if (C.size() == 1 && SkipNonRecursive) {
2333 LazyCallGraph::Node &N = *C.begin();
2334 if (!N->lookup(N))
2335 ArgAttrsOnly = true;
2336 }
2337
2339 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2340
2341 // We pass a lambda into functions to wire them up to the analysis manager
2342 // for getting function analyses.
2343 auto AARGetter = [&](Function &F) -> AAResults & {
2344 return FAM.getResult<AAManager>(F);
2345 };
2346
2348 for (LazyCallGraph::Node &N : C) {
2349 Functions.push_back(&N.getFunction());
2350 }
2351
2352 auto ChangedFunctions =
2353 deriveAttrsInPostOrder(Functions, AARGetter, ArgAttrsOnly);
2354 if (ChangedFunctions.empty())
2355 return PreservedAnalyses::all();
2356
2357 // Invalidate analyses for modified functions so that we don't have to
2358 // invalidate all analyses for all functions in this SCC.
2359 PreservedAnalyses FuncPA;
2360 // We haven't changed the CFG for modified functions.
2361 FuncPA.preserveSet<CFGAnalyses>();
2362 for (Function *Changed : ChangedFunctions) {
2363 FAM.invalidate(*Changed, FuncPA);
2364 // Also invalidate any direct callers of changed functions since analyses
2365 // may care about attributes of direct callees. For example, MemorySSA cares
2366 // about whether or not a call's callee modifies memory and queries that
2367 // through function attributes.
2368 for (auto *U : Changed->users()) {
2369 if (auto *Call = dyn_cast<CallBase>(U)) {
2370 if (Call->getCalledOperand() == Changed)
2371 FAM.invalidate(*Call->getFunction(), FuncPA);
2372 }
2373 }
2374 }
2375
2377 // We have not added or removed functions.
2379 // We already invalidated all relevant function analyses above.
2381 return PA;
2382}
2383
2385 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
2387 OS, MapClassName2PassName);
2388 if (SkipNonRecursive)
2389 OS << "<skip-non-recursive-function-attrs>";
2390}
2391
2392template <typename AARGetterT>
2393static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
2395 for (CallGraphNode *I : SCC) {
2396 Functions.push_back(I->getFunction());
2397 }
2398
2399 return !deriveAttrsInPostOrder(Functions, AARGetter).empty();
2400}
2401
2403 if (F.doesNotRecurse())
2404 return false;
2405
2406 // We check the preconditions for the function prior to calling this to avoid
2407 // the cost of building up a reversible post-order list. We assert them here
2408 // to make sure none of the invariants this relies on were violated.
2409 assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
2410 assert(F.hasInternalLinkage() &&
2411 "Can only do top-down deduction for internal linkage functions!");
2412
2413 // If F is internal and all of its uses are calls from a non-recursive
2414 // functions, then none of its calls could in fact recurse without going
2415 // through a function marked norecurse, and so we can mark this function too
2416 // as norecurse. Note that the uses must actually be calls -- otherwise
2417 // a pointer to this function could be returned from a norecurse function but
2418 // this function could be recursively (indirectly) called. Note that this
2419 // also detects if F is directly recursive as F is not yet marked as
2420 // a norecurse function.
2421 for (auto &U : F.uses()) {
2422 const CallBase *CB = dyn_cast<CallBase>(U.getUser());
2423 if (!CB || !CB->isCallee(&U) ||
2424 !CB->getParent()->getParent()->doesNotRecurse())
2425 return false;
2426 }
2427 F.setDoesNotRecurse();
2428 ++NumNoRecurse;
2429 return true;
2430}
2431
2433 assert(!F.isDeclaration() && "Cannot deduce nofpclass without a definition!");
2434 unsigned NumArgs = F.arg_size();
2435 SmallVector<FPClassTest, 8> ArgsNoFPClass(NumArgs, fcAllFlags);
2436 FPClassTest RetNoFPClass = fcAllFlags;
2437
2438 bool Changed = false;
2439 for (User *U : F.users()) {
2440 auto *CB = dyn_cast<CallBase>(U);
2441 if (!CB || CB->getCalledFunction() != &F)
2442 return false;
2443
2444 RetNoFPClass &= CB->getRetNoFPClass();
2445 for (unsigned I = 0; I != NumArgs; ++I) {
2446 // TODO: Consider computeKnownFPClass, at least with a small search
2447 // depth. This will currently not catch non-splat vectors.
2448 const APFloat *Cst;
2449 if (match(CB->getArgOperand(I), m_APFloat(Cst)))
2450 ArgsNoFPClass[I] &= ~Cst->classify();
2451 else
2452 ArgsNoFPClass[I] &= CB->getParamNoFPClass(I);
2453 }
2454 }
2455
2456 LLVMContext &Ctx = F.getContext();
2457
2458 if (RetNoFPClass != fcNone) {
2459 FPClassTest OldAttr = F.getAttributes().getRetNoFPClass();
2460 if (OldAttr != RetNoFPClass) {
2461 F.addRetAttr(Attribute::getWithNoFPClass(Ctx, RetNoFPClass));
2462 Changed = true;
2463 }
2464 }
2465
2466 for (unsigned I = 0; I != NumArgs; ++I) {
2467 FPClassTest ArgNoFPClass = ArgsNoFPClass[I];
2468 if (ArgNoFPClass == fcNone)
2469 continue;
2470 FPClassTest OldAttr = F.getParamNoFPClass(I);
2471 if (OldAttr == ArgNoFPClass)
2472 continue;
2473
2474 F.addParamAttr(I, Attribute::getWithNoFPClass(Ctx, ArgNoFPClass));
2475 Changed = true;
2476 }
2477
2478 return Changed;
2479}
2480
2482 // We only have a post-order SCC traversal (because SCCs are inherently
2483 // discovered in post-order), so we accumulate them in a vector and then walk
2484 // it in reverse. This is simpler than using the RPO iterator infrastructure
2485 // because we need to combine SCC detection and the PO walk of the call
2486 // graph. We can also cheat egregiously because we're primarily interested in
2487 // synthesizing norecurse and so we can only save the singular SCCs as SCCs
2488 // with multiple functions in them will clearly be recursive.
2489
2491 CG.buildRefSCCs();
2492 for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
2493 for (LazyCallGraph::SCC &SCC : RC) {
2494 if (SCC.size() != 1)
2495 continue;
2496 Function &F = SCC.begin()->getFunction();
2497 if (!F.isDeclaration() && F.hasInternalLinkage() && !F.use_empty())
2498 Worklist.push_back(&F);
2499 }
2500 }
2501 bool Changed = false;
2502 for (auto *F : llvm::reverse(Worklist)) {
2505 }
2506
2507 return Changed;
2508}
2509
2512 auto &CG = AM.getResult<LazyCallGraphAnalysis>(M);
2513
2514 if (!deduceFunctionAttributeInRPO(M, CG))
2515 return PreservedAnalyses::all();
2516
2519 return PA;
2520}
2521
2524
2525 // Check if any function in the whole program has its address taken or has
2526 // potentially external linkage.
2527 // We use this information when inferring norecurse attribute: If there is
2528 // no function whose address is taken and all functions have internal
2529 // linkage, there is no path for a callback to any user function.
2530 bool AnyFunctionsAddressIsTaken = false;
2531 for (Function &F : M) {
2532 if (F.isDeclaration() || F.doesNotRecurse())
2533 continue;
2534 if (!F.hasLocalLinkage() || F.hasAddressTaken()) {
2535 AnyFunctionsAddressIsTaken = true;
2536 break;
2537 }
2538 }
2539
2540 // Run norecurse inference on all RefSCCs in the LazyCallGraph for this
2541 // module.
2542 bool Changed = false;
2543 LazyCallGraph &CG = MAM.getResult<LazyCallGraphAnalysis>(M);
2544 CG.buildRefSCCs();
2545
2546 for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
2547 // Skip any RefSCC that is part of a call cycle. A RefSCC containing more
2548 // than one SCC indicates a recursive relationship involving indirect calls.
2549 if (RC.size() > 1)
2550 continue;
2551
2552 // RefSCC contains a single-SCC. SCC size > 1 indicates mutually recursive
2553 // functions. Ex: foo1 -> foo2 -> foo3 -> foo1.
2554 LazyCallGraph::SCC &S = *RC.begin();
2555 if (S.size() > 1)
2556 continue;
2557
2558 // Get the single function from this SCC.
2559 Function &F = S.begin()->getFunction();
2560 if (!F.hasExactDefinition() || F.doesNotRecurse())
2561 continue;
2562
2563 // If the analysis confirms that this function has no recursive calls
2564 // (either direct, indirect, or through external linkages),
2565 // we can safely apply the norecurse attribute.
2566 if (!mayHaveRecursiveCallee(F, AnyFunctionsAddressIsTaken)) {
2567 F.setDoesNotRecurse();
2568 ++NumNoRecurse;
2569 Changed = true;
2570 }
2571 }
2572
2574 if (Changed)
2576 else
2578 return PA;
2579}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This header provides classes for managing passes over SCCs of the call graph.
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
DXIL Resource Access
This file defines the DenseMap class.
static bool runImpl(Function &F, const TargetLowering &TLI, const LibcallLoweringInfo &Libcalls, AssumptionCache *AC)
static SmallPtrSet< Function *, 8 > deriveAttrsInPostOrder(ArrayRef< Function * > Functions, AARGetterT &&AARGetter, bool ArgAttrsOnly)
static Attribute::AttrKind determinePointerAccessAttrs(Argument *A, const SmallPtrSet< Argument *, 8 > &SCCNodes)
Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static cl::opt< bool > DisableNoFreeInference("disable-nofree-inference", cl::Hidden, cl::desc("Stop inferring nofree attribute during function-attrs pass"))
static bool inferInitializes(Argument &A, Function &F)
static bool allPathsGoThroughCold(Function &F)
static bool addAccessAttr(Argument *A, Attribute::AttrKind R)
static FunctionSummary * calculatePrevailingSummary(ValueInfo VI, DenseMap< ValueInfo, FunctionSummary * > &CachedPrevailingSummary, function_ref< bool(GlobalValue::GUID, const GlobalValueSummary *)> IsPrevailing)
static void addMemoryAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter, SmallPtrSet< Function *, 8 > &Changed)
Deduce readonly/readnone/writeonly attributes for the SCC.
static bool addArgumentAttrsFromCallsites(Function &F)
If a callsite has arguments that are also arguments to the parent function, try to propagate attribut...
static void addCapturesStat(CaptureInfo CI)
static bool isOrderedAtomic(Instruction *I)
static void addArgLocs(MemoryEffects &ME, const CallBase *Call, ModRefInfo ArgMR, AAResults &AAR)
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes)
Tests whether a function is "malloc-like".
static void addColdAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
static bool mayHaveRecursiveCallee(Function &F, bool AnyFunctionsAddressIsTaken=true)
static void addNoReturnAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
static bool addNoFPClassAttrsTopDown(Function &F)
static cl::opt< bool > DisableNoUnwindInference("disable-nounwind-inference", cl::Hidden, cl::desc("Stop inferring nounwind attribute during function-attrs pass"))
static void addWillReturn(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
static void addNonNullAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
Deduce nonnull attributes for the SCC.
static std::pair< MemoryEffects, MemoryEffects > checkFunctionMemoryAccess(Function &F, bool ThisBody, AAResults &AAR, const SCCNodeSet &SCCNodes)
Returns the memory access attribute for function F using AAR for AA results, where SCCNodes is the cu...
static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes)
Helper for NoUnwind inference predicate InstrBreaksAttribute.
static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
Infer attributes from all functions in the SCC by scanning every instruction for compliance to the at...
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes, bool &Speculative)
Tests whether this function is known to not return null.
static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes)
static bool deduceFunctionAttributeInRPO(Module &M, LazyCallGraph &CG)
static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes)
Helper for NoFree inference predicate InstrBreaksAttribute.
static cl::opt< bool > EnablePoisonArgAttrPropagation("enable-poison-arg-attr-prop", cl::init(true), cl::Hidden, cl::desc("Try to propagate nonnull and nofpclass argument attributes from " "callsites to caller functions."))
static void addNoAliasAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
Deduce noalias attributes for the SCC.
static bool addNoRecurseAttrsTopDown(Function &F)
static void addLocAccess(MemoryEffects &ME, const MemoryLocation &Loc, ModRefInfo MR, AAResults &AAR)
static void inferConvergent(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
Attempt to remove convergent function attribute when possible.
static cl::opt< bool > DisableThinLTOPropagation("disable-thinlto-funcattrs", cl::init(true), cl::Hidden, cl::desc("Don't propagate function-attrs in thinLTO"))
static SCCNodesResult createSCCNodeSet(ArrayRef< Function * > Functions)
static bool InstrBreaksNonConvergent(Instruction &I, const SCCNodeSet &SCCNodes)
Helper for non-Convergent inference predicate InstrBreaksAttribute.
static void addArgumentAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed, bool SkipInitializes)
Deduce nocapture attributes for the SCC.
static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
static bool functionWillReturn(const Function &F)
static void addNoUndefAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
Deduce noundef attributes for the SCC.
static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes, SmallPtrSet< Function *, 8 > &Changed)
Deduce returned attributes for the SCC.
Provides passes for computing function attributes based on interprocedural analyses.
Hexagon Common GEP
#define _
IRTranslator LLVM IR MI
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
Implements a lazy call graph analysis and related passes for the new pass manager.
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
ModuleSummaryIndex.h This file contains the declarations the classes that hold the module index and s...
uint64_t High
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
Remove Loads Into Fake Uses
This builds on the llvm/ADT/GraphTraits.h file to find the strongly connected components (SCCs) of a ...
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
A manager for alias analyses.
LLVM_ABI ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
LLVM_ABI MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
Definition APInt.h:78
This templated class represents "all analyses that operate over <aparticular IR unit>" (e....
Definition Analysis.h:50
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
LLVM_ABI ArrayRef< ConstantRange > getValueAsConstantRangeList() const
Return the attribute's value as a ConstantRange array.
static LLVM_ABI Attribute getWithNoFPClass(LLVMContext &Context, FPClassTest Mask)
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
@ None
No attributes have been set.
Definition Attributes.h:126
static LLVM_ABI Attribute getWithCaptureInfo(LLVMContext &Context, CaptureInfo CI)
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
LLVM_ABI FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
LLVM_ABI FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
LLVM_ABI MemoryEffects getMemoryEffects() const
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool doesNotAccessMemory(unsigned OpNo) const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getDataOperandNo(Value::const_user_iterator UI) const
Given a value use iterator, return the data operand corresponding to it.
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const
Return true if the data operand at index i has the attribute A.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
bool onlyWritesMemory(unsigned OpNo) const
bool isCallee(Value::const_user_iterator UI) const
Determine whether the passed iterator points to the callee operand's Use.
LLVM_ABI CaptureInfo getCaptureInfo(unsigned OpNo) const
Return which pointer components this operand may capture.
bool onlyReadsMemory(unsigned OpNo) const
Value * getArgOperand(unsigned i) const
bool isConvergent() const
Determine if the invoke is convergent.
unsigned getArgOperandNo(const Use *U) const
Given a use for a arg operand, get the arg operand number that corresponds to it.
unsigned arg_size() const
bool isArgOperand(const Use *U) const
bool hasOperandBundles() const
Return true if this User has any operand bundles.
A node in the call graph for a module.
Definition CallGraph.h:162
CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
Represents which components of the pointer may be captured in which location.
Definition ModRef.h:414
static CaptureInfo none()
Create CaptureInfo that does not capture any components of the pointer.
Definition ModRef.h:427
static CaptureInfo retOnly(CaptureComponents RetComponents=CaptureComponents::All)
Create CaptureInfo that may only capture via the return value.
Definition ModRef.h:434
static CaptureInfo all()
Create CaptureInfo that may capture all components of the pointer.
Definition ModRef.h:430
This class represents a list of constant ranges.
LLVM_ABI void subtract(const ConstantRange &SubRange)
LLVM_ABI void insert(const ConstantRange &NewRange)
Insert a new range to Ranges and keep the list ordered.
bool empty() const
Return true if this list contains no members.
ArrayRef< ConstantRange > rangesRef() const
LLVM_ABI ConstantRangeList intersectWith(const ConstantRangeList &CRL) const
Return the range list that results from the intersection of this ConstantRangeList with another Const...
LLVM_ABI ConstantRangeList unionWith(const ConstantRangeList &CRL) const
Return the range list that results from the union of this ConstantRangeList with another ConstantRang...
This class represents a range of values.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:256
iterator end()
Definition DenseMap.h:81
A proxy from a FunctionAnalysisManager to an SCC.
Function summary information to aid decisions and implementation of importing.
ArrayRef< EdgeTy > calls() const
Return the list of <CalleeValueInfo, CalleeInfo> pairs.
FFlags fflags() const
Get function summary flags.
Function and variable summary information to aid decisions and implementation of importing.
static bool isWeakAnyLinkage(LinkageTypes Linkage)
static bool isLinkOnceAnyLinkage(LinkageTypes Linkage)
static bool isLocalLinkage(LinkageTypes Linkage)
static bool isWeakODRLinkage(LinkageTypes Linkage)
uint64_t GUID
Declare a type to represent a global unique identifier for a global value.
static bool isAvailableExternallyLinkage(LinkageTypes Linkage)
static bool isExternalLinkage(LinkageTypes Linkage)
static bool isLinkOnceODRLinkage(LinkageTypes Linkage)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An analysis pass which computes the call graph for a module.
A node in the call graph.
A RefSCC of the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
LLVM_ABI void buildRefSCCs()
iterator_range< postorder_ref_scc_iterator > postorder_ref_sccs()
MemoryEffectsBase getWithoutLoc(Location Loc) const
Get new MemoryEffectsBase with NoModRef on the given Loc.
Definition ModRef.h:231
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition ModRef.h:246
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:143
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:149
ModRefInfo getModRef(Location Loc) const
Get ModRefInfo for the given Location.
Definition ModRef.h:219
static MemoryEffectsBase none()
Definition ModRef.h:128
static MemoryEffectsBase unknown()
Definition ModRef.h:123
Representation for a specific memory location.
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
static LLVM_ABI std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
Class to hold module path string table and global value map, and encapsulate methods for operating on...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM)
op_range incoming_values()
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
Return a value (possibly void), from a function.
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
Definition Value.cpp:964
iterator_range< use_iterator > uses()
Definition Value.h:380
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
Enumerate the SCCs of a directed graph in reverse topological order of the SCC DAG.
Definition SCCIterator.h:49
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
bool match(Val *V, const Pattern &P)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
initializer< Ty > init(const Ty &Val)
PointerTypeMap run(const Module &M)
Compute the PointerTypeMap for the module M.
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
iterator end() const
Definition BasicBlock.h:89
LLVM_ABI iterator begin() const
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
@ Length
Definition DWP.cpp:532
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
LLVM_ABI MemoryEffects computeFunctionBodyMemoryAccess(Function &F, AAResults &AAR)
Returns the memory access properties of this copy of the function.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
scc_iterator< T > scc_begin(const T &G)
Construct the begin iterator for a deduced graph type T.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:356
LLVM_ABI bool thinLTOPropagateFunctionAttrs(ModuleSummaryIndex &Index, function_ref< bool(GlobalValue::GUID, const GlobalValueSummary *)> isPrevailing)
Propagate function attributes for function summaries along the index's callgraph during thinlink.
OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P)
Provide wrappers to std::copy_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1791
AnalysisManager< LazyCallGraph::SCC, LazyCallGraph & > CGSCCAnalysisManager
The CGSCC analysis manager.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns pointer that aliases its argument, and it only captures point...
bool isModSet(const ModRefInfo MRI)
Definition ModRef.h:49
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
CaptureComponents
Components of the pointer that may be captured.
Definition ModRef.h:365
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
auto post_order(const T &G)
Post-order traversal of a graph.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition ModRef.h:28
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
@ ModRef
The access may reference and may modify the value stored in memory.
Definition ModRef.h:36
@ Mod
The access may modify the value stored in memory.
Definition ModRef.h:34
@ NoModRef
The access neither references nor modifies the value stored in memory.
Definition ModRef.h:30
@ ErrnoMem
Errno memory.
Definition ModRef.h:66
@ ArgMem
Access to memory via argument pointers.
Definition ModRef.h:62
@ Other
Any other memory.
Definition ModRef.h:68
LLVM_ABI const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2192
@ Continue
Definition DWP.h:22
LLVM_ABI bool inferAttributesFromOthers(Function &F)
If we can infer one attribute from another on the declaration of a function, explicitly materialize t...
Definition Local.cpp:4017
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool capturesAll(CaptureComponents CC)
Definition ModRef.h:404
bool capturesNothing(CaptureComponents CC)
Definition ModRef.h:375
bool isNoModRef(const ModRefInfo MRI)
Definition ModRef.h:40
LLVM_ABI void FindFunctionBackedges(const Function &F, SmallVectorImpl< std::pair< const BasicBlock *, const BasicBlock * > > &Result)
Analyze the specified function to find all of the loop backedges in the function and return them.
Definition CFG.cpp:36
LLVM_ABI bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
bool capturesAnyProvenance(CaptureComponents CC)
Definition ModRef.h:400
bool isRefSet(const ModRefInfo MRI)
Definition ModRef.h:52
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool canReturn(const Function &F)
Return true if there is at least a path through which F can return, false if there is no such path.
Definition CFG.cpp:405
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, const SimplifyQuery &SQ, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
This callback is used in conjunction with PointerMayBeCaptured.
Flags specific to function summaries.
SmallVectorImpl< ArgumentGraphNode * >::iterator ChildIteratorType
static ChildIteratorType child_begin(NodeRef N)
static ChildIteratorType child_end(NodeRef N)
static ChildIteratorType nodes_end(ArgumentGraph *AG)
static NodeRef getEntryNode(ArgumentGraph *AG)
static ChildIteratorType nodes_begin(ArgumentGraph *AG)
typename ArgumentGraph *::UnknownGraphTypeError NodeRef
Definition GraphTraits.h:95
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
CaptureComponents UseCC
Components captured by this use.
Struct that holds a reference to a particular GUID in a global value summary.