// Doxygen page header (not part of the original source):
//   LLVM 17.0.0git — Attributor.cpp — "Go to the documentation of this file."
//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
15
17
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/Statistic.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/Constant.h"
30#include "llvm/IR/Constants.h"
31#include "llvm/IR/DataLayout.h"
32#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/Instruction.h"
37#include "llvm/IR/ValueHandle.h"
40#include "llvm/Support/Debug.h"
48#include <cstdint>
49
50#ifdef EXPENSIVE_CHECKS
51#include "llvm/IR/Verifier.h"
52#endif
53
54#include <cassert>
55#include <optional>
56#include <string>
57
58using namespace llvm;
59
60#define DEBUG_TYPE "attributor"
61#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
62
63DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
64 "Determine what attributes are manifested in the IR");
65
66STATISTIC(NumFnDeleted, "Number of function deleted");
67STATISTIC(NumFnWithExactDefinition,
68 "Number of functions with exact definitions");
69STATISTIC(NumFnWithoutExactDefinition,
70 "Number of functions without exact definitions");
71STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
72STATISTIC(NumAttributesTimedOut,
73 "Number of abstract attributes timed out before fixpoint");
74STATISTIC(NumAttributesValidFixpoint,
75 "Number of abstract attributes in a valid fixpoint state");
76STATISTIC(NumAttributesManifested,
77 "Number of abstract attributes manifested in IR");
78
79// TODO: Determine a good default value.
80//
81// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
82// (when run with the first 5 abstract attributes). The results also indicate
83// that we never reach 32 iterations but always find a fixpoint sooner.
84//
85// This will become more evolved once we perform two interleaved fixpoint
86// iterations: bottom-up and top-down.
88 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
89 cl::desc("Maximal number of fixpoint iterations."),
90 cl::init(32));
91
93 "attributor-max-initialization-chain-length", cl::Hidden,
95 "Maximal number of chained initializations (to avoid stack overflows)"),
98
100 "attributor-max-iterations-verify", cl::Hidden,
101 cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
102 cl::init(false));
103
105 "attributor-annotate-decl-cs", cl::Hidden,
106 cl::desc("Annotate call sites of function declarations."), cl::init(false));
107
108static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
109 cl::init(true), cl::Hidden);
110
111static cl::opt<bool>
112 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
113 cl::desc("Allow the Attributor to create shallow "
114 "wrappers for non-exact definitions."),
115 cl::init(false));
116
117static cl::opt<bool>
118 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
119 cl::desc("Allow the Attributor to use IP information "
120 "derived from non-exact functions via cloning"),
121 cl::init(false));
122
123// These options can only used for debug builds.
124#ifndef NDEBUG
126 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
127 cl::desc("Comma seperated list of attribute names that are "
128 "allowed to be seeded."),
130
132 "attributor-function-seed-allow-list", cl::Hidden,
133 cl::desc("Comma seperated list of function names that are "
134 "allowed to be seeded."),
136#endif
137
138static cl::opt<bool>
139 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
140 cl::desc("Dump the dependency graph to dot files."),
141 cl::init(false));
142
144 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
145 cl::desc("The prefix used for the CallGraph dot file names."));
146
147static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
148 cl::desc("View the dependency graph."),
149 cl::init(false));
150
151static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
152 cl::desc("Print attribute dependencies"),
153 cl::init(false));
154
156 "attributor-enable-call-site-specific-deduction", cl::Hidden,
157 cl::desc("Allow the Attributor to do call site specific analysis"),
158 cl::init(false));
159
160static cl::opt<bool>
161 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
162 cl::desc("Print Attributor's internal call graph"),
163 cl::init(false));
164
165static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
167 cl::desc("Try to simplify all loads."),
168 cl::init(true));
169
170/// Logic operators for the change status enum class.
171///
172///{
174 return L == ChangeStatus::CHANGED ? L : R;
175}
177 L = L | R;
178 return L;
179}
181 return L == ChangeStatus::UNCHANGED ? L : R;
182}
184 L = L & R;
185 return L;
186}
187///}
188
189bool AA::isGPU(const Module &M) {
190 Triple T(M.getTargetTriple());
191 return T.isAMDGPU() || T.isNVPTX();
192}
193
195 const AbstractAttribute &QueryingAA) {
196 // We are looking for volatile instructions or non-relaxed atomics.
197 if (const auto *CB = dyn_cast<CallBase>(&I)) {
198 if (CB->hasFnAttr(Attribute::NoSync))
199 return true;
200
201 // Non-convergent and readnone imply nosync.
202 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
203 return true;
204
206 return true;
207
208 const auto &NoSyncAA = A.getAAFor<AANoSync>(
209 QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
210 return NoSyncAA.isAssumedNoSync();
211 }
212
213 if (!I.mayReadOrWriteMemory())
214 return true;
215
216 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
217}
218
220 const Value &V, bool ForAnalysisOnly) {
221 // TODO: See the AAInstanceInfo class comment.
222 if (!ForAnalysisOnly)
223 return false;
224 auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
225 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
226 return InstanceInfoAA.isAssumedUniqueForAnalysis();
227}
228
230 const TargetLibraryInfo *TLI,
231 const DataLayout &DL,
232 AA::RangeTy *RangePtr) {
233 if (isa<AllocaInst>(Obj))
234 return UndefValue::get(&Ty);
235 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
236 return Init;
237 auto *GV = dyn_cast<GlobalVariable>(&Obj);
238 if (!GV)
239 return nullptr;
240
241 bool UsedAssumedInformation = false;
242 Constant *Initializer = nullptr;
243 if (A.hasGlobalVariableSimplificationCallback(*GV)) {
244 auto AssumedGV = A.getAssumedInitializerFromCallBack(
245 *GV, /* const AbstractAttribute *AA */ nullptr, UsedAssumedInformation);
246 Initializer = *AssumedGV;
247 if (!Initializer)
248 return nullptr;
249 } else {
250 if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
251 return nullptr;
252 if (!GV->hasInitializer())
253 return UndefValue::get(&Ty);
254
255 if (!Initializer)
256 Initializer = GV->getInitializer();
257 }
258
259 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
260 APInt Offset = APInt(64, RangePtr->Offset);
261 return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
262 }
263
264 return ConstantFoldLoadFromUniformValue(Initializer, &Ty);
265}
266
267bool AA::isValidInScope(const Value &V, const Function *Scope) {
268 if (isa<Constant>(V))
269 return true;
270 if (auto *I = dyn_cast<Instruction>(&V))
271 return I->getFunction() == Scope;
272 if (auto *A = dyn_cast<Argument>(&V))
273 return A->getParent() == Scope;
274 return false;
275}
276
278 InformationCache &InfoCache) {
279 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
280 return true;
281 const Function *Scope = nullptr;
282 const Instruction *CtxI = VAC.getCtxI();
283 if (CtxI)
284 Scope = CtxI->getFunction();
285 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
286 return A->getParent() == Scope;
287 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
288 if (I->getFunction() == Scope) {
289 if (const DominatorTree *DT =
291 *Scope))
292 return DT->dominates(I, CtxI);
293 // Local dominance check mostly for the old PM passes.
294 if (CtxI && I->getParent() == CtxI->getParent())
295 return llvm::any_of(
296 make_range(I->getIterator(), I->getParent()->end()),
297 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
298 }
299 }
300 return false;
301}
302
304 if (V.getType() == &Ty)
305 return &V;
306 if (isa<PoisonValue>(V))
307 return PoisonValue::get(&Ty);
308 if (isa<UndefValue>(V))
309 return UndefValue::get(&Ty);
310 if (auto *C = dyn_cast<Constant>(&V)) {
311 if (C->isNullValue())
312 return Constant::getNullValue(&Ty);
313 if (C->getType()->isPointerTy() && Ty.isPointerTy())
314 return ConstantExpr::getPointerCast(C, &Ty);
315 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
316 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
317 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
318 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
319 return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
320 }
321 }
322 return nullptr;
323}
324
325std::optional<Value *>
326AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
327 const std::optional<Value *> &B,
328 Type *Ty) {
329 if (A == B)
330 return A;
331 if (!B)
332 return A;
333 if (*B == nullptr)
334 return nullptr;
335 if (!A)
336 return Ty ? getWithType(**B, *Ty) : nullptr;
337 if (*A == nullptr)
338 return nullptr;
339 if (!Ty)
340 Ty = (*A)->getType();
341 if (isa_and_nonnull<UndefValue>(*A))
342 return getWithType(**B, *Ty);
343 if (isa<UndefValue>(*B))
344 return A;
345 if (*A && *B && *A == getWithType(**B, *Ty))
346 return A;
347 return nullptr;
348}
349
350template <bool IsLoad, typename Ty>
352 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
353 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
354 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
355 bool OnlyExact) {
356 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
357 << " (only exact: " << OnlyExact << ")\n";);
358
359 Value &Ptr = *I.getPointerOperand();
360 // Containers to remember the pointer infos and new copies while we are not
361 // sure that we can find all of them. If we abort we want to avoid spurious
362 // dependences and potential copies in the provided container.
364 SmallVector<Value *> NewCopies;
365 SmallVector<Instruction *> NewCopyOrigins;
366
367 const auto *TLI =
368 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
369
370 auto Pred = [&](Value &Obj) {
371 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
372 if (isa<UndefValue>(&Obj))
373 return true;
374 if (isa<ConstantPointerNull>(&Obj)) {
375 // A null pointer access can be undefined but any offset from null may
376 // be OK. We do not try to optimize the latter.
377 if (!NullPointerIsDefined(I.getFunction(),
378 Ptr.getType()->getPointerAddressSpace()) &&
379 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
380 AA::Interprocedural) == &Obj)
381 return true;
383 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
384 return false;
385 }
386 // TODO: Use assumed noalias return.
387 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
388 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
389 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
390 << "\n";);
391 return false;
392 }
393 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
394 if (!GV->hasLocalLinkage() &&
395 !(GV->isConstant() && GV->hasInitializer())) {
396 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
397 "linkage, not supported yet: "
398 << Obj << "\n";);
399 return false;
400 }
401
402 bool NullOnly = true;
403 bool NullRequired = false;
404 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
405 bool IsExact) {
406 if (!V || *V == nullptr)
407 NullOnly = false;
408 else if (isa<UndefValue>(*V))
409 /* No op */;
410 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
411 NullRequired = !IsExact;
412 else
413 NullOnly = false;
414 };
415
416 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
417 Value &V) {
418 Value *AdjV = AA::getWithType(V, *I.getType());
419 if (!AdjV) {
420 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
421 "cannot be converted to read type: "
422 << *Acc.getRemoteInst() << " : " << *I.getType()
423 << "\n";);
424 }
425 return AdjV;
426 };
427
428 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
429 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
430 return true;
431 if (IsLoad && Acc.isWrittenValueYetUndetermined())
432 return true;
433 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
434 if (OnlyExact && !IsExact && !NullOnly &&
435 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
436 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
437 << ", abort!\n");
438 return false;
439 }
440 if (NullRequired && !NullOnly) {
441 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
442 "one, however found non-null one: "
443 << *Acc.getRemoteInst() << ", abort!\n");
444 return false;
445 }
446 if (IsLoad) {
447 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
448 if (!Acc.isWrittenValueUnknown()) {
449 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
450 if (!V)
451 return false;
452 NewCopies.push_back(V);
453 NewCopyOrigins.push_back(Acc.getRemoteInst());
454 return true;
455 }
456 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
457 if (!SI) {
458 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
459 "instruction not supported yet: "
460 << *Acc.getRemoteInst() << "\n";);
461 return false;
462 }
463 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
464 if (!V)
465 return false;
466 NewCopies.push_back(V);
467 NewCopyOrigins.push_back(SI);
468 } else {
469 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
470 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
471 if (!LI && OnlyExact) {
472 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
473 "instruction not supported yet: "
474 << *Acc.getRemoteInst() << "\n";);
475 return false;
476 }
477 NewCopies.push_back(Acc.getRemoteInst());
478 }
479 return true;
480 };
481
482 // If the value has been written to we don't need the initial value of the
483 // object.
484 bool HasBeenWrittenTo = false;
485
486 AA::RangeTy Range;
487 auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
488 DepClassTy::NONE);
489 if (!PI.forallInterferingAccesses(A, QueryingAA, I,
490 /* FindInterferingWrites */ IsLoad,
491 /* FindInterferingReads */ !IsLoad,
492 CheckAccess, HasBeenWrittenTo, Range)) {
494 dbgs()
495 << "Failed to verify all interfering accesses for underlying object: "
496 << Obj << "\n");
497 return false;
498 }
499
500 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
501 const DataLayout &DL = A.getDataLayout();
502 Value *InitialValue =
503 AA::getInitialValueForObj(A, Obj, *I.getType(), TLI, DL, &Range);
504 if (!InitialValue) {
505 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
506 "underlying object, abort!\n");
507 return false;
508 }
509 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
510 if (NullRequired && !NullOnly) {
511 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
512 "null or undef, abort!\n");
513 return false;
514 }
515
516 NewCopies.push_back(InitialValue);
517 NewCopyOrigins.push_back(nullptr);
518 }
519
520 PIs.push_back(&PI);
521
522 return true;
523 };
524
525 const auto &AAUO = A.getAAFor<AAUnderlyingObjects>(
526 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
527 if (!AAUO.forallUnderlyingObjects(Pred)) {
529 dbgs() << "Underlying objects stored into could not be determined\n";);
530 return false;
531 }
532
533 // Only if we were successful collection all potential copies we record
534 // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
535 // given PotentialCopies container.
536 for (const auto *PI : PIs) {
537 if (!PI->getState().isAtFixpoint())
538 UsedAssumedInformation = true;
539 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
540 }
541 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
542 PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
543
544 return true;
545}
546
548 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
549 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
550 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
551 bool OnlyExact) {
552 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
553 A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
554 UsedAssumedInformation, OnlyExact);
555}
556
558 Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
559 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
560 bool OnlyExact) {
561 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
562 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
563 A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
564 UsedAssumedInformation, OnlyExact);
565}
566
568 const AbstractAttribute &QueryingAA,
569 bool RequireReadNone, bool &IsKnown) {
570
573 const auto &MemLocAA =
574 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
575 if (MemLocAA.isAssumedReadNone()) {
576 IsKnown = MemLocAA.isKnownReadNone();
577 if (!IsKnown)
578 A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
579 return true;
580 }
581 }
582
583 const auto &MemBehaviorAA =
584 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
585 if (MemBehaviorAA.isAssumedReadNone() ||
586 (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
587 IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
588 : MemBehaviorAA.isKnownReadOnly();
589 if (!IsKnown)
590 A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
591 return true;
592 }
593
594 return false;
595}
596
598 const AbstractAttribute &QueryingAA, bool &IsKnown) {
599 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
600 /* RequireReadNone */ false, IsKnown);
601}
603 const AbstractAttribute &QueryingAA, bool &IsKnown) {
604 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
605 /* RequireReadNone */ true, IsKnown);
606}
607
608static bool
610 const Instruction *ToI, const Function &ToFn,
611 const AbstractAttribute &QueryingAA,
612 const AA::InstExclusionSetTy *ExclusionSet,
613 std::function<bool(const Function &F)> GoBackwardsCB) {
615 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
616 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
617 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
618 << "]\n";
619 if (ExclusionSet)
620 for (auto *ES : *ExclusionSet)
621 dbgs() << *ES << "\n";
622 });
623
624 // We know kernels (generally) cannot be called from within the module. Thus,
625 // for reachability we would need to step back from a kernel which would allow
626 // us to reach anything anyway. Even if a kernel is invoked from another
627 // kernel, values like allocas and shared memory are not accessible. We
628 // implicitly check for this situation to avoid costly lookups.
629 if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
630 !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
631 FromI.getFunction()->hasFnAttribute("kernel")) {
632 LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
633 "module; success\n";);
634 return false;
635 }
636
637 // If we can go arbitrarily backwards we will eventually reach an entry point
638 // that can reach ToI. Only if a set of blocks through which we cannot go is
639 // provided, or once we track internal functions not accessible from the
640 // outside, it makes sense to perform backwards analysis in the absence of a
641 // GoBackwardsCB.
642 if (!GoBackwardsCB && !ExclusionSet) {
643 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
644 << " is not checked backwards and does not have an "
645 "exclusion set, abort\n");
646 return true;
647 }
648
651 Worklist.push_back(&FromI);
652
653 while (!Worklist.empty()) {
654 const Instruction *CurFromI = Worklist.pop_back_val();
655 if (!Visited.insert(CurFromI).second)
656 continue;
657
658 const Function *FromFn = CurFromI->getFunction();
659 if (FromFn == &ToFn) {
660 if (!ToI)
661 return true;
662 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
663 << " intraprocedurally\n");
664 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
665 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
666 bool Result =
667 ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI, ExclusionSet);
668 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
669 << (Result ? "can potentially " : "cannot ") << "reach "
670 << *ToI << " [Intra]\n");
671 if (Result)
672 return true;
673 }
674
675 bool Result = true;
676 if (!ToFn.isDeclaration() && ToI) {
677 const auto &ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
678 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
679 const Instruction &EntryI = ToFn.getEntryBlock().front();
680 Result =
681 ToReachabilityAA.isAssumedReachable(A, EntryI, *ToI, ExclusionSet);
682 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
683 << " " << (Result ? "can potentially " : "cannot ")
684 << "reach @" << *ToI << " [ToFn]\n");
685 }
686
687 if (Result) {
688 // The entry of the ToFn can reach the instruction ToI. If the current
689 // instruction is already known to reach the ToFn.
690 const auto &FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
691 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
692 Result = FnReachabilityAA.instructionCanReach(A, *CurFromI, ToFn,
693 ExclusionSet);
694 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
695 << " " << (Result ? "can potentially " : "cannot ")
696 << "reach @" << ToFn.getName() << " [FromFn]\n");
697 if (Result)
698 return true;
699 }
700
701 // TODO: Check assumed nounwind.
702 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
703 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
704 auto ReturnInstCB = [&](Instruction &Ret) {
705 bool Result =
706 ReachabilityAA.isAssumedReachable(A, *CurFromI, Ret, ExclusionSet);
707 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
708 << (Result ? "can potentially " : "cannot ") << "reach "
709 << Ret << " [Intra]\n");
710 return !Result;
711 };
712
713 // Check if we can reach returns.
714 bool UsedAssumedInformation = false;
715 if (A.checkForAllInstructions(ReturnInstCB, FromFn, QueryingAA,
716 {Instruction::Ret}, UsedAssumedInformation)) {
717 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
718 continue;
719 }
720
721 if (!GoBackwardsCB) {
722 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
723 << " is not checked backwards, abort\n");
724 return true;
725 }
726
727 // If we do not go backwards from the FromFn we are done here and so far we
728 // could not find a way to reach ToFn/ToI.
729 if (!GoBackwardsCB(*FromFn))
730 continue;
731
732 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
733 << FromFn->getName() << "\n");
734
735 auto CheckCallSite = [&](AbstractCallSite ACS) {
736 CallBase *CB = ACS.getInstruction();
737 if (!CB)
738 return false;
739
740 if (isa<InvokeInst>(CB))
741 return false;
742
744 Worklist.push_back(Inst);
745 return true;
746 };
747
748 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
749 /* RequireAllCallSites */ true,
750 &QueryingAA, UsedAssumedInformation);
751 if (Result) {
752 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
753 << " in @" << FromFn->getName()
754 << " failed, give up\n");
755 return true;
756 }
757
758 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
759 << " in @" << FromFn->getName()
760 << " worklist size is: " << Worklist.size() << "\n");
761 }
762 return false;
763}
764
766 Attributor &A, const Instruction &FromI, const Instruction &ToI,
767 const AbstractAttribute &QueryingAA,
768 const AA::InstExclusionSetTy *ExclusionSet,
769 std::function<bool(const Function &F)> GoBackwardsCB) {
770 const Function *ToFn = ToI.getFunction();
771 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
772 ExclusionSet, GoBackwardsCB);
773}
774
776 Attributor &A, const Instruction &FromI, const Function &ToFn,
777 const AbstractAttribute &QueryingAA,
778 const AA::InstExclusionSetTy *ExclusionSet,
779 std::function<bool(const Function &F)> GoBackwardsCB) {
780 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
781 ExclusionSet, GoBackwardsCB);
782}
783
785 const AbstractAttribute &QueryingAA) {
786 if (isa<UndefValue>(Obj))
787 return true;
788 if (isa<AllocaInst>(Obj)) {
789 InformationCache &InfoCache = A.getInfoCache();
790 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
792 dbgs() << "[AA] Object '" << Obj
793 << "' is thread local; stack objects are thread local.\n");
794 return true;
795 }
796 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
797 QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL);
798 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
799 << (NoCaptureAA.isAssumedNoCapture() ? "" : "not")
800 << " thread local; "
801 << (NoCaptureAA.isAssumedNoCapture() ? "non-" : "")
802 << "captured stack object.\n");
803 return NoCaptureAA.isAssumedNoCapture();
804 }
805 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
806 if (GV->isConstant()) {
807 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
808 << "' is thread local; constant global\n");
809 return true;
810 }
811 if (GV->isThreadLocal()) {
812 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
813 << "' is thread local; thread local global\n");
814 return true;
815 }
816 }
817
818 if (A.getInfoCache().targetIsGPU()) {
819 if (Obj.getType()->getPointerAddressSpace() ==
820 (int)AA::GPUAddressSpace::Local) {
821 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
822 << "' is thread local; GPU local memory\n");
823 return true;
824 }
825 if (Obj.getType()->getPointerAddressSpace() ==
826 (int)AA::GPUAddressSpace::Constant) {
827 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
828 << "' is thread local; GPU constant memory\n");
829 return true;
830 }
831 }
832
833 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
834 return false;
835}
836
838 const AbstractAttribute &QueryingAA) {
839 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
840 return false;
841
843
844 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
845 if (!Loc || !Loc->Ptr) {
847 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
848 return false;
849 }
850 Ptrs.insert(Loc->Ptr);
851 return true;
852 };
853
854 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
855 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
856 return true;
857 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
858 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
859 return true;
860 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
861 return true;
862
863 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
864}
865
868 const AbstractAttribute &QueryingAA,
869 const Instruction *CtxI) {
870 for (const Value *Ptr : Ptrs) {
871 if (!Ptr) {
872 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
873 return true;
874 }
875
876 auto Pred = [&](Value &Obj) {
877 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
878 return true;
879 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
880 << "'; -> requires barrier\n");
881 return false;
882 };
883
884 const auto &UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
885 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
886 if (!UnderlyingObjsAA.forallUnderlyingObjects(Pred))
887 return true;
888 }
889 return false;
890}
891
892/// Return true if \p New is equal or worse than \p Old.
893static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
894 if (!Old.isIntAttribute())
895 return true;
896
897 return Old.getValueAsInt() >= New.getValueAsInt();
898}
899
900/// Return true if the information provided by \p Attr was added to the
901/// attribute list \p Attrs. This is only the case if it was not already present
902/// in \p Attrs at the position describe by \p PK and \p AttrIdx.
903static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
904 AttributeList &Attrs, int AttrIdx,
905 bool ForceReplace = false) {
906
907 if (Attr.isEnumAttribute()) {
909 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
910 if (!ForceReplace &&
911 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
912 return false;
913 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
914 return true;
915 }
916 if (Attr.isStringAttribute()) {
917 StringRef Kind = Attr.getKindAsString();
918 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
919 if (!ForceReplace &&
920 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
921 return false;
922 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
923 return true;
924 }
925 if (Attr.isIntAttribute()) {
927 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
928 if (!ForceReplace &&
929 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
930 return false;
931 Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
932 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
933 return true;
934 }
935
936 llvm_unreachable("Expected enum or string attribute!");
937}
938
// Resolve the callee Argument associated with this position: the anchor
// itself when it is an Argument, otherwise a *unique* callback-callee
// argument reached via abstract call sites, falling back to the direct
// callee's argument.
// NOTE(review): this listing is missing the function signature and the
// leading Argument fast-path lines (939-940); confirm against the full file.
941 return cast<Argument>(&getAnchorValue());
942
943 // Not an Argument and no argument number means this is not a call site
944 // argument, thus we cannot find a callback argument to return.
945 int ArgNo = getCallSiteArgNo();
946 if (ArgNo < 0)
947 return nullptr;
948
949 // Use abstract call sites to make the connection between the call site
950 // values and the ones in callbacks. If a callback was found that makes use
951 // of the underlying call site operand, we want the corresponding callback
952 // callee argument and not the direct callee argument.
953 std::optional<Argument *> CBCandidateArg;
954 SmallVector<const Use *, 4> CallbackUses;
955 const auto &CB = cast<CallBase>(getAnchorValue());
956 AbstractCallSite::getCallbackUses(CB, CallbackUses);
957 for (const Use *U : CallbackUses) {
958 AbstractCallSite ACS(U);
959 assert(ACS && ACS.isCallbackCall());
960 if (!ACS.getCalledFunction())
961 continue;
962
963 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
964
965 // Test if the underlying call site operand is argument number u of the
966 // callback callee.
967 if (ACS.getCallArgOperandNo(u) != ArgNo)
968 continue;
969
970 assert(ACS.getCalledFunction()->arg_size() > u &&
971 "ACS mapped into var-args arguments!");
// A second candidate makes the mapping ambiguous; give up on callbacks.
972 if (CBCandidateArg) {
973 CBCandidateArg = nullptr;
974 break;
975 }
976 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
977 }
978 }
979
980 // If we found a unique callback candidate argument, return it.
981 if (CBCandidateArg && *CBCandidateArg)
982 return *CBCandidateArg;
983
984 // If no callbacks were found, or none used the underlying call site operand
985 // exclusively, use the direct callee argument if available.
986 const Function *Callee = CB.getCalledFunction();
987 if (Callee && Callee->arg_size() > unsigned(ArgNo))
988 return Callee->getArg(ArgNo);
989
990 return nullptr;
991}
992
// Run one update step of an abstract attribute: skip if the state is
// already at a fixpoint, otherwise delegate to updateImpl(A).
// NOTE(review): the signature and the HasChanged declaration (lines
// 993-994) were dropped from this listing.
995 if (getState().isAtFixpoint())
996 return HasChanged;
997
998 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
999
1000 HasChanged = updateImpl(A);
1001
1002 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1003 << "\n");
1004
1005 return HasChanged;
1006}
1007
1010 const ArrayRef<Attribute> &DeducedAttrs,
1011 bool ForceReplace) {
1012 Function *ScopeFn = IRP.getAnchorScope();
1014
1015 // In the following some generic code that will manifest attributes in
1016 // DeducedAttrs if they improve the current IR. Due to the different
1017 // annotation positions we use the underlying AttributeList interface.
1018
1019 AttributeList Attrs;
1020 switch (PK) {
1027 Attrs = ScopeFn->getAttributes();
1028 break;
1032 Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
1033 break;
1034 }
1035
1037 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1038 for (const Attribute &Attr : DeducedAttrs) {
1039 if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
1040 continue;
1041
1042 HasChanged = ChangeStatus::CHANGED;
1043 }
1044
1045 if (HasChanged == ChangeStatus::UNCHANGED)
1046 return HasChanged;
1047
1048 switch (PK) {
1052 ScopeFn->setAttributes(Attrs);
1053 break;
1057 cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
1058 break;
1061 break;
1062 }
1063
1064 return HasChanged;
1065}
1066
// Collect all positions that "subsume" IRP, i.e. positions whose
// attributes also apply to IRP (e.g. the callee for a call site). The
// position itself is always the first entry.
// NOTE(review): the constructor signature and several switch case labels
// were dropped from this listing.
1068const IRPosition
1070
1072 IRPositions.emplace_back(IRP);
1073
1074 // Helper to determine if operand bundles on a call site are benign or
1075 // potentially problematic. We handle only llvm.assume for now.
1076 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1077 return (isa<IntrinsicInst>(CB) &&
1078 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1079 };
1080
1081 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1082 switch (IRP.getPositionKind()) {
1086 return;
1089 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1090 return;
1092 assert(CB && "Expected call site!");
1093 // TODO: We need to look at the operand bundles similar to the redirection
1094 // in CallBase.
1095 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1096 if (const Function *Callee = CB->getCalledFunction())
1097 IRPositions.emplace_back(IRPosition::function(*Callee));
1098 return;
1100 assert(CB && "Expected call site!");
1101 // TODO: We need to look at the operand bundles similar to the redirection
1102 // in CallBase.
1103 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1104 if (const Function *Callee = CB->getCalledFunction()) {
1105 IRPositions.emplace_back(IRPosition::returned(*Callee));
1106 IRPositions.emplace_back(IRPosition::function(*Callee));
// Arguments marked `returned` feed the call's return value, so their
// positions subsume the call site return position too.
1107 for (const Argument &Arg : Callee->args())
1108 if (Arg.hasReturnedAttr()) {
1109 IRPositions.emplace_back(
1110 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1111 IRPositions.emplace_back(
1112 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1113 IRPositions.emplace_back(IRPosition::argument(Arg));
1114 }
1115 }
1116 }
1117 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1118 return;
1120 assert(CB && "Expected call site!");
1121 // TODO: We need to look at the operand bundles similar to the redirection
1122 // in CallBase.
1123 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1124 const Function *Callee = CB->getCalledFunction();
1125 if (Callee) {
1126 if (Argument *Arg = IRP.getAssociatedArgument())
1127 IRPositions.emplace_back(IRPosition::argument(*Arg));
1128 IRPositions.emplace_back(IRPosition::function(*Callee));
1129 }
1130 }
1131 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1132 return;
1133 }
1134 }
1135}
1136
// Return true if any of the attribute kinds in AKs is present at this
// position, at a subsuming position (unless IgnoreSubsumingPositions),
// or derivable from llvm.assume knowledge (when an Attributor is given).
// NOTE(review): the signature line (1137) is missing from this listing.
1138 bool IgnoreSubsumingPositions, Attributor *A) const {
1140 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1141 for (Attribute::AttrKind AK : AKs)
1142 if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
1143 return true;
1144 // The first position returned by the SubsumingPositionIterator is
1145 // always the position itself. If we ignore subsuming positions we
1146 // are done after the first iteration.
1147 if (IgnoreSubsumingPositions)
1148 break;
1149 }
1150 if (A)
1151 for (Attribute::AttrKind AK : AKs)
1152 if (getAttrsFromAssumes(AK, Attrs, *A))
1153 return true;
1154 return false;
1155}
1156
// Collect all attributes of the kinds in AKs into Attrs, drawing from this
// position, subsuming positions (unless IgnoreSubsumingPositions), and
// llvm.assume knowledge (when an Attributor is given). Unlike hasAttr,
// this gathers everything instead of stopping at the first hit.
1159 bool IgnoreSubsumingPositions, Attributor *A) const {
1160 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1161 for (Attribute::AttrKind AK : AKs)
1162 EquivIRP.getAttrsFromIRAttr(AK, Attrs);
1163 // The first position returned by the SubsumingPositionIterator is
1164 // always the position itself. If we ignore subsuming positions we
1165 // are done after the first iteration.
1166 if (IgnoreSubsumingPositions)
1167 break;
1168 }
1169 if (A)
1170 for (Attribute::AttrKind AK : AKs)
1171 getAttrsFromAssumes(AK, Attrs, *A);
1172}
1173
// Look up attribute kind AK in the IR attribute list of this position's
// anchor (call base or associated function); append it to Attrs and return
// true if found.
// NOTE(review): the early-exit condition on line 1176 was dropped from this
// listing (it guards the `return false` below).
1174bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
1175 SmallVectorImpl<Attribute> &Attrs) const {
1177 return false;
1178
1179 AttributeList AttrList;
1180 if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
1181 AttrList = CB->getAttributes();
1182 else
1183 AttrList = getAssociatedFunction()->getAttributes();
1184
1185 bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
1186 if (HasAttr)
1187 Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
1188 return HasAttr;
1189}
1190
// Derive attributes of kind AK for the associated value from llvm.assume
// knowledge: for every assume-derived fact about the value, keep it only if
// the assume is in the must-be-executed context of this position's context
// instruction. Returns true if anything was appended to Attrs.
1191bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
1193 Attributor &A) const {
1194 assert(getPositionKind() != IRP_INVALID && "Did expect a valid position!");
1195 Value &AssociatedValue = getAssociatedValue();
1196
1197 const Assume2KnowledgeMap &A2K =
1198 A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1199
1200 // Check if we found any potential assume use, if not we don't need to create
1201 // explorer iterators.
1202 if (A2K.empty())
1203 return false;
1204
1205 LLVMContext &Ctx = AssociatedValue.getContext();
1206 unsigned AttrsSize = Attrs.size();
1208 A.getInfoCache().getMustBeExecutedContextExplorer();
1209 auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
1210 for (const auto &It : A2K)
1211 if (Explorer.findInContextOf(It.first, EIt, EEnd))
1212 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max))
1213 return AttrsSize != Attrs.size();
1214}
1215
// Sanity-check the internal encoding of an IRPosition; compiled only under
// EXPENSIVE_CHECKS. Each kind asserts its expected anchor value type and
// that the associated value matches the encoding.
1216void IRPosition::verify() {
1217#ifdef EXPENSIVE_CHECKS
1218 switch (getPositionKind()) {
1219 case IRP_INVALID:
1220 assert((CBContext == nullptr) &&
1221 "Invalid position must not have CallBaseContext!");
1222 assert(!Enc.getOpaqueValue() &&
1223 "Expected a nullptr for an invalid position!");
1224 return;
1225 case IRP_FLOAT:
1226 assert((!isa<Argument>(&getAssociatedValue())) &&
1227 "Expected specialized kind for argument values!");
1228 return;
1229 case IRP_RETURNED:
1230 assert(isa<Function>(getAsValuePtr()) &&
1231 "Expected function for a 'returned' position!");
1232 assert(getAsValuePtr() == &getAssociatedValue() &&
1233 "Associated value mismatch!");
1234 return;
// NOTE(review): the case label for 'call site returned' (line 1235) was
// dropped from this listing.
1236 assert((CBContext == nullptr) &&
1237 "'call site returned' position must not have CallBaseContext!");
1238 assert((isa<CallBase>(getAsValuePtr())) &&
1239 "Expected call base for 'call site returned' position!");
1240 assert(getAsValuePtr() == &getAssociatedValue() &&
1241 "Associated value mismatch!");
1242 return;
1243 case IRP_CALL_SITE:
1244 assert((CBContext == nullptr) &&
1245 "'call site function' position must not have CallBaseContext!");
1246 assert((isa<CallBase>(getAsValuePtr())) &&
1247 "Expected call base for 'call site function' position!");
1248 assert(getAsValuePtr() == &getAssociatedValue() &&
1249 "Associated value mismatch!");
1250 return;
1251 case IRP_FUNCTION:
1252 assert(isa<Function>(getAsValuePtr()) &&
1253 "Expected function for a 'function' position!");
1254 assert(getAsValuePtr() == &getAssociatedValue() &&
1255 "Associated value mismatch!");
1256 return;
1257 case IRP_ARGUMENT:
1258 assert(isa<Argument>(getAsValuePtr()) &&
1259 "Expected argument for a 'argument' position!");
1260 assert(getAsValuePtr() == &getAssociatedValue() &&
1261 "Associated value mismatch!");
1262 return;
// NOTE(review): the case label for 'call site argument' (line 1263) was
// dropped from this listing.
1264 assert((CBContext == nullptr) &&
1265 "'call site argument' position must not have CallBaseContext!");
1266 Use *U = getAsUsePtr();
1267 (void)U; // Silence unused variable warning.
1268 assert(U && "Expected use for a 'call site argument' position!");
1269 assert(isa<CallBase>(U->getUser()) &&
1270 "Expected call base user for a 'call site argument' position!");
1271 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1272 "Expected call base argument operand for a 'call site argument' "
1273 "position");
1274 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1275 unsigned(getCallSiteArgNo()) &&
1276 "Argument number mismatch!");
1277 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1278 return;
1279 }
1280 }
1281#endif
1282}
1283
// Try to simplify the value at IRP to a Constant. Returns std::nullopt if
// the value is assumed dead/undetermined, a Constant on success, and
// nullptr if no constant simplification is possible.
1284std::optional<Constant *>
1286 const AbstractAttribute &AA,
1287 bool &UsedAssumedInformation) {
1288 // First check all callbacks provided by outside AAs. If any of them returns
1289 // a non-null value that is different from the associated value, or
1290 // std::nullopt, we assume it's simplified.
1291 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1292 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1293 if (!SimplifiedV)
1294 return std::nullopt;
1295 if (isa_and_nonnull<Constant>(*SimplifiedV))
1296 return cast<Constant>(*SimplifiedV);
// Note: only the first registered callback is consulted.
1297 return nullptr;
1298 }
1299 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1300 return C;
// Fall back to the generic simplification machinery; a missing-lines gap
// (1301, 1303) hides the Values declaration and a scope argument here.
1302 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1304 UsedAssumedInformation)) {
1305 if (Values.empty())
1306 return std::nullopt;
1307 if (auto *C = dyn_cast_or_null<Constant>(
1308 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1309 return C;
1310 }
1311 return nullptr;
1312}
1313
// Try to simplify the value at IRP within scope S. Returns std::nullopt
// for "assumed dead", a different Value if a simplification exists, or the
// associated value itself when nothing better is known.
// NOTE(review): the signature line (1314) and a few interior lines
// (1323, 1331-1332) are missing from this listing.
1315 const IRPosition &IRP, const AbstractAttribute *AA,
1316 bool &UsedAssumedInformation, AA::ValueScope S) {
1317 // First check all callbacks provided by outside AAs. If any of them returns
1318 // a non-null value that is different from the associated value, or
1319 // std::nullopt, we assume it's simplified.
1320 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1321 return CB(IRP, AA, UsedAssumedInformation);
1322
1324 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1325 return &IRP.getAssociatedValue();
1326 if (Values.empty())
1327 return std::nullopt;
1328 if (AA)
1329 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1330 return V;
1333 return nullptr;
1334 return &IRP.getAssociatedValue();
1335}
1336
// Populate Values with all assumed simplified values for IRP. Outside
// simplification callbacks take precedence; otherwise the AAPotentialValues
// abstract attribute is queried. Returns false if simplification failed.
1338 const IRPosition &IRP, const AbstractAttribute *AA,
1340 bool &UsedAssumedInformation) {
1341 // First check all callbacks provided by outside AAs. If any of them returns
1342 // a non-null value that is different from the associated value, or
1343 // std::nullopt, we assume it's simplified.
1344 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1345 for (const auto &CB : SimplificationCBs) {
1346 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1347 if (!CBResult.has_value())
1348 continue;
1349 Value *V = *CBResult;
1350 if (!V)
1351 return false;
// NOTE(review): the condition guarding this push_back (lines 1352-1353)
// was dropped from the listing.
1354 Values.push_back(AA::ValueAndContext{*V, nullptr});
1355 else
1356 return false;
1357 }
1358 if (!SimplificationCBs.empty())
1359 return true;
1360
1361 // If no high-level/outside simplification occurred, use AAPotentialValues.
1362 const auto &PotentialValuesAA =
1363 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1364 if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
1365 return false;
1366 UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
1367 return true;
1368}
1369
// Translate a value from callee (argument) context into the context of call
// site CB: constants pass through; a callee argument is mapped to the
// corresponding call site argument (unless it has a pointee-in-memory
// attribute); anything else cannot be translated.
1371 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1372 bool &UsedAssumedInformation) {
1373 if (!V)
1374 return V;
1375 if (*V == nullptr || isa<Constant>(*V))
1376 return V;
1377 if (auto *Arg = dyn_cast<Argument>(*V))
1378 if (CB.getCalledFunction() == Arg->getParent())
1379 if (!Arg->hasPointeeInMemoryValueAttr())
1380 return getAssumedSimplified(
1381 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1382 UsedAssumedInformation, AA::Intraprocedural);
1383 return nullptr;
1384}
1385
// Destructor: explicitly destruct all abstract attributes; their memory is
// owned by the BumpPtrAllocator and must not be freed individually.
1387 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1388 // thus we cannot delete them. We can, and want to, destruct them though.
1389 for (auto &It : AAMap) {
1390 AbstractAttribute *AA = It.getSecond();
1391 AA->~AbstractAttribute();
1392 }
1393}
1394
// Liveness query for a whole abstract attribute: positions anchored outside
// the set of processed functions are reported as not dead; otherwise the
// IRPosition overload decides.
1396 const AAIsDead *FnLivenessAA,
1397 bool &UsedAssumedInformation,
1398 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1399 const IRPosition &IRP = AA.getIRPosition();
1400 if (!Functions.count(IRP.getAnchorScope()))
1401 return false;
1402 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1403 CheckBBLivenessOnly, DepClass);
1404}
1405
// Liveness query for a single Use: dispatch on the user kind (call site
// argument, return, PHI incoming edge, removable store) to the most precise
// position, falling back to the user instruction's liveness.
1407 const AbstractAttribute *QueryingAA,
1408 const AAIsDead *FnLivenessAA,
1409 bool &UsedAssumedInformation,
1410 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1411 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1412 if (!UserI)
1413 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1414 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1415
1416 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1417 // For call site argument uses we can check if the argument is
1418 // unused/dead.
1419 if (CB->isArgOperand(&U)) {
1420 const IRPosition &CSArgPos =
1421 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1422 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1423 UsedAssumedInformation, CheckBBLivenessOnly,
1424 DepClass);
1425 }
1426 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1427 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1428 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1429 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1430 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
// A PHI use is dead if the incoming edge (its block terminator) is dead.
1431 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1432 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1433 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1434 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1435 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1436 const IRPosition IRP = IRPosition::inst(*SI);
1437 const AAIsDead &IsDeadAA =
1438 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1439 if (IsDeadAA.isRemovableStore()) {
1440 if (QueryingAA)
1441 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1442 if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
1443 UsedAssumedInformation = true;
1444 return true;
1445 }
1446 }
1447 }
1448
1449 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1450 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1451}
1452
// Liveness query for an Instruction: first consult function-level liveness
// (lazily creating AAIsDead for the function), then instruction-level
// liveness, and finally removable-store information when requested. Records
// dependences on the consulted AAs and flags assumed (non-fixed) results.
1454 const AbstractAttribute *QueryingAA,
1455 const AAIsDead *FnLivenessAA,
1456 bool &UsedAssumedInformation,
1457 bool CheckBBLivenessOnly, DepClassTy DepClass,
1458 bool CheckForDeadStore) {
1459 const IRPosition::CallBaseContext *CBCtx =
1460 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1461
// Blocks we inserted during manifest are never considered dead.
1462 if (ManifestAddedBlocks.contains(I.getParent()))
1463 return false;
1464
1465 const Function &F = *I.getFunction();
1466 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1467 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1468 QueryingAA, DepClassTy::NONE);
1469
1470 // Don't use recursive reasoning.
1471 if (QueryingAA == FnLivenessAA)
1472 return false;
1473
1474 // If we have a context instruction and a liveness AA we use it.
1475 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1476 : FnLivenessAA->isAssumedDead(&I)) {
1477 if (QueryingAA)
1478 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1479 if (!FnLivenessAA->isKnownDead(&I))
1480 UsedAssumedInformation = true;
1481 return true;
1482 }
1483
1484 if (CheckBBLivenessOnly)
1485 return false;
1486
1487 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1488 const AAIsDead &IsDeadAA =
1489 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1490
1491 // Don't use recursive reasoning.
1492 if (QueryingAA == &IsDeadAA)
1493 return false;
1494
1495 if (IsDeadAA.isAssumedDead()) {
1496 if (QueryingAA)
1497 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1498 if (!IsDeadAA.isKnownDead())
1499 UsedAssumedInformation = true;
1500 return true;
1501 }
1502
// A store that is removable counts as dead when the caller asked for it.
1503 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA.isRemovableStore()) {
1504 if (QueryingAA)
1505 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1506 if (!IsDeadAA.isKnownDead())
1507 UsedAssumedInformation = true;
1508 return true;
1509 }
1510
1511 return false;
1512}
1513
// Liveness query for an IRPosition: floating constants are never dead; the
// context instruction's BB-liveness is checked first, then a position-
// specific AAIsDead is consulted.
// NOTE(review): lines 1521 and 1538/1540 (a condition and the alternative
// position used to create the AAIsDead) are missing from this listing.
1515 const AbstractAttribute *QueryingAA,
1516 const AAIsDead *FnLivenessAA,
1517 bool &UsedAssumedInformation,
1518 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1519 // Don't check liveness for constants, e.g. functions, used as (floating)
1520 // values since the context instruction and such is here meaningless.
1522 isa<Constant>(IRP.getAssociatedValue())) {
1523 return false;
1524 }
1525
1526 Instruction *CtxI = IRP.getCtxI();
1527 if (CtxI &&
1528 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1529 /* CheckBBLivenessOnly */ true,
1530 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1531 return true;
1532
1533 if (CheckBBLivenessOnly)
1534 return false;
1535
1536 // If we haven't succeeded we query the specific liveness info for the IRP.
1537 const AAIsDead *IsDeadAA;
1539 IsDeadAA = &getOrCreateAAFor<AAIsDead>(
1541 QueryingAA, DepClassTy::NONE);
1542 else
1543 IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1544
1545 // Don't use recursive reasoning.
1546 if (QueryingAA == IsDeadAA)
1547 return false;
1548
1549 if (IsDeadAA->isAssumedDead()) {
1550 if (QueryingAA)
1551 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1552 if (!IsDeadAA->isKnownDead())
1553 UsedAssumedInformation = true;
1554 return true;
1555 }
1556
1557 return false;
1558}
1559
// Liveness query for a whole basic block, based on the enclosing function's
// AAIsDead. Records a dependence on the liveness AA when the block is
// assumed dead.
1561 const AbstractAttribute *QueryingAA,
1562 const AAIsDead *FnLivenessAA,
1563 DepClassTy DepClass) {
1564 const Function &F = *BB.getParent();
1565 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1566 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1567 QueryingAA, DepClassTy::NONE);
1568
1569 // Don't use recursive reasoning.
1570 if (QueryingAA == FnLivenessAA)
1571 return false;
1572
1573 if (FnLivenessAA->isAssumedDead(&BB)) {
1574 if (QueryingAA)
1575 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1576 return true;
1577 }
1578
1579 return false;
1580}
1581
// Visit all (transitive) uses of V with Pred, skipping dead and droppable
// uses, following values through stores to their potential copies, and
// following returns back to all call sites. Returns false as soon as Pred
// (or any follow-up expansion) fails.
// NOTE(review): the signature line (1582) and the Worklist/Visited
// declarations (1599-1600) are missing from this listing.
1583 function_ref<bool(const Use &, bool &)> Pred,
1584 const AbstractAttribute &QueryingAA, const Value &V,
1585 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1586 bool IgnoreDroppableUses,
1587 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1588
1589 // Check virtual uses first.
1590 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1591 if (!CB(*this, &QueryingAA))
1592 return false;
1593
1594 // Check the trivial case first as it catches void values.
1595 if (V.use_empty())
1596 return true;
1597
1598 const IRPosition &IRP = QueryingAA.getIRPosition();
1601
// Helper: enqueue all uses of V; when expanding a potential copy (OldUse
// set), the equivalence callback may veto the expansion.
1602 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1603 for (const Use &UU : V.uses()) {
1604 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1605 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1606 "rejected by the equivalence call back: "
1607 << *UU << "!\n");
1608 return false;
1609 }
1610
1611 Worklist.push_back(&UU);
1612 }
1613 return true;
1614 };
1615
1616 AddUsers(V, /* OldUse */ nullptr);
1617
1618 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1619 << " initial uses to check\n");
1620
1621 const Function *ScopeFn = IRP.getAnchorScope();
1622 const auto *LivenessAA =
1623 ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1625 : nullptr;
1626
1627 while (!Worklist.empty()) {
1628 const Use *U = Worklist.pop_back_val();
// Deduplicate only PHI uses here; cycles can otherwise not terminate.
1629 if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
1630 continue;
1632 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1633 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1634 << "\n";
1635 else
1636 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1637 << "\n";
1638 });
1639 bool UsedAssumedInformation = false;
1640 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1641 CheckBBLivenessOnly, LivenessDepClass)) {
1643 dbgs() << "[Attributor] Dead use, skip!\n");
1644 continue;
1645 }
1646 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1648 dbgs() << "[Attributor] Droppable user, skip!\n");
1649 continue;
1650 }
1651
// A stored value may be loaded elsewhere: continue with all potential
// copies of the stored value instead of the store use itself.
1652 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1653 if (&SI->getOperandUse(0) == U) {
1654 if (!Visited.insert(U).second)
1655 continue;
1656 SmallSetVector<Value *, 4> PotentialCopies;
1658 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1659 /* OnlyExact */ true)) {
1661 dbgs()
1662 << "[Attributor] Value is stored, continue with "
1663 << PotentialCopies.size()
1664 << " potential copies instead!\n");
1665 for (Value *PotentialCopy : PotentialCopies)
1666 if (!AddUsers(*PotentialCopy, U))
1667 return false;
1668 continue;
1669 }
1670 }
1671 }
1672
1673 bool Follow = false;
1674 if (!Pred(*U, Follow))
1675 return false;
1676 if (!Follow)
1677 continue;
1678
1679 User &Usr = *U->getUser();
1680 AddUsers(Usr, /* OldUse */ nullptr);
1681
// Returned values must additionally be followed into all call sites.
1682 auto *RI = dyn_cast<ReturnInst>(&Usr);
1683 if (!RI)
1684 continue;
1685
1686 Function &F = *RI->getFunction();
1687 auto CallSitePred = [&](AbstractCallSite ACS) {
1688 return AddUsers(*ACS.getInstruction(), U);
1689 };
1690 if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1691 &QueryingAA, UsedAssumedInformation)) {
1692 LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1693 "to all call sites: "
1694 << *RI << "\n");
1695 return false;
1696 }
1697 }
1698
1699 return true;
1700}
1701
// Convenience wrapper: run Pred over all call sites of the function
// associated with the querying AA's position.
1703 const AbstractAttribute &QueryingAA,
1704 bool RequireAllCallSites,
1705 bool &UsedAssumedInformation) {
1706 // We can try to determine information from
1707 // the call sites. However, this is only possible if all call sites are
1708 // known, hence the function has internal linkage.
1709 const IRPosition &IRP = QueryingAA.getIRPosition();
1710 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1711 if (!AssociatedFunction) {
1712 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1713 << "\n");
1714 return false;
1715 }
1716
1717 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1718 &QueryingAA, UsedAssumedInformation);
1719}
1720
// Run Pred over all (abstract) call sites of Fn. Walks Fn's uses, looking
// through constant cast expressions, skipping dead uses, and validating
// that matched call-site/callee argument types agree. With
// RequireAllCallSites set, any unknown or non-callee use fails the check.
// NOTE(review): the signature (1721) and the Uses worklist seed (1739)
// are missing from this listing.
1722 const Function &Fn,
1723 bool RequireAllCallSites,
1724 const AbstractAttribute *QueryingAA,
1725 bool &UsedAssumedInformation,
1726 bool CheckPotentiallyDead) {
1727 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1728 LLVM_DEBUG(
1729 dbgs()
1730 << "[Attributor] Function " << Fn.getName()
1731 << " has no internal linkage, hence not all call sites are known\n");
1732 return false;
1733 }
1734 // Check virtual uses first.
1735 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1736 if (!CB(*this, QueryingAA))
1737 return false;
1738
1740 for (unsigned u = 0; u < Uses.size(); ++u) {
1741 const Use &U = *Uses[u];
1743 if (auto *Fn = dyn_cast<Function>(U))
1744 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1745 << *U.getUser() << "\n";
1746 else
1747 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1748 << "\n";
1749 });
1750 if (!CheckPotentiallyDead &&
1751 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1752 /* CheckBBLivenessOnly */ true)) {
1754 dbgs() << "[Attributor] Dead use, skip!\n");
1755 continue;
1756 }
// Look through pointer casts: enqueue the cast expression's uses.
1757 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1758 if (CE->isCast() && CE->getType()->isPointerTy()) {
1760 dbgs() << "[Attributor] Use, is constant cast expression, add "
1761 << CE->getNumUses() << " uses of that expression instead!\n";
1762 });
1763 for (const Use &CEU : CE->uses())
1764 Uses.push_back(&CEU);
1765 continue;
1766 }
1767 }
1768
1769 AbstractCallSite ACS(&U);
1770 if (!ACS) {
1771 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1772 << " has non call site use " << *U.get() << " in "
1773 << *U.getUser() << "\n");
1774 // BlockAddress users are allowed.
1775 if (isa<BlockAddress>(U.getUser()))
1776 continue;
1777 return false;
1778 }
1779
1780 const Use *EffectiveUse =
1781 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1782 if (!ACS.isCallee(EffectiveUse)) {
1783 if (!RequireAllCallSites) {
1784 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1785 << " is not a call of " << Fn.getName()
1786 << ", skip use\n");
1787 continue;
1788 }
1789 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1790 << " is an invalid use of " << Fn.getName() << "\n");
1791 return false;
1792 }
1793
1794 // Make sure the arguments that can be matched between the call site and the
1795 // callee agree on their type. It is unlikely they do not and it doesn't
1796 // make sense for all attributes to know/care about this.
1797 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1798 unsigned MinArgsParams =
1799 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1800 for (unsigned u = 0; u < MinArgsParams; ++u) {
1801 Value *CSArgOp = ACS.getCallArgOperand(u);
1802 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1803 LLVM_DEBUG(
1804 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1805 << u << "@" << Fn.getName() << ": "
1806 << *Fn.getArg(u)->getType() << " vs. "
1807 << *ACS.getCallArgOperand(u)->getType() << "\n");
1808 return false;
1809 }
1810 }
1811
1812 if (Pred(ACS))
1813 continue;
1814
1815 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1816 << *ACS.getInstruction() << "\n");
1817 return false;
1818 }
1819
1820 return true;
1821}
1822
// Decide whether a call-base context should be propagated for IRP.
// NOTE(review): the return statement (line 1827) is missing from this
// listing; confirm the actual policy against the full file.
1823bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1824 // TODO: Maintain a cache of Values that are
1825 // on the pathway from an Argument to an Instruction that would affect the
1826 // liveness/return state etc.
1828}
1829
// Run Pred over all returned values of the associated function together
// with the return instructions producing them, via AAReturnedValues.
// Fails when no function is associated or its state is invalid.
1831 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
1832 const AbstractAttribute &QueryingAA) {
1833
1834 const IRPosition &IRP = QueryingAA.getIRPosition();
1835 // Since we need to provide return instructions we have to have an exact
1836 // definition.
1837 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1838 if (!AssociatedFunction)
1839 return false;
1840
1841 // If this is a call site query we use the call site specific return values
1842 // and liveness information.
1843 // TODO: use the function scope once we have call site AAReturnedValues.
1844 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1845 const auto &AARetVal =
1846 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1847 if (!AARetVal.getState().isValidState())
1848 return false;
1849
1850 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
1851}
1852
// Simplified variant of the above: run Pred over returned values only,
// discarding the associated return instructions.
1854 function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) {
1855
1856 const IRPosition &IRP = QueryingAA.getIRPosition();
1857 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1858 if (!AssociatedFunction)
1859 return false;
1860
1861 // TODO: use the function scope once we have call site AAReturnedValues.
1862 const IRPosition &QueryIRP = IRPosition::function(
1863 *AssociatedFunction, QueryingAA.getCallBaseContext());
1864 const auto &AARetVal =
1865 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1866 if (!AARetVal.getState().isValidState())
1867 return false;
1868
1869 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
1870 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
1871 return Pred(RV);
1872 });
1873}
1874
// Static helper: run Pred over every instruction whose opcode appears in
// Opcodes, using the opcode-to-instructions map; skips instructions that
// are (assumed) dead unless CheckPotentiallyDead is set.
// NOTE(review): the helper's signature head (lines 1875-1876) is missing
// from this listing.
1877 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
1878 const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
1879 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
1880 bool CheckPotentiallyDead = false) {
1881 for (unsigned Opcode : Opcodes) {
1882 // Check if we have instructions with this opcode at all first.
1883 auto *Insts = OpcodeInstMap.lookup(Opcode);
1884 if (!Insts)
1885 continue;
1886
1887 for (Instruction *I : *Insts) {
1888 // Skip dead instructions.
1889 if (A && !CheckPotentiallyDead &&
1890 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
1891 UsedAssumedInformation, CheckBBLivenessOnly)) {
1893 dbgs() << "[Attributor] Instruction " << *I
1894 << " is potentially dead, skip!\n";);
1895 continue;
1896 }
1897
1898 if (!Pred(*I))
1899 return false;
1900 }
1901 }
1902 return true;
1903}
1904
// Run Pred over all instructions with the given opcodes inside Fn,
// requiring an exact definition; delegates to checkForAllInstructionsImpl
// with Fn's liveness AA (skipped when CheckPotentiallyDead).
1906 const Function *Fn,
1907 const AbstractAttribute &QueryingAA,
1908 const ArrayRef<unsigned> &Opcodes,
1909 bool &UsedAssumedInformation,
1910 bool CheckBBLivenessOnly,
1911 bool CheckPotentiallyDead) {
1912 // Since we need to provide instructions we have to have an exact definition.
1913 if (!Fn || Fn->isDeclaration())
1914 return false;
1915
1916 // TODO: use the function scope once we have call site AAReturnedValues.
1917 const IRPosition &QueryIRP = IRPosition::function(*Fn);
1918 const auto *LivenessAA =
1919 CheckPotentiallyDead
1920 ? nullptr
1921 : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
1922
1923 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
1924 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
1925 LivenessAA, Opcodes, UsedAssumedInformation,
1926 CheckBBLivenessOnly, CheckPotentiallyDead))
1927 return false;
1928
1929 return true;
1930}
1931
// Overload that derives the target function from the querying AA's
// position and forwards to the function-based variant above.
1933 const AbstractAttribute &QueryingAA,
1934 const ArrayRef<unsigned> &Opcodes,
1935 bool &UsedAssumedInformation,
1936 bool CheckBBLivenessOnly,
1937 bool CheckPotentiallyDead) {
1938 const IRPosition &IRP = QueryingAA.getIRPosition();
1939 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1940 return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
1941 UsedAssumedInformation, CheckBBLivenessOnly,
1942 CheckPotentiallyDead);
1943}
1944
// Run Pred over all potentially memory-reading/writing instructions of the
// associated function (from the info cache), skipping instructions that
// are assumed dead.
1946 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
1947 bool &UsedAssumedInformation) {
1948
1949 const Function *AssociatedFunction =
1950 QueryingAA.getIRPosition().getAssociatedFunction();
1951 if (!AssociatedFunction)
1952 return false;
1953
1954 // TODO: use the function scope once we have call site AAReturnedValues.
1955 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1956 const auto &LivenessAA =
1957 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
1958
1959 for (Instruction *I :
1960 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
1961 // Skip dead instructions.
1962 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA,
1963 UsedAssumedInformation))
1964 continue;
1965
1966 if (!Pred(*I))
1967 return false;
1968 }
1969
1970 return true;
1971}
1972
1973void Attributor::runTillFixpoint() {
1974 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
1975 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
1976 << DG.SyntheticRoot.Deps.size()
1977 << " abstract attributes.\n");
1978
1979 // Now that all abstract attributes are collected and initialized we start
1980 // the abstract analysis.
1981
1982 unsigned IterationCounter = 1;
1983 unsigned MaxIterations =
1984 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
1985
1987 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
1988 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
1989
1990 do {
1991 // Remember the size to determine new attributes.
1992 size_t NumAAs = DG.SyntheticRoot.Deps.size();
1993 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
1994 << ", Worklist size: " << Worklist.size() << "\n");
1995
1996 // For invalid AAs we can fix dependent AAs that have a required dependence,
1997 // thereby folding long dependence chains in a single step without the need
1998 // to run updates.
1999 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2000 AbstractAttribute *InvalidAA = InvalidAAs[u];
2001
2002 // Check the dependences to fast track invalidation.
2004 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2005 << " has " << InvalidAA->Deps.size()
2006 << " required & optional dependences\n");
2007 for (auto &DepIt : InvalidAA->Deps) {
2008 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2009 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2011 dbgs() << " - recompute: " << *DepAA);
2012 Worklist.insert(DepAA);
2013 continue;
2014 }
2016 << " - invalidate: " << *DepAA);
2018 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2019 if (!DepAA->getState().isValidState())
2020 InvalidAAs.insert(DepAA);
2021 else
2022 ChangedAAs.push_back(DepAA);
2023 }
2024 InvalidAA->Deps.clear();
2025 }
2026
2027 // Add all abstract attributes that are potentially dependent on one that
2028 // changed to the work list.
2029 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2030 for (auto &DepIt : ChangedAA->Deps)
2031 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2032 ChangedAA->Deps.clear();
2033 }
2034
2035 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2036 << ", Worklist+Dependent size: " << Worklist.size()
2037 << "\n");
2038
2039 // Reset the changed and invalid set.
2040 ChangedAAs.clear();
2041 InvalidAAs.clear();
2042
2043 // Update all abstract attribute in the work list and record the ones that
2044 // changed.
2045 for (AbstractAttribute *AA : Worklist) {
2046 const auto &AAState = AA->getState();
2047 if (!AAState.isAtFixpoint())
2048 if (updateAA(*AA) == ChangeStatus::CHANGED)
2049 ChangedAAs.push_back(AA);
2050
2051 // Use the InvalidAAs vector to propagate invalid states fast transitively
2052 // without requiring updates.
2053 if (!AAState.isValidState())
2054 InvalidAAs.insert(AA);
2055 }
2056
2057 // Add attributes to the changed set if they have been created in the last
2058 // iteration.
2059 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2060 DG.SyntheticRoot.end());
2061
2062 // Reset the work list and repopulate with the changed abstract attributes.
2063 // Note that dependent ones are added above.
2064 Worklist.clear();
2065 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2066 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2067 QueryAAsAwaitingUpdate.end());
2068 QueryAAsAwaitingUpdate.clear();
2069
2070 } while (!Worklist.empty() &&
2071 (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
2072
2073 if (IterationCounter > MaxIterations && !Functions.empty()) {
2074 auto Remark = [&](OptimizationRemarkMissed ORM) {
2075 return ORM << "Attributor did not reach a fixpoint after "
2076 << ore::NV("Iterations", MaxIterations) << " iterations.";
2077 };
2078 Function *F = Functions.front();
2079 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2080 }
2081
2082 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2083 << IterationCounter << "/" << MaxIterations
2084 << " iterations\n");
2085
2086 // Reset abstract arguments not settled in a sound fixpoint by now. This
2087 // happens when we stopped the fixpoint iteration early. Note that only the
2088 // ones marked as "changed" *and* the ones transitively depending on them
2089 // need to be reverted to a pessimistic state. Others might not be in a
2090 // fixpoint state but we can use the optimistic results for them anyway.
2092 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2093 AbstractAttribute *ChangedAA = ChangedAAs[u];
2094 if (!Visited.insert(ChangedAA).second)
2095 continue;
2096
2097 AbstractState &State = ChangedAA->getState();
2098 if (!State.isAtFixpoint()) {
2100
2101 NumAttributesTimedOut++;
2102 }
2103
2104 for (auto &DepIt : ChangedAA->Deps)
2105 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2106 ChangedAA->Deps.clear();
2107 }
2108
2109 LLVM_DEBUG({
2110 if (!Visited.empty())
2111 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2112 << " abstract attributes.\n";
2113 });
2114
2115 if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
2116 errs() << "\n[Attributor] Fixpoint iteration done after: "
2117 << IterationCounter << "/" << MaxIterations << " iterations\n";
2118 llvm_unreachable("The fixpoint was not reached with exactly the number of "
2119 "specified iterations!");
2120 }
2121}
2122
2124 assert(AA.isQueryAA() &&
2125 "Non-query AAs should not be required to register for updates!");
2126 QueryAAsAwaitingUpdate.insert(&AA);
2127}
2128
2129ChangeStatus Attributor::manifestAttributes() {
2130 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2131 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2132
2133 unsigned NumManifested = 0;
2134 unsigned NumAtFixpoint = 0;
2135 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2136 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2137 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2138 AbstractState &State = AA->getState();
2139
2140 // If there is not already a fixpoint reached, we can now take the
2141 // optimistic state. This is correct because we enforced a pessimistic one
2142 // on abstract attributes that were transitively dependent on a changed one
2143 // already above.
2144 if (!State.isAtFixpoint())
2146
2147 // We must not manifest Attributes that use Callbase info.
2148 if (AA->hasCallBaseContext())
2149 continue;
2150 // If the state is invalid, we do not try to manifest it.
2151 if (!State.isValidState())
2152 continue;
2153
2154 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2155 continue;
2156
2157 // Skip dead code.
2158 bool UsedAssumedInformation = false;
2159 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2160 /* CheckBBLivenessOnly */ true))
2161 continue;
2162 // Check if the manifest debug counter that allows skipping manifestation of
2163 // AAs
2164 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2165 continue;
2166 // Manifest the state and record if we changed the IR.
2167 ChangeStatus LocalChange = AA->manifest(*this);
2168 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2169 AA->trackStatistics();
2170 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2171 << "\n");
2172
2173 ManifestChange = ManifestChange | LocalChange;
2174
2175 NumAtFixpoint++;
2176 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2177 }
2178
2179 (void)NumManifested;
2180 (void)NumAtFixpoint;
2181 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2182 << " arguments while " << NumAtFixpoint
2183 << " were in a valid fixpoint state\n");
2184
2185 NumAttributesManifested += NumManifested;
2186 NumAttributesValidFixpoint += NumAtFixpoint;
2187
2188 (void)NumFinalAAs;
2189 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2190 auto DepIt = DG.SyntheticRoot.Deps.begin();
2191 for (unsigned u = 0; u < NumFinalAAs; ++u)
2192 ++DepIt;
2193 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2194 ++u, ++DepIt) {
2195 errs() << "Unexpected abstract attribute: "
2196 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2197 << cast<AbstractAttribute>(DepIt->getPointer())
2198 ->getIRPosition()
2199 .getAssociatedValue()
2200 << "\n";
2201 }
2202 llvm_unreachable("Expected the final number of abstract attributes to "
2203 "remain unchanged!");
2204 }
2205 return ManifestChange;
2206}
2207
2208void Attributor::identifyDeadInternalFunctions() {
2209 // Early exit if we don't intend to delete functions.
2210 if (!Configuration.DeleteFns)
2211 return;
2212
2213 // To avoid triggering an assertion in the lazy call graph we will not delete
2214 // any internal library functions. We should modify the assertion though and
2215 // allow internals to be deleted.
2216 const auto *TLI =
2217 isModulePass()
2218 ? nullptr
2220 LibFunc LF;
2221
2222 // Identify dead internal functions and delete them. This happens outside
2223 // the other fixpoint analysis as we might treat potentially dead functions
2224 // as live to lower the number of iterations. If they happen to be dead, the
2225 // below fixpoint loop will identify and eliminate them.
2226
2227 SmallVector<Function *, 8> InternalFns;
2228 for (Function *F : Functions)
2229 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2230 InternalFns.push_back(F);
2231
2232 SmallPtrSet<Function *, 8> LiveInternalFns;
2233 bool FoundLiveInternal = true;
2234 while (FoundLiveInternal) {
2235 FoundLiveInternal = false;
2236 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
2237 Function *F = InternalFns[u];
2238 if (!F)
2239 continue;
2240
2241 bool UsedAssumedInformation = false;
2243 [&](AbstractCallSite ACS) {
2245 return ToBeDeletedFunctions.count(Callee) ||
2246 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2247 !LiveInternalFns.count(Callee));
2248 },
2249 *F, true, nullptr, UsedAssumedInformation)) {
2250 continue;
2251 }
2252
2253 LiveInternalFns.insert(F);
2254 InternalFns[u] = nullptr;
2255 FoundLiveInternal = true;
2256 }
2257 }
2258
2259 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
2260 if (Function *F = InternalFns[u])
2261 ToBeDeletedFunctions.insert(F);
2262}
2263
2264ChangeStatus Attributor::cleanupIR() {
2265 TimeTraceScope TimeScope("Attributor::cleanupIR");
2266 // Delete stuff at the end to avoid invalid references and a nice order.
2267 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2268 << ToBeDeletedFunctions.size() << " functions and "
2269 << ToBeDeletedBlocks.size() << " blocks and "
2270 << ToBeDeletedInsts.size() << " instructions and "
2271 << ToBeChangedValues.size() << " values and "
2272 << ToBeChangedUses.size() << " uses. To insert "
2273 << ToBeChangedToUnreachableInsts.size()
2274 << " unreachables.\n"
2275 << "Preserve manifest added " << ManifestAddedBlocks.size()
2276 << " blocks\n");
2277
2279 SmallVector<Instruction *, 32> TerminatorsToFold;
2280
2281 auto ReplaceUse = [&](Use *U, Value *NewV) {
2282 Value *OldV = U->get();
2283
2284 // If we plan to replace NewV we need to update it at this point.
2285 do {
2286 const auto &Entry = ToBeChangedValues.lookup(NewV);
2287 if (!get<0>(Entry))
2288 break;
2289 NewV = get<0>(Entry);
2290 } while (true);
2291
2292 Instruction *I = dyn_cast<Instruction>(U->getUser());
2293 assert((!I || isRunOn(*I->getFunction())) &&
2294 "Cannot replace an instruction outside the current SCC!");
2295
2296 // Do not replace uses in returns if the value is a must-tail call we will
2297 // not delete.
2298 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2299 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2300 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2301 return;
2302 // If we rewrite a return and the new value is not an argument, strip the
2303 // `returned` attribute as it is wrong now.
2304 if (!isa<Argument>(NewV))
2305 for (auto &Arg : RI->getFunction()->args())
2306 Arg.removeAttr(Attribute::Returned);
2307 }
2308
2309 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2310 << " instead of " << *OldV << "\n");
2311 U->set(NewV);
2312
2313 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2314 CGModifiedFunctions.insert(I->getFunction());
2315 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2317 DeadInsts.push_back(I);
2318 }
2319 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2320 auto *CB = cast<CallBase>(U->getUser());
2321 if (CB->isArgOperand(U)) {
2322 unsigned Idx = CB->getArgOperandNo(U);
2323 CB->removeParamAttr(Idx, Attribute::NoUndef);
2324 Function *Fn = CB->getCalledFunction();
2325 if (Fn && Fn->arg_size() > Idx)
2326 Fn->removeParamAttr(Idx, Attribute::NoUndef);
2327 }
2328 }
2329 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2330 Instruction *UserI = cast<Instruction>(U->getUser());
2331 if (isa<UndefValue>(NewV)) {
2332 ToBeChangedToUnreachableInsts.insert(UserI);
2333 } else {
2334 TerminatorsToFold.push_back(UserI);
2335 }
2336 }
2337 };
2338
2339 for (auto &It : ToBeChangedUses) {
2340 Use *U = It.first;
2341 Value *NewV = It.second;
2342 ReplaceUse(U, NewV);
2343 }
2344
2346 for (auto &It : ToBeChangedValues) {
2347 Value *OldV = It.first;
2348 auto [NewV, Done] = It.second;
2349 Uses.clear();
2350 for (auto &U : OldV->uses())
2351 if (Done || !U.getUser()->isDroppable())
2352 Uses.push_back(&U);
2353 for (Use *U : Uses) {
2354 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2355 if (!isRunOn(*I->getFunction()))
2356 continue;
2357 ReplaceUse(U, NewV);
2358 }
2359 }
2360
2361 for (const auto &V : InvokeWithDeadSuccessor)
2362 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2363 assert(isRunOn(*II->getFunction()) &&
2364 "Cannot replace an invoke outside the current SCC!");
2365 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2366 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2367 bool Invoke2CallAllowed =
2368 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2369 assert((UnwindBBIsDead || NormalBBIsDead) &&
2370 "Invoke does not have dead successors!");
2371 BasicBlock *BB = II->getParent();
2372 BasicBlock *NormalDestBB = II->getNormalDest();
2373 if (UnwindBBIsDead) {
2374 Instruction *NormalNextIP = &NormalDestBB->front();
2375 if (Invoke2CallAllowed) {
2376 changeToCall(II);
2377 NormalNextIP = BB->getTerminator();
2378 }
2379 if (NormalBBIsDead)
2380 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2381 } else {
2382 assert(NormalBBIsDead && "Broken invariant!");
2383 if (!NormalDestBB->getUniquePredecessor())
2384 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2385 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2386 }
2387 }
2388 for (Instruction *I : TerminatorsToFold) {
2389 assert(isRunOn(*I->getFunction()) &&
2390 "Cannot replace a terminator outside the current SCC!");
2391 CGModifiedFunctions.insert(I->getFunction());
2392 ConstantFoldTerminator(I->getParent());
2393 }
2394 for (const auto &V : ToBeChangedToUnreachableInsts)
2395 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2396 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2397 << "\n");
2398 assert(isRunOn(*I->getFunction()) &&
2399 "Cannot replace an instruction outside the current SCC!");
2400 CGModifiedFunctions.insert(I->getFunction());
2402 }
2403
2404 for (const auto &V : ToBeDeletedInsts) {
2405 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2406 if (auto *CB = dyn_cast<CallBase>(I)) {
2407 assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
2408 "Cannot delete an instruction outside the current SCC!");
2409 if (!isa<IntrinsicInst>(CB))
2410 Configuration.CGUpdater.removeCallSite(*CB);
2411 }
2412 I->dropDroppableUses();
2413 CGModifiedFunctions.insert(I->getFunction());
2414 if (!I->getType()->isVoidTy())
2415 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2416 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2417 DeadInsts.push_back(I);
2418 else
2419 I->eraseFromParent();
2420 }
2421 }
2422
2423 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2424
2425 LLVM_DEBUG({
2426 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2427 for (auto &I : DeadInsts)
2428 if (I)
2429 dbgs() << " - " << *I << "\n";
2430 });
2431
2433
2434 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2435 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2436 ToBeDeletedBBs.reserve(NumDeadBlocks);
2437 for (BasicBlock *BB : ToBeDeletedBlocks) {
2438 assert(isRunOn(*BB->getParent()) &&
2439 "Cannot delete a block outside the current SCC!");
2440 CGModifiedFunctions.insert(BB->getParent());
2441 // Do not delete BBs added during manifests of AAs.
2442 if (ManifestAddedBlocks.contains(BB))
2443 continue;
2444 ToBeDeletedBBs.push_back(BB);
2445 }
2446 // Actually we do not delete the blocks but squash them into a single
2447 // unreachable but untangling branches that jump here is something we need
2448 // to do in a more generic way.
2449 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2450 }
2451
2452 identifyDeadInternalFunctions();
2453
2454 // Rewrite the functions as requested during manifest.
2455 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2456
2457 for (Function *Fn : CGModifiedFunctions)
2458 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2459 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2460
2461 for (Function *Fn : ToBeDeletedFunctions) {
2462 if (!Functions.count(Fn))
2463 continue;
2464 Configuration.CGUpdater.removeFunction(*Fn);
2465 }
2466
2467 if (!ToBeChangedUses.empty())
2468 ManifestChange = ChangeStatus::CHANGED;
2469
2470 if (!ToBeChangedToUnreachableInsts.empty())
2471 ManifestChange = ChangeStatus::CHANGED;
2472
2473 if (!ToBeDeletedFunctions.empty())
2474 ManifestChange = ChangeStatus::CHANGED;
2475
2476 if (!ToBeDeletedBlocks.empty())
2477 ManifestChange = ChangeStatus::CHANGED;
2478
2479 if (!ToBeDeletedInsts.empty())
2480 ManifestChange = ChangeStatus::CHANGED;
2481
2482 if (!InvokeWithDeadSuccessor.empty())
2483 ManifestChange = ChangeStatus::CHANGED;
2484
2485 if (!DeadInsts.empty())
2486 ManifestChange = ChangeStatus::CHANGED;
2487
2488 NumFnDeleted += ToBeDeletedFunctions.size();
2489
2490 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2491 << " functions after manifest.\n");
2492
2493#ifdef EXPENSIVE_CHECKS
2494 for (Function *F : Functions) {
2495 if (ToBeDeletedFunctions.count(F))
2496 continue;
2497 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2498 }
2499#endif
2500
2501 return ManifestChange;
2502}
2503
2505 TimeTraceScope TimeScope("Attributor::run");
2506 AttributorCallGraph ACallGraph(*this);
2507
2508 if (PrintCallGraph)
2509 ACallGraph.populateAll();
2510
2511 Phase = AttributorPhase::UPDATE;
2512 runTillFixpoint();
2513
2514 // dump graphs on demand
2515 if (DumpDepGraph)
2516 DG.dumpGraph();
2517
2518 if (ViewDepGraph)
2519 DG.viewGraph();
2520
2522 DG.print();
2523
2524 Phase = AttributorPhase::MANIFEST;
2525 ChangeStatus ManifestChange = manifestAttributes();
2526
2527 Phase = AttributorPhase::CLEANUP;
2528 ChangeStatus CleanupChange = cleanupIR();
2529
2530 if (PrintCallGraph)
2531 ACallGraph.print();
2532
2533 return ManifestChange | CleanupChange;
2534}
2535
2536ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2537 TimeTraceScope TimeScope("updateAA", [&]() {
2538 return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2539 });
2540 assert(Phase == AttributorPhase::UPDATE &&
2541 "We can update AA only in the update stage!");
2542
2543 // Use a new dependence vector for this update.
2544 DependenceVector DV;
2545 DependenceStack.push_back(&DV);
2546
2547 auto &AAState = AA.getState();
2549 bool UsedAssumedInformation = false;
2550 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2551 /* CheckBBLivenessOnly */ true))
2552 CS = AA.update(*this);
2553
2554 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2555 // If the AA did not rely on outside information but changed, we run it
2556 // again to see if it found a fixpoint. Most AAs do but we don't require
2557 // them to. Hence, it might take the AA multiple iterations to get to a
2558 // fixpoint even if it does not rely on outside information, which is fine.
2560 if (CS == ChangeStatus::CHANGED)
2561 RerunCS = AA.update(*this);
2562
2563 // If the attribute did not change during the run or rerun, and it still did
2564 // not query any non-fix information, the state will not change and we can
2565 // indicate that right at this point.
2566 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2567 AAState.indicateOptimisticFixpoint();
2568 }
2569
2570 if (!AAState.isAtFixpoint())
2571 rememberDependences();
2572
2573 // Verify the stack was used properly, that is we pop the dependence vector we
2574 // put there earlier.
2575 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2576 (void)PoppedDV;
2577 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2578
2579 return CS;
2580}
2581
2583 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2584
2585 Module &M = *F.getParent();
2586 LLVMContext &Ctx = M.getContext();
2587 FunctionType *FnTy = F.getFunctionType();
2588
2589 Function *Wrapper =
2590 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2591 F.setName(""); // set the inside function anonymous
2592 M.getFunctionList().insert(F.getIterator(), Wrapper);
2593
2594 F.setLinkage(GlobalValue::InternalLinkage);
2595
2596 F.replaceAllUsesWith(Wrapper);
2597 assert(F.use_empty() && "Uses remained after wrapper was created!");
2598
2599 // Move the COMDAT section to the wrapper.
2600 // TODO: Check if we need to keep it for F as well.
2601 Wrapper->setComdat(F.getComdat());
2602 F.setComdat(nullptr);
2603
2604 // Copy all metadata and attributes but keep them on F as well.
2606 F.getAllMetadata(MDs);
2607 for (auto MDIt : MDs)
2608 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2609 Wrapper->setAttributes(F.getAttributes());
2610
2611 // Create the call in the wrapper.
2612 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2613
2615 Argument *FArgIt = F.arg_begin();
2616 for (Argument &Arg : Wrapper->args()) {
2617 Args.push_back(&Arg);
2618 Arg.setName((FArgIt++)->getName());
2619 }
2620
2621 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2622 CI->setTailCall(true);
2623 CI->addFnAttr(Attribute::NoInline);
2624 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2625
2626 NumFnShallowWrappersCreated++;
2627}
2628
2630 if (F.isDeclaration() || F.hasLocalLinkage() ||
2632 return false;
2633 return true;
2634}
2635
2637 if (!AllowDeepWrapper && !Force)
2638 return nullptr;
2639 if (!isInternalizable(F))
2640 return nullptr;
2641
2642 SmallPtrSet<Function *, 2> FnSet = {&F};
2643 DenseMap<Function *, Function *> InternalizedFns;
2644 internalizeFunctions(FnSet, InternalizedFns);
2645
2646 return InternalizedFns[&F];
2647}
2648
2651 for (Function *F : FnSet)
2653 return false;
2654
2655 FnMap.clear();
2656 // Generate the internalized version of each function.
2657 for (Function *F : FnSet) {
2658 Module &M = *F->getParent();
2659 FunctionType *FnTy = F->getFunctionType();
2660
2661 // Create a copy of the current function
2662 Function *Copied =
2663 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2664 F->getName() + ".internalized");
2665 ValueToValueMapTy VMap;
2666 auto *NewFArgIt = Copied->arg_begin();
2667 for (auto &Arg : F->args()) {
2668 auto ArgName = Arg.getName();
2669 NewFArgIt->setName(ArgName);
2670 VMap[&Arg] = &(*NewFArgIt++);
2671 }
2673
2674 // Copy the body of the original function to the new one
2675 CloneFunctionInto(Copied, F, VMap,
2677
2678 // Set the linakage and visibility late as CloneFunctionInto has some
2679 // implicit requirements.
2682
2683 // Copy metadata
2685 F->getAllMetadata(MDs);
2686 for (auto MDIt : MDs)
2687 if (!Copied->hasMetadata())
2688 Copied->addMetadata(MDIt.first, *MDIt.second);
2689
2690 M.getFunctionList().insert(F->getIterator(), Copied);
2691 Copied->setDSOLocal(true);
2692 FnMap[F] = Copied;
2693 }
2694
2695 // Replace all uses of the old function with the new internalized function
2696 // unless the caller is a function that was just internalized.
2697 for (Function *F : FnSet) {
2698 auto &InternalizedFn = FnMap[F];
2699 auto IsNotInternalized = [&](Use &U) -> bool {
2700 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2701 return !FnMap.lookup(CB->getCaller());
2702 return false;
2703 };
2704 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2705 }
2706
2707 return true;
2708}
2709
2711 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2712
2713 if (!Configuration.RewriteSignatures)
2714 return false;
2715
2716 Function *Fn = Arg.getParent();
2717 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2718 // Forbid the call site to cast the function return type. If we need to
2719 // rewrite these functions we need to re-create a cast for the new call site
2720 // (if the old had uses).
2721 if (!ACS.getCalledFunction() ||
2722 ACS.getInstruction()->getType() !=
2724 return false;
2725 if (ACS.getCalledOperand()->getType() != Fn->getType())
2726 return false;
2727 // Forbid must-tail calls for now.
2728 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2729 };
2730
2731 // Avoid var-arg functions for now.
2732 if (Fn->isVarArg()) {
2733 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2734 return false;
2735 }
2736
2737 // Avoid functions with complicated argument passing semantics.
2738 AttributeList FnAttributeList = Fn->getAttributes();
2739 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2740 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2741 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2742 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2743 LLVM_DEBUG(
2744 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2745 return false;
2746 }
2747
2748 // Avoid callbacks for now.
2749 bool UsedAssumedInformation = false;
2750 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2751 UsedAssumedInformation)) {
2752 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2753 return false;
2754 }
2755
2756 auto InstPred = [](Instruction &I) {
2757 if (auto *CI = dyn_cast<CallInst>(&I))
2758 return !CI->isMustTailCall();
2759 return true;
2760 };
2761
2762 // Forbid must-tail calls for now.
2763 // TODO:
2764 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2765 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2766 nullptr, {Instruction::Call},
2767 UsedAssumedInformation)) {
2768 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2769 return false;
2770 }
2771
2772 return true;
2773}
2774
2776 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2779 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2780 << Arg.getParent()->getName() << " with "
2781 << ReplacementTypes.size() << " replacements\n");
2782 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2783 "Cannot register an invalid rewrite");
2784
2785 Function *Fn = Arg.getParent();
2787 ArgumentReplacementMap[Fn];
2788 if (ARIs.empty())
2789 ARIs.resize(Fn->arg_size());
2790
2791 // If we have a replacement already with less than or equal new arguments,
2792 // ignore this request.
2793 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2794 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2795 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2796 return false;
2797 }
2798
2799 // If we have a replacement already but we like the new one better, delete
2800 // the old.
2801 ARI.reset();
2802
2803 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2804 << Arg.getParent()->getName() << " with "
2805 << ReplacementTypes.size() << " replacements\n");
2806
2807 // Remember the replacement.
2808 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2809 std::move(CalleeRepairCB),
2810 std::move(ACSRepairCB)));
2811
2812 return true;
2813}
2814
2815bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2816 bool Result = true;
2817#ifndef NDEBUG
2818 if (SeedAllowList.size() != 0)
2820 Function *Fn = AA.getAnchorScope();
2821 if (FunctionSeedAllowList.size() != 0 && Fn)
2823#endif
2824 return Result;
2825}
2826
2827ChangeStatus Attributor::rewriteFunctionSignatures(
2828 SmallSetVector<Function *, 8> &ModifiedFns) {
2830
2831 for (auto &It : ArgumentReplacementMap) {
2832 Function *OldFn = It.getFirst();
2833
2834 // Deleted functions do not require rewrites.
2835 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2836 continue;
2837
2839 It.getSecond();
2840 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2841
2842 SmallVector<Type *, 16> NewArgumentTypes;
2843 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2844
2845 // Collect replacement argument types and copy over existing attributes.
2846 AttributeList OldFnAttributeList = OldFn->getAttributes();
2847 for (Argument &Arg : OldFn->args()) {
2848 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2849 ARIs[Arg.getArgNo()]) {
2850 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2851 ARI->ReplacementTypes.end());
2852 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2853 AttributeSet());
2854 } else {
2855 NewArgumentTypes.push_back(Arg.getType());
2856 NewArgumentAttributes.push_back(
2857 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2858 }
2859 }
2860
2861 uint64_t LargestVectorWidth = 0;
2862 for (auto *I : NewArgumentTypes)
2863 if (auto *VT = dyn_cast<llvm::VectorType>(I))
2864 LargestVectorWidth =
2865 std::max(LargestVectorWidth,
2866 VT->getPrimitiveSizeInBits().getKnownMinValue());
2867
2868 FunctionType *OldFnTy = OldFn->getFunctionType();
2869 Type *RetTy = OldFnTy->getReturnType();
2870
2871 // Construct the new function type using the new arguments types.
2872 FunctionType *NewFnTy =
2873 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
2874
2875 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
2876 << "' from " << *OldFn->getFunctionType() << " to "
2877 << *NewFnTy << "\n");
2878
2879 // Create the new function body and insert it into the module.
2880 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
2881 OldFn->getAddressSpace(), "");
2882 Functions.insert(NewFn);
2883 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
2884 NewFn->takeName(OldFn);
2885 NewFn->copyAttributesFrom(OldFn);
2886
2887 // Patch the pointer to LLVM function in debug info descriptor.
2888 NewFn->setSubprogram(OldFn->getSubprogram());
2889 OldFn->setSubprogram(nullptr);
2890
2891 // Recompute the parameter attributes list based on the new arguments for
2892 // the function.
2893 LLVMContext &Ctx = OldFn->getContext();
2895 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
2896 NewArgumentAttributes));
2897 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
2898
2899 // Since we have now created the new function, splice the body of the old
2900 // function right into the new function, leaving the old rotting hulk of the
2901 // function empty.
2902 NewFn->splice(NewFn->begin(), OldFn);
2903
2904 // Fixup block addresses to reference new function.
2905 SmallVector<BlockAddress *, 8u> BlockAddresses;
2906 for (User *U : OldFn->users())
2907 if (auto *BA = dyn_cast<BlockAddress>(U))
2908 BlockAddresses.push_back(BA);
2909 for (auto *BA : BlockAddresses)
2910 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2911
2912 // Set of all "call-like" instructions that invoke the old function mapped
2913 // to their new replacements.
2915
2916 // Callback to create a new "call-like" instruction for a given one.
2917 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2918 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2919 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2920
2921 // Collect the new argument operands for the replacement call site.
2922 SmallVector<Value *, 16> NewArgOperands;
2923 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2924 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2925 unsigned NewFirstArgNum = NewArgOperands.size();
2926 (void)NewFirstArgNum; // only used inside assert.
2927 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2928 ARIs[OldArgNum]) {
2929 if (ARI->ACSRepairCB)
2930 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2931 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
2932 NewArgOperands.size() &&
2933 "ACS repair callback did not provide as many operand as new "
2934 "types were registered!");
2935 // TODO: Exose the attribute set to the ACS repair callback
2936 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
2937 AttributeSet());
2938 } else {
2939 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
2940 NewArgOperandAttributes.push_back(
2941 OldCallAttributeList.getParamAttrs(OldArgNum));
2942 }
2943 }
2944
2945 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
2946 "Mismatch # argument operands vs. # argument operand attributes!");
2947 assert(NewArgOperands.size() == NewFn->arg_size() &&
2948 "Mismatch # argument operands vs. # function arguments!");
2949
2950 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
2951 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
2952
2953 // Create a new call or invoke instruction to replace the old one.
2954 CallBase *NewCB;
2955 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
2956 NewCB =
2957 InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
2958 NewArgOperands, OperandBundleDefs, "", OldCB);
2959 } else {
2960 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
2961 "", OldCB);
2962 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
2963 NewCB = NewCI;
2964 }
2965
2966 // Copy over various properties and the new attributes.
2967 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
2968 NewCB->setCallingConv(OldCB->getCallingConv());
2969 NewCB->takeName(OldCB);
2971 Ctx, OldCallAttributeList.getFnAttrs(),
2972 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
2973
2975 LargestVectorWidth);
2976
2977 CallSitePairs.push_back({OldCB, NewCB});
2978 return true;
2979 };
2980
2981 // Use the CallSiteReplacementCreator to create replacement call sites.
2982 bool UsedAssumedInformation = false;
2983 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
2984 true, nullptr, UsedAssumedInformation,
2985 /* CheckPotentiallyDead */ true);
2986 (void)Success;
2987 assert(Success && "Assumed call site replacement to succeed!");
2988
2989 // Rewire the arguments.
2990 Argument *OldFnArgIt = OldFn->arg_begin();
2991 Argument *NewFnArgIt = NewFn->arg_begin();
2992 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
2993 ++OldArgNum, ++OldFnArgIt) {
2994 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2995 ARIs[OldArgNum]) {
2996 if (ARI->CalleeRepairCB)
2997 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
2998 if (ARI->ReplacementTypes.empty())
2999 OldFnArgIt->replaceAllUsesWith(
3000 PoisonValue::get(OldFnArgIt->getType()));
3001 NewFnArgIt += ARI->ReplacementTypes.size();
3002 } else {
3003 NewFnArgIt->takeName(&*OldFnArgIt);
3004 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3005 ++NewFnArgIt;
3006 }
3007 }
3008
3009 // Eliminate the instructions *after* we visited all of them.
3010 for (auto &CallSitePair : CallSitePairs) {
3011 CallBase &OldCB = *CallSitePair.first;
3012 CallBase &NewCB = *CallSitePair.second;
3013 assert(OldCB.getType() == NewCB.getType() &&
3014 "Cannot handle call sites with different types!");
3015 ModifiedFns.insert(OldCB.getFunction());
3016 Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
3017 OldCB.replaceAllUsesWith(&NewCB);
3018 OldCB.eraseFromParent();
3019 }
3020
3021 // Replace the function in the call graph (if any).
3022 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3023
3024 // If the old function was modified and needed to be reanalyzed, the new one
3025 // does now.
3026 if (ModifiedFns.remove(OldFn))
3027 ModifiedFns.insert(NewFn);
3028
3029 Changed = ChangeStatus::CHANGED;
3030 }
3031
3032 return Changed;
3033}
3034
3035void InformationCache::initializeInformationCache(const Function &CF,
3036 FunctionInfo &FI) {
3037 // As we do not modify the function here we can remove the const
3038 // withouth breaking implicit assumptions. At the end of the day, we could
3039 // initialize the cache eagerly which would look the same to the users.
3040 Function &F = const_cast<Function &>(CF);
3041
3042 // Walk all instructions to find interesting instructions that might be
3043 // queried by abstract attributes during their initialization or update.
3044 // This has to happen before we create attributes.
3045
3047
3048 // Add \p V to the assume uses map which track the number of uses outside of
3049 // "visited" assumes. If no outside uses are left the value is added to the
3050 // assume only use vector.
3051 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3053 if (auto *I = dyn_cast<Instruction>(&V))
3054 Worklist.push_back(I);
3055 while (!Worklist.empty()) {
3056 const Instruction *I = Worklist.pop_back_val();
3057 std::optional<short> &NumUses = AssumeUsesMap[I];
3058 if (!NumUses)
3059 NumUses = I->getNumUses();
3060 NumUses = *NumUses - /* this assume */ 1;
3061 if (*NumUses != 0)
3062 continue;
3063 AssumeOnlyValues.insert(I);
3064 for (const Value *Op : I->operands())
3065 if (auto *OpI = dyn_cast<Instruction>(Op))
3066 Worklist.push_back(OpI);
3067 }
3068 };
3069
3070 for (Instruction &I : instructions(&F)) {
3071 bool IsInterestingOpcode = false;
3072
3073 // To allow easy access to all instructions in a function with a given
3074 // opcode we store them in the InfoCache. As not all opcodes are interesting
3075 // to concrete attributes we only cache the ones that are as identified in
3076 // the following switch.
3077 // Note: There are no concrete attributes now so this is initially empty.
3078 switch (I.getOpcode()) {
3079 default:
3080 assert(!isa<CallBase>(&I) &&
3081 "New call base instruction type needs to be known in the "
3082 "Attributor.");
3083 break;
3084 case Instruction::Call:
3085 // Calls are interesting on their own, additionally:
3086 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3087 // For `must-tail` calls we remember the caller and callee.
3088 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3089 AssumeOnlyValues.insert(Assume);
3090 fillMapFromAssume(*Assume, KnowledgeMap);
3091 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3092 } else if (cast<CallInst>(I).isMustTailCall()) {
3093 FI.ContainsMustTailCall = true;
3094 if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
3095 getFunctionInfo(*Callee).CalledViaMustTail = true;
3096 }
3097 [[fallthrough]];
3098 case Instruction::CallBr:
3099 case Instruction::Invoke:
3100 case Instruction::CleanupRet:
3101 case Instruction::CatchSwitch:
3102 case Instruction::AtomicRMW:
3103 case Instruction::AtomicCmpXchg:
3104 case Instruction::Br:
3105 case Instruction::Resume:
3106 case Instruction::Ret:
3107 case Instruction::Load:
3108 // The alignment of a pointer is interesting for loads.
3109 case Instruction::Store:
3110 // The alignment of a pointer is interesting for stores.
3111 case Instruction::Alloca:
3112 case Instruction::AddrSpaceCast:
3113 IsInterestingOpcode = true;
3114 }
3115 if (IsInterestingOpcode) {
3116 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3117 if (!Insts)
3118 Insts = new (Allocator) InstructionVectorTy();
3119 Insts->push_back(&I);
3120 }
3121 if (I.mayReadOrWriteMemory())
3122 FI.RWInsts.push_back(&I);
3123 }
3124
3125 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3127 InlineableFunctions.insert(&F);
3128}
3129
3131 return AG.getAnalysis<AAManager>(F);
3132}
3133
3134InformationCache::FunctionInfo::~FunctionInfo() {
3135 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3136 // manually destroy them.
3137 for (auto &It : OpcodeInstMap)
3138 It.getSecond()->~InstructionVectorTy();
3139}
3140
3142 const AbstractAttribute &ToAA,
3143 DepClassTy DepClass) {
3144 if (DepClass == DepClassTy::NONE)
3145 return;
3146 // If we are outside of an update, thus before the actual fixpoint iteration
3147 // started (= when we create AAs), we do not track dependences because we will
3148 // put all AAs into the initial worklist anyway.
3149 if (DependenceStack.empty())
3150 return;
3151 if (FromAA.getState().isAtFixpoint())
3152 return;
3153 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3154}
3155
3156void Attributor::rememberDependences() {
3157 assert(!DependenceStack.empty() && "No dependences to remember!");
3158
3159 for (DepInfo &DI : *DependenceStack.back()) {
3160 assert((DI.DepClass == DepClassTy::REQUIRED ||
3161 DI.DepClass == DepClassTy::OPTIONAL) &&
3162 "Expected required or optional dependence (1 bit)!");
3163 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3164 DepAAs.insert(AbstractAttribute::DepTy(
3165 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3166 }
3167}
3168
3170 if (!VisitedFunctions.insert(&F).second)
3171 return;
3172 if (F.isDeclaration())
3173 return;
3174
3175 // In non-module runs we need to look at the call sites of a function to
3176 // determine if it is part of a must-tail call edge. This will influence what
3177 // attributes we can derive.
3178 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3179 if (!isModulePass() && !FI.CalledViaMustTail) {
3180 for (const Use &U : F.uses())
3181 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3182 if (CB->isCallee(&U) && CB->isMustTailCall())
3183 FI.CalledViaMustTail = true;
3184 }
3185
3187
3188 // Check for dead BasicBlocks in every function.
3189 // We need dead instruction detection because we do not want to deal with
3190 // broken IR in which SSA rules do not apply.
3191 getOrCreateAAFor<AAIsDead>(FPos);
3192
3193 // Every function might be "will-return".
3194 getOrCreateAAFor<AAWillReturn>(FPos);
3195
3196 // Every function might be "must-progress".
3197 getOrCreateAAFor<AAMustProgress>(FPos);
3198
3199 // Every function might contain instructions that cause "undefined behavior".
3200 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3201
3202 // Every function can be nounwind.
3203 getOrCreateAAFor<AANoUnwind>(FPos);
3204
3205 // Every function might be marked "nosync"
3206 getOrCreateAAFor<AANoSync>(FPos);
3207
3208 // Every function might be "no-free".
3209 getOrCreateAAFor<AANoFree>(FPos);
3210
3211 // Every function might be "no-return".
3212 getOrCreateAAFor<AANoReturn>(FPos);
3213
3214 // Every function might be "no-recurse".
3215 getOrCreateAAFor<AANoRecurse>(FPos);
3216
3217 // Every function can be "non-convergent".
3218 if (F.hasFnAttribute(Attribute::Convergent))
3219 getOrCreateAAFor<AANonConvergent>(FPos);
3220
3221 // Every function might be "readnone/readonly/writeonly/...".
3222 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3223
3224 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3225 getOrCreateAAFor<AAMemoryLocation>(FPos);
3226
3227 // Every function can track active assumptions.
3228 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3229
3230 // Every function might be applicable for Heap-To-Stack conversion.
3232 getOrCreateAAFor<AAHeapToStack>(FPos);
3233
3234 // Return attributes are only appropriate if the return type is non void.
3235 Type *ReturnType = F.getReturnType();
3236 if (!ReturnType->isVoidTy()) {
3237 // Argument attribute "returned" --- Create only one per function even
3238 // though it is an argument attribute.
3239 getOrCreateAAFor<AAReturnedValues>(FPos);
3240
3242
3243 // Every returned value might be dead.
3244 getOrCreateAAFor<AAIsDead>(RetPos);
3245
3246 // Every function might be simplified.
3247 bool UsedAssumedInformation = false;
3248 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3250
3251 // Every returned value might be marked noundef.
3252 getOrCreateAAFor<AANoUndef>(RetPos);
3253
3254 if (ReturnType->isPointerTy()) {
3255
3256 // Every function with pointer return type might be marked align.
3257 getOrCreateAAFor<AAAlign>(RetPos);
3258
3259 // Every function with pointer return type might be marked nonnull.
3260 getOrCreateAAFor<AANonNull>(RetPos);
3261
3262 // Every function with pointer return type might be marked noalias.
3263 getOrCreateAAFor<AANoAlias>(RetPos);
3264
3265 // Every function with pointer return type might be marked
3266 // dereferenceable.
3267 getOrCreateAAFor<AADereferenceable>(RetPos);
3268 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3269 getOrCreateAAFor<AANoFPClass>(RetPos);
3270 }
3271 }
3272
3273 for (Argument &Arg : F.args()) {
3275
3276 // Every argument might be simplified. We have to go through the Attributor
3277 // interface though as outside AAs can register custom simplification
3278 // callbacks.
3279 bool UsedAssumedInformation = false;
3280 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3282
3283 // Every argument might be dead.
3284 getOrCreateAAFor<AAIsDead>(ArgPos);
3285
3286 // Every argument might be marked noundef.
3287 getOrCreateAAFor<AANoUndef>(ArgPos);
3288
3289 if (Arg.getType()->isPointerTy()) {
3290 // Every argument with pointer type might be marked nonnull.
3291 getOrCreateAAFor<AANonNull>(ArgPos);
3292
3293 // Every argument with pointer type might be marked noalias.
3294 getOrCreateAAFor<AANoAlias>(ArgPos);
3295
3296 // Every argument with pointer type might be marked dereferenceable.
3297 getOrCreateAAFor<AADereferenceable>(ArgPos);
3298
3299 // Every argument with pointer type might be marked align.
3300 getOrCreateAAFor<AAAlign>(ArgPos);
3301
3302 // Every argument with pointer type might be marked nocapture.
3303 getOrCreateAAFor<AANoCapture>(ArgPos);
3304
3305 // Every argument with pointer type might be marked
3306 // "readnone/readonly/writeonly/..."
3307 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3308
3309 // Every argument with pointer type might be marked nofree.
3310 getOrCreateAAFor<AANoFree>(ArgPos);
3311
3312 // Every argument with pointer type might be privatizable (or promotable)
3313 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3314 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3315 getOrCreateAAFor<AANoFPClass>(ArgPos);
3316 }
3317 }
3318
3319 auto CallSitePred = [&](Instruction &I) -> bool {
3320 auto &CB = cast<CallBase>(I);
3321 IRPosition CBInstPos = IRPosition::inst(CB);
3323
3324 // Call sites might be dead if they do not have side effects and no live
3325 // users. The return value might be dead if there are no live users.
3326 getOrCreateAAFor<AAIsDead>(CBInstPos);
3327
3328 Function *Callee = CB.getCalledFunction();
3329 // TODO: Even if the callee is not known now we might be able to simplify
3330 // the call/callee.
3331 if (!Callee)
3332 return true;
3333
3334 // Every call site can track active assumptions.
3335 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3336
3337 // Skip declarations except if annotations on their call sites were
3338 // explicitly requested.
3339 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3340 !Callee->hasMetadata(LLVMContext::MD_callback))
3341 return true;
3342
3343 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3345 bool UsedAssumedInformation = false;
3346 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3348
3350 getOrCreateAAFor<AANoFPClass>(CBInstPos);
3351 }
3352
3353 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3354
3356
3357 // Every call site argument might be dead.
3358 getOrCreateAAFor<AAIsDead>(CBArgPos);
3359
3360 // Call site argument might be simplified. We have to go through the
3361 // Attributor interface though as outside AAs can register custom
3362 // simplification callbacks.
3363 bool UsedAssumedInformation = false;
3364 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3366
3367 // Every call site argument might be marked "noundef".
3368 getOrCreateAAFor<AANoUndef>(CBArgPos);
3369
3370 Type *ArgTy = CB.getArgOperand(I)->getType();
3371
3372 if (!ArgTy->isPointerTy()) {
3374 getOrCreateAAFor<AANoFPClass>(CBArgPos);
3375
3376 continue;
3377 }
3378
3379 // Call site argument attribute "non-null".
3380 getOrCreateAAFor<AANonNull>(CBArgPos);
3381
3382 // Call site argument attribute "nocapture".
3383 getOrCreateAAFor<AANoCapture>(CBArgPos);
3384
3385 // Call site argument attribute "no-alias".
3386 getOrCreateAAFor<AANoAlias>(CBArgPos);
3387
3388 // Call site argument attribute "dereferenceable".
3389 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3390
3391 // Call site argument attribute "align".
3392 getOrCreateAAFor<AAAlign>(CBArgPos);
3393
3394 // Call site argument attribute
3395 // "readnone/readonly/writeonly/..."
3396 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3397
3398 // Call site argument attribute "nofree".
3399 getOrCreateAAFor<AANoFree>(CBArgPos);
3400 }
3401 return true;
3402 };
3403
3404 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3405 bool Success;
3406 bool UsedAssumedInformation = false;
3408 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3409 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3410 (unsigned)Instruction::Call},
3411 UsedAssumedInformation);
3412 (void)Success;
3413 assert(Success && "Expected the check call to be successful!");
3414
3415 auto LoadStorePred = [&](Instruction &I) -> bool {
3416 if (isa<LoadInst>(I)) {
3417 getOrCreateAAFor<AAAlign>(
3418 IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
3419 if (SimplifyAllLoads)
3421 UsedAssumedInformation, AA::Intraprocedural);
3422 } else {
3423 auto &SI = cast<StoreInst>(I);
3424 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3425 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3426 UsedAssumedInformation, AA::Intraprocedural);
3427 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3428 }
3429 return true;
3430 };
3432 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3433 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3434 UsedAssumedInformation);
3435 (void)Success;
3436 assert(Success && "Expected the check call to be successful!");
3437}
3438
3439/// Helpers to ease debugging through output streams and print calls.
3440///
3441///{
3443 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3444}
3445
3447 switch (AP) {
3449 return OS << "inv";
3451 return OS << "flt";
3453 return OS << "fn_ret";
3455 return OS << "cs_ret";
3457 return OS << "fn";
3459 return OS << "cs";
3461 return OS << "arg";
3463 return OS << "cs_arg";
3464 }
3465 llvm_unreachable("Unknown attribute position!");
3466}
3467
3469 const Value &AV = Pos.getAssociatedValue();
3470 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3471 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3472
3473 if (Pos.hasCallBaseContext())
3474 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3475 return OS << "}";
3476}
3477
3479 OS << "range-state(" << S.getBitWidth() << ")<";
3480 S.getKnown().print(OS);
3481 OS << " / ";
3482 S.getAssumed().print(OS);
3483 OS << ">";
3484
3485 return OS << static_cast<const AbstractState &>(S);
3486}
3487
3489 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3490}
3491
3493 AA.print(OS);
3494 return OS;
3495}
3496
3499 OS << "set-state(< {";
3500 if (!S.isValidState())
3501 OS << "full-set";
3502 else {
3503 for (const auto &It : S.getAssumedSet())
3504 OS << It << ", ";
3505 if (S.undefIsContained())
3506 OS << "undef ";
3507 }
3508 OS << "} >)";
3509
3510 return OS;
3511}
3512
3514 const PotentialLLVMValuesState &S) {
3515 OS << "set-state(< {";
3516 if (!S.isValidState())
3517 OS << "full-set";
3518 else {
3519 for (const auto &It : S.getAssumedSet()) {
3520 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3521 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3522 else
3523 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3524 }
3525 if (S.undefIsContained())
3526 OS << "undef ";
3527 }
3528 OS << "} >)";
3529
3530 return OS;
3531}
3532
3534 OS << "[";
3535 OS << getName();
3536 OS << "] for CtxI ";
3537
3538 if (auto *I = getCtxI()) {
3539 OS << "'";
3540 I->print(OS);
3541 OS << "'";
3542 } else
3543 OS << "<<null inst>>";
3544
3545 OS << " at position " << getIRPosition() << " with state " << getAsStr()
3546 << '\n';
3547}
3548
3550 print(OS);
3551
3552 for (const auto &DepAA : Deps) {
3553 auto *AA = DepAA.getPointer();
3554 OS << " updates ";
3555 AA->print(OS);
3556 }
3557
3558 OS << '\n';
3559}
3560
3562 const AAPointerInfo::Access &Acc) {
3563 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3564 if (Acc.getLocalInst() != Acc.getRemoteInst())
3565 OS << " via " << *Acc.getLocalInst();
3566 if (Acc.getContent()) {
3567 if (*Acc.getContent())
3568 OS << " [" << **Acc.getContent() << "]";
3569 else
3570 OS << " [ <unknown> ]";
3571 }
3572 return OS;
3573}
3574///}
3575
3576/// ----------------------------------------------------------------------------
3577/// Pass (Manager) Boilerplate
3578/// ----------------------------------------------------------------------------
3579
3581 SetVector<Function *> &Functions,
3582 AnalysisGetter &AG,
3583 CallGraphUpdater &CGUpdater,
3584 bool DeleteFns, bool IsModulePass) {
3585 if (Functions.empty())
3586 return false;
3587
3588 LLVM_DEBUG({
3589 dbgs() << "[Attributor] Run on module with " << Functions.size()
3590 << " functions:\n";
3591 for (Function *Fn : Functions)
3592 dbgs() << " - " << Fn->getName() << "\n";
3593 });
3594
3595 // Create an Attributor and initially empty information cache that is filled
3596 // while we identify default attribute opportunities.
3597 AttributorConfig AC(CGUpdater);
3598 AC.IsModulePass = IsModulePass;
3599 AC.DeleteFns = DeleteFns;
3600 Attributor A(Functions, InfoCache, AC);
3601
3602 // Create shallow wrappers for all functions that are not IPO amendable
3604 for (Function *F : Functions)
3605 if (!A.isFunctionIPOAmendable(*F))
3607
3608 // Internalize non-exact functions
3609 // TODO: for now we eagerly internalize functions without calculating the
3610 // cost, we need a cost interface to determine whether internalizing
3611 // a function is "beneficial"
3612 if (AllowDeepWrapper) {
3613 unsigned FunSize = Functions.size();
3614 for (unsigned u = 0; u < FunSize; u++) {
3615 Function *F = Functions[u];
3616 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3617 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3619 assert(NewF && "Could not internalize function.");
3620 Functions.insert(NewF);
3621
3622 // Update call graph
3623 CGUpdater.replaceFunctionWith(*F, *NewF);
3624 for (const Use &U : NewF->uses())
3625 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3626 auto *CallerF = CB->getCaller();
3627 CGUpdater.reanalyzeFunction(*CallerF);
3628 }
3629 }
3630 }
3631 }
3632
3633 for (Function *F : Functions) {
3634 if (F->hasExactDefinition())
3635 NumFnWithExactDefinition++;
3636 else
3637 NumFnWithoutExactDefinition++;
3638
3639 // We look at internal functions only on-demand but if any use is not a
3640 // direct call or outside the current set of analyzed functions, we have
3641 // to do it eagerly.
3642 if (F->hasLocalLinkage()) {
3643 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3644 const auto *CB = dyn_cast<CallBase>(U.getUser());
3645 return CB && CB->isCallee(&U) &&
3646 Functions.count(const_cast<Function *>(CB->getCaller()));
3647 }))
3648 continue;
3649 }
3650
3651 // Populate the Attributor with abstract attribute opportunities in the
3652 // function and the information cache with IR information.
3653 A.identifyDefaultAbstractAttributes(*F);
3654 }
3655
3656 ChangeStatus Changed = A.run();
3657
3658 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3659 << " functions, result: " << Changed << ".\n");
3660 return Changed == ChangeStatus::CHANGED;
3661}
3662
3663void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3664
3666 static std::atomic<int> CallTimes;
3667 std::string Prefix;
3668
3669 if (!DepGraphDotFileNamePrefix.empty())
3671 else
3672 Prefix = "dep_graph";
3673 std::string Filename =
3674 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
3675
3676 outs() << "Dependency graph dump to " << Filename << ".\n";
3677
3678 std::error_code EC;
3679
3680 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
3681 if (!EC)
3682 llvm::WriteGraph(File, this);
3683
3684 CallTimes++;
3685}
3686
3688 for (auto DepAA : SyntheticRoot.Deps)
3689 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
3690}
3691
3695 AnalysisGetter AG(FAM);
3696
3697 SetVector<Function *> Functions;
3698 for (Function &F : M)
3699 Functions.insert(&F);
3700
3701 CallGraphUpdater CGUpdater;
3703 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3704 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3705 /* DeleteFns */ true, /* IsModulePass */ true)) {
3706 // FIXME: Think about passes we will preserve and add them here.
3707 return PreservedAnalyses::none();
3708 }
3709 return PreservedAnalyses::all();
3710}
3711
3714 LazyCallGraph &CG,
3715 CGSCCUpdateResult &UR) {
3717 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
3718 AnalysisGetter AG(FAM);
3719
3720 SetVector<Function *> Functions;
3721 for (LazyCallGraph::Node &N : C)
3722 Functions.insert(&N.getFunction());
3723
3724 if (Functions.empty())
3725 return PreservedAnalyses::all();
3726
3727 Module &M = *Functions.back()->getParent();
3728 CallGraphUpdater CGUpdater;
3729 CGUpdater.initialize(CG, C, AM, UR);
3731 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3732 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3733 /* DeleteFns */ false,
3734 /* IsModulePass */ false)) {
3735 // FIXME: Think about passes we will preserve and add them here.
3738 return PA;
3739 }
3740 return PreservedAnalyses::all();
3741}
3742
3743namespace llvm {
3744
3745template <> struct GraphTraits<AADepGraphNode *> {
3749
3750 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
3751 static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
3752
3756
3757 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
3758
3759 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
3760};
3761
3762template <>
3764 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
3765
3768
3769 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
3770
3771 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
3772};
3773
3774template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
3776
3777 static std::string getNodeLabel(const AADepGraphNode *Node,
3778 const AADepGraph *DG) {
3779 std::string AAString;
3780 raw_string_ostream O(AAString);
3781 Node->print(O);
3782 return AAString;
3783 }
3784};
3785
3786} // end namespace llvm
#define Success
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
Rewrite undef for PHI
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
static cl::opt< bool > VerifyMaxFixpointIterations("attributor-max-iterations-verify", cl::Hidden, cl::desc("Verify that max-iterations is a tight bound for a fixpoint"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, const ArrayRef< unsigned > &Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
#define VERBOSE_DEBUG_TYPE
Definition: Attributor.cpp:61
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction *ToI, const Function &ToFn, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet, std::function< bool(const Function &F)> GoBackwardsCB)
Definition: Attributor.cpp:609
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
Definition: Attributor.cpp:351
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma seperated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
Definition: Attributor.cpp:893
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
Definition: Attributor.cpp:567
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeList &Attrs, int AttrIdx, bool ForceReplace=false)
Return true if the information provided by Attr was added to the attribute list Attrs.
Definition: Attributor.cpp:903
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:182
#define LLVM_DEBUG(X)
Definition: Debug.h:101
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition: Debug.h:64
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
Rewrite Partial Register Uses
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
print must be executed print the must be executed context for all instructions
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Basic Register Allocator
@ SI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:75
AbstractCallSite.
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
Value * getCalledOperand() const
Return the pointer to function that is being called.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:774
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:163
bool hasAttributeAtIndex(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
Attribute getAttributeAtIndex(unsigned Index, Attribute::AttrKind Kind) const
Return the attribute object that exists at the given index.
AttributeSet getAttributes(unsigned Index) const
The attributes for the specified index are returned.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
Definition: Attributes.cpp:281
bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
Definition: Attributes.cpp:273
bool isIntAttribute() const
Return true if the attribute is an integer attribute.
Definition: Attributes.cpp:277
uint64_t getValueAsInt() const
Return the attribute's value as an integer.
Definition: Attributes.cpp:296
StringRef getKindAsString() const
Return the attribute's kind as a string.
Definition: Attributes.cpp:310
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:91
Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:289
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:87
LLVM Basic Block Representation.
Definition: BasicBlock.h:56
const Instruction & front() const
Definition: BasicBlock.h:338
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:105
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:300
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:112
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:127
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1762
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1190
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1475
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1522
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1412
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1471
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1494
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1490
Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
void removeFunction(Function &Fn)
Remove Fn from the call graph.
void removeCallSite(CallBase &CS)
Remove the call site CS from the call graph.
void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
void reanalyzeFunction(Function &Fn)
After a CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
bool replaceCallSite(CallBase &OldCS, CallBase &NewCS)
Replace OldCS with the new call site NewCS.
void initialize(CallGraph &CG, CallGraphSCC &SCC)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
void setTailCall(bool IsTc=true)
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:997
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2025
static Constant * getFPTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2117
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2075
void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:356
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:72
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
bool empty() const
Definition: DenseMap.h:98
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:166
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Definition: DerivedTypes.h:103
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
Definition: Metadata.cpp:1721
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:136
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition: Function.h:697
const BasicBlock & getEntryBlock() const
Definition: Function.h:745
void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
removes the attribute from the list of attributes.
Definition: Function.cpp:626
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:174
iterator_range< arg_iterator > args()
Definition: Function.h:800
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1725
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:313
iterator begin()
Definition: Function.h:761
arg_iterator arg_begin()
Definition: Function.h:776
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition: Function.h:316
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:319
size_t arg_size() const
Definition: Function.h:809
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:179
Argument * getArg(unsigned i) const
Definition: Function.h:794
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:187
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:644
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition: Function.cpp:756
bool hasMetadata() const
Return true if this value has any metadata attached to it.
Definition: Value.h:585
void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
Definition: Metadata.cpp:1425
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:273
LinkageTypes getLinkage() const
Definition: GlobalValue.h:541
bool hasLocalLinkage() const
Definition: GlobalValue.h:523
void setLinkage(LinkageTypes LT)
Definition: GlobalValue.h:532
unsigned getAddressSpace() const
Definition: GlobalValue.h:201
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:652
void setDSOLocal(bool Local)
Definition: GlobalValue.h:299
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
@ DefaultVisibility
The GV is visible.
Definition: GlobalValue.h:63
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:250
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
Definition: GlobalValue.h:420
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:56
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:55
bool isSuccess() const
Definition: InlineCost.h:188
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:933
const BasicBlock * getParent() const
Definition: Instruction.h:90
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:74
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:82
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, Instruction *InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
Definition: Instructions.h:177
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition: Module.h:579
Diagnostic information for missed-optimization remarks.
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
PointerTy getPointer() const
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition: Constants.cpp:1743
A set of analyses that are preserved following a run of a transformation pass.
Definition: PassManager.h:152
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: PassManager.h:155
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: PassManager.h:158
void preserve()
Mark an analysis as preserved.
Definition: PassManager.h:173
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, Instruction *InsertBefore=nullptr)
A vector that has set insertion semantics.
Definition: SetVector.h:51
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:88
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:219
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:168
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:152
const value_type & front() const
Return the first element of the SetVector.
Definition: SetVector.h:133
void clear()
Completely clear the SetVector.
Definition: SetVector.h:224
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:83
typename vector_type::const_iterator iterator
Definition: SetVector.h:59
const value_type & back() const
Return the last element of the SetVector.
Definition: SetVector.h:139
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:93
ArrayRef< value_type > getArrayRef() const
Definition: SetVector.h:74
size_type size() const
Definition: SmallPtrSet.h:93
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:344
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:383
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:365
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:312
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void reserve(size_type N)
Definition: SmallVector.h:667
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:687
void resize(size_type N)
Definition: SmallVector.h:642
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
An instruction for storing to memory.
Definition: Instructions.h:301
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
A visitor class for IR positions.
Definition: Attributor.h:1102
SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:256
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:229
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1724
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:535
iterator_range< user_iterator > users()
Definition: Value.h:421
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:688
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1069
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:384
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:82
iterator insert(iterator where, pointer New)
Definition: ilist.h:165
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:454
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:642
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
Definition: Attributor.cpp:602
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
Definition: Attributor.cpp:597
std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
Definition: Attributor.cpp:326
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
Definition: Attributor.cpp:277
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
Definition: Attributor.cpp:784
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
Definition: Attributor.cpp:219
bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
Definition: Attributor.cpp:557
Constant * getInitialValueForObj(Attributor &A, Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
Definition: Attributor.cpp:229
bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
Definition: Attributor.cpp:837
bool isGPU(const Module &M)
Return true iff M target a GPU (and we can use GPU AS reasoning).
Definition: Attributor.cpp:189
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition: Attributor.h:174
@ Intraprocedural
Definition: Attributor.h:175
@ Interprocedural
Definition: Attributor.h:176
bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
Definition: Attributor.cpp:267
bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
Definition: Attributor.cpp:765
bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
Definition: Attributor.cpp:194
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
Definition: Attributor.cpp:547
Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
Definition: Attributor.cpp:303
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
void updateMinLegalVectorWidthAttr(Function &Fn, uint64_t Width)
Update min-legal-vector-width if it is in Attribute and less than Width.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
@ CommaSeparated
Definition: CommandLine.h:164
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:465
DiagnosticInfoOptimizationBase::Argument NV
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage return + linefeed '\r\n'.
Definition: FileSystem.h:770
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1819
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:529
unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
Definition: Attributor.cpp:97
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:126
APInt operator&(APInt a, const APInt &b)
Definition: APInt.h:2062
void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace contents of every block in BBs with single unreachable instruction.
@ Done
Definition: Threading.h:61
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:6511
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2361
raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:359
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
InlineResult isInlineViable(Function &Callee)
Minimal filter to detect invalid constructs for inlining.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1826
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:398
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2102
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool AreStatisticsEnabled()
Check if statistics are enabled.
Definition: Statistic.cpp:139
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2296
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
Definition: GraphWriter.h:427
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:292
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2113
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition: iterator.h:363
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1976
ChangeStatus
{
Definition: Attributor.h:477
void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the information contained in the operand bundles of the llvm....
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
DepClassTy
Definition: Attributor.h:487
@ OPTIONAL
The target may be valid if the source is not.
@ NONE
Do not track a dependence between source and target.
@ REQUIRED
The target cannot be valid if the source is not.
APInt operator|(APInt a, const APInt &b)
Definition: APInt.h:2082
#define N
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition: Attributor.h:504
aaiterator begin()
Definition: Attributor.h:518
aaiterator end()
Definition: Attributor.h:519
The data structure for the dependency graph.
Definition: Attributor.h:535
iterator begin()
Definition: Attributor.h:550
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition: Attributor.h:547
void print()
Print dependency graph.
iterator end()
Definition: Attributor.h:551
void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition: Attributor.h:548
An abstract interface to track if a value leaves its defining function instance.
Definition: Attributor.h:4022
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
Definition: Attributor.h:4036
An abstract Attribute for computing reachability between functions.
Definition: Attributor.h:5198
An abstract interface to determine reachability of point A to B.
Definition: Attributor.h:3576
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:3699
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interf...
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
Definition: Attributor.h:3735
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
Definition: Attributor.h:3756
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
Definition: Attributor.h:4312
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
Definition: Attributor.h:4375
An abstract interface for all nocapture attributes.
Definition: Attributor.h:4062
bool isAssumedNoSync() const
Returns true if "nosync" is assumed.
Definition: Attributor.h:3376
static bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific for intrinsics which are potentially volatile.
An access description.
Definition: Attributor.h:5448
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
Definition: Attributor.h:5554
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
Definition: Attributor.h:5573
bool isWriteOrAssumption() const
Return true if this is a write access.
Definition: Attributor.h:5524
bool isRead() const
Return true if this is a read access.
Definition: Attributor.h:5518
Value * getWrittenValue() const
Return the value written, if any.
Definition: Attributor.h:5565
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attri...
Definition: Attributor.h:5545
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
Definition: Attributor.h:5548
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
Definition: Attributor.h:5551
AccessKind getKind() const
Return the access kind.
Definition: Attributor.h:5515
An abstract interface for struct information.
Definition: Attributor.h:5269
static Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
Definition: Attributor.h:5691
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition: Attributor.h:231
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition: Attributor.h:240
Value * getValue() const
Definition: Attributor.h:186
const Instruction * getCtxI() const
Definition: Attributor.h:187
Base struct for all "concrete attribute" deductions.
Definition: Attributor.h:3159
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
Definition: Attributor.cpp:993
virtual ChangeStatus manifest(Attributor &A)
Hook for the Attributor to trigger the manifestation of the information represented by the abstract a...
Definition: Attributor.h:3233
virtual void printWithDeps(raw_ostream &OS) const
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getName() const =0
This function should return the name of the AbstractAttribute.
virtual ~AbstractAttribute()=default
Virtual destructor.
virtual const std::string getAsStr() const =0
This function should return the "summarized" assumed state as string.
void print(raw_ostream &OS) const override
Helper functions, for debug purposes only.
virtual bool isQueryAA() const
A query AA is always scheduled as long as we do updates because it does lazy computation that cannot ...
Definition: Attributor.h:3191
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
virtual void trackStatistics() const =0
Hook to enable custom statistic tracking, called after manifest that resulted in a change if statisti...
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
Definition: Attributor.h:3198
An interface to query the internal state of an abstract attribute.
Definition: Attributor.h:2479
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
virtual ChangeStatus indicateOptimisticFixpoint()=0
Indicate that the abstract state should converge to the optimistic state.
Wrapper for FunctionAnalysisManager.
Definition: Attributor.h:1113
Analysis::Result * getAnalysis(const Function &F)
Definition: Attributor.h:1128
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Definition: Attributor.h:5062
Configuration for the Attributor.
Definition: Attributor.h:1410
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
Definition: Attributor.h:1444
bool RewriteSignatures
Flag to determine if we rewrite function signatures.
Definition: Attributor.h:1427
bool DeleteFns
Flag to determine if we can delete functions or keep dead ones around.
Definition: Attributor.h:1424
CallGraphUpdater & CGUpdater
Helper to update an underlying call graph and to delete functions.
Definition: Attributor.h:1438
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
Definition: Attributor.h:2084
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
Definition: Attributor.h:2107
std::function< void(const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
Definition: Attributor.h:2093
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:1484
bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
Definition: Attributor.h:1726
bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute &QueryingAA, const ArrayRef< unsigned > &Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
static bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
Definition: Attributor.h:1729
bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated ...
static void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
bool checkForAllReturnedValuesAndReturnInsts(function_ref< bool(Value &, const SmallSetVector< ReturnInst *, 4 > &)> Pred, const AbstractAttribute &QueryingAA)
Check Pred on all values potentially returned by F.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nu...
Definition: Attributor.h:1857
static Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt,...
InformationCache & getInfoCache()
Return the internal information cache.
Definition: Attributor.h:1723
std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA)
Check Pred on all values potentially returned by the function associated with QueryingAA.
bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation)
Try to simplify IRP in the scope S.
void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for...
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
Definition: Attributor.h:1944
ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
static bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
bool isKnown(base_t BitsEncoding) const
Return true if the bits set in BitsEncoding are "known bits".
Definition: Attributor.h:2627
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:51
static NodeRef DepGetVal(const DepTy &DT)
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
static ChildIteratorType child_begin(NodeRef N)
Def