1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
15
17
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/Statistic.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/Constant.h"
31#include "llvm/IR/Constants.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/GlobalValue.h"
35#include "llvm/IR/Instruction.h"
38#include "llvm/IR/ValueHandle.h"
42#include "llvm/Support/Debug.h"
50#include <cstdint>
51
52#ifdef EXPENSIVE_CHECKS
53#include "llvm/IR/Verifier.h"
54#endif
55
56#include <cassert>
57#include <optional>
58#include <string>
59
60using namespace llvm;
61
62#define DEBUG_TYPE "attributor"
63#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
64
65DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
66 "Determine what attributes are manifested in the IR");
67
68STATISTIC(NumFnDeleted, "Number of functions deleted");
69STATISTIC(NumFnWithExactDefinition,
70 "Number of functions with exact definitions");
71STATISTIC(NumFnWithoutExactDefinition,
72 "Number of functions without exact definitions");
73STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
74STATISTIC(NumAttributesTimedOut,
75 "Number of abstract attributes timed out before fixpoint");
76STATISTIC(NumAttributesValidFixpoint,
77 "Number of abstract attributes in a valid fixpoint state");
78STATISTIC(NumAttributesManifested,
79 "Number of abstract attributes manifested in IR");
80
81// TODO: Determine a good default value.
82//
83// In the LLVM-TS and SPEC2006, 32 does not seem to induce compile-time overheads
84// (when run with the first 5 abstract attributes). The results also indicate
85// that we never reach 32 iterations but always find a fixpoint sooner.
86//
87// This scheme will become more involved once we perform two interleaved
88// fixpoint iterations: bottom-up and top-down.
89static cl::opt<unsigned>
90 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
91 cl::desc("Maximal number of fixpoint iterations."),
92 cl::init(32));
93
94static cl::opt<unsigned, true> MaxInitializationChainLengthX(
95 "attributor-max-initialization-chain-length", cl::Hidden,
96 cl::desc(
97 "Maximal number of chained initializations (to avoid stack overflows)"),
98 cl::location(MaxInitializationChainLength), cl::init(1024));
99unsigned llvm::MaxInitializationChainLength;
100
102 "attributor-max-iterations-verify", cl::Hidden,
103 cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
104 cl::init(false));
105
107 "attributor-annotate-decl-cs", cl::Hidden,
108 cl::desc("Annotate call sites of function declarations."), cl::init(false));
109
110static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
111 cl::init(true), cl::Hidden);
112
113static cl::opt<bool>
114 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
115 cl::desc("Allow the Attributor to create shallow "
116 "wrappers for non-exact definitions."),
117 cl::init(false));
118
119static cl::opt<bool>
120 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
121 cl::desc("Allow the Attributor to use IP information "
122 "derived from non-exact functions via cloning"),
123 cl::init(false));
124
125// These options can only be used for debug builds.
126#ifndef NDEBUG
127static cl::list<std::string>
128 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
129 cl::desc("Comma separated list of attribute names that are "
130 "allowed to be seeded."),
131 cl::CommaSeparated);
132
134 "attributor-function-seed-allow-list", cl::Hidden,
135 cl::desc("Comma seperated list of function names that are "
136 "allowed to be seeded."),
138#endif
139
140static cl::opt<bool>
141 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
142 cl::desc("Dump the dependency graph to dot files."),
143 cl::init(false));
144
146 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
147 cl::desc("The prefix used for the CallGraph dot file names."));
148
149static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
150 cl::desc("View the dependency graph."),
151 cl::init(false));
152
153static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
154 cl::desc("Print attribute dependencies"),
155 cl::init(false));
156
158 "attributor-enable-call-site-specific-deduction", cl::Hidden,
159 cl::desc("Allow the Attributor to do call site specific analysis"),
160 cl::init(false));
161
162static cl::opt<bool>
163 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
164 cl::desc("Print Attributor's internal call graph"),
165 cl::init(false));
166
167static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
168 cl::Hidden,
169 cl::desc("Try to simplify all loads."),
170 cl::init(true));
171
172/// Logic operators for the change status enum class.
173///
174///{
175ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
176 return L == ChangeStatus::CHANGED ? L : R;
177}
178ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
179 L = L | R;
180 return L;
181}
182ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
183 return L == ChangeStatus::UNCHANGED ? L : R;
184}
185ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
186 L = L & R;
187 return L;
188}
189///}
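// Illustrative example (not from the upstream file): CHANGED dominates for
// `operator|` and UNCHANGED dominates for `operator&`, so a typical
// deduction accumulates its per-step results with |=:
//
//   ChangeStatus Changed = ChangeStatus::UNCHANGED;
//   Changed |= ChangeStatus::UNCHANGED; // still UNCHANGED
//   Changed |= ChangeStatus::CHANGED;   // CHANGED, and it stays CHANGED
//   assert((ChangeStatus::CHANGED & ChangeStatus::UNCHANGED) ==
//          ChangeStatus::UNCHANGED);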
190
191bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
192 const AbstractAttribute &QueryingAA) {
193 // We are looking for volatile instructions or non-relaxed atomics.
194 if (const auto *CB = dyn_cast<CallBase>(&I)) {
195 if (CB->hasFnAttr(Attribute::NoSync))
196 return true;
197
198 // Non-convergent and readnone imply nosync.
199 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
200 return true;
201
202 if (AANoSync::isNoSyncIntrinsic(&I))
203 return true;
204
205 const auto &NoSyncAA = A.getAAFor<AANoSync>(
206 QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
207 return NoSyncAA.isAssumedNoSync();
208 }
209
210 if (!I.mayReadOrWriteMemory())
211 return true;
212
213 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
214}
215
216bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
217 const Value &V, bool ForAnalysisOnly) {
218 // TODO: See the AAInstanceInfo class comment.
219 if (!ForAnalysisOnly)
220 return false;
221 auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
222 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
223 return InstanceInfoAA.isAssumedUniqueForAnalysis();
224}
225
226Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
227 const TargetLibraryInfo *TLI,
228 const DataLayout &DL,
229 AA::RangeTy *RangePtr) {
230 if (isa<AllocaInst>(Obj))
231 return UndefValue::get(&Ty);
232 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
233 return Init;
234 auto *GV = dyn_cast<GlobalVariable>(&Obj);
235 if (!GV)
236 return nullptr;
237 if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
238 return nullptr;
239 if (!GV->hasInitializer())
240 return UndefValue::get(&Ty);
241
242 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
243 APInt Offset = APInt(64, RangePtr->Offset);
244 return ConstantFoldLoadFromConst(GV->getInitializer(), &Ty, Offset, DL);
245 }
246
247 return ConstantFoldLoadFromUniformValue(GV->getInitializer(), &Ty);
248}
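// Illustrative use (sketch with hypothetical variables): for a GlobalVariable
// `GV` that is constant and has an initializer, and an integer type `I32Ty`,
//
//   Constant *Init = AA::getInitialValueForObj(*GV, *I32Ty, TLI, DL,
//                                              /* RangePtr */ nullptr);
//
// yields the (possibly folded) initializer. An AllocaInst yields undef, and a
// non-constant global without local linkage yields nullptr since its initial
// memory content is unknown.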
249
250bool AA::isValidInScope(const Value &V, const Function *Scope) {
251 if (isa<Constant>(V))
252 return true;
253 if (auto *I = dyn_cast<Instruction>(&V))
254 return I->getFunction() == Scope;
255 if (auto *A = dyn_cast<Argument>(&V))
256 return A->getParent() == Scope;
257 return false;
258}
259
260bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
261 InformationCache &InfoCache) {
262 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
263 return true;
264 const Function *Scope = nullptr;
265 const Instruction *CtxI = VAC.getCtxI();
266 if (CtxI)
267 Scope = CtxI->getFunction();
268 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
269 return A->getParent() == Scope;
270 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
271 if (I->getFunction() == Scope) {
272 if (const DominatorTree *DT =
273 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
274 *Scope))
275 return DT->dominates(I, CtxI);
276 // Local dominance check mostly for the old PM passes.
277 if (CtxI && I->getParent() == CtxI->getParent())
278 return llvm::any_of(
279 make_range(I->getIterator(), I->getParent()->end()),
280 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
281 }
282 }
283 return false;
284}
285
286Value *AA::getWithType(Value &V, Type &Ty) {
287 if (V.getType() == &Ty)
288 return &V;
289 if (isa<PoisonValue>(V))
290 return PoisonValue::get(&Ty);
291 if (isa<UndefValue>(V))
292 return UndefValue::get(&Ty);
293 if (auto *C = dyn_cast<Constant>(&V)) {
294 if (C->isNullValue())
295 return Constant::getNullValue(&Ty);
296 if (C->getType()->isPointerTy() && Ty.isPointerTy())
297 return ConstantExpr::getPointerCast(C, &Ty);
298 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
299 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
300 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
301 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
302 return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
303 }
304 }
305 return nullptr;
306}
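// Illustrative behavior (hypothetical constants and types): the value is only
// adapted when a trivially safe conversion exists, otherwise nullptr:
//
//   AA::getWithType(*UndefValue::get(I64Ty), *I32Ty);          // i32 undef
//   AA::getWithType(*Constant::getNullValue(I64Ty), *I32Ty);   // i32 0
//   AA::getWithType(*ConstantInt::get(I32Ty, 7), *FloatTy);    // nullptr
//
// Pointer casts as well as integer/FP truncations of constants are attempted
// too; anything else is rejected.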
307
308std::optional<Value *>
309AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
310 const std::optional<Value *> &B,
311 Type *Ty) {
312 if (A == B)
313 return A;
314 if (!B)
315 return A;
316 if (*B == nullptr)
317 return nullptr;
318 if (!A)
319 return Ty ? getWithType(**B, *Ty) : nullptr;
320 if (*A == nullptr)
321 return nullptr;
322 if (!Ty)
323 Ty = (*A)->getType();
324 if (isa_and_nonnull<UndefValue>(*A))
325 return getWithType(**B, *Ty);
326 if (isa<UndefValue>(*B))
327 return A;
328 if (*A && *B && *A == getWithType(**B, *Ty))
329 return A;
330 return nullptr;
331}
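// Illustrative lattice semantics (C and D are distinct hypothetical constants
// of the requested type; "none" stands for std::nullopt):
//
//   combineOptionalValuesInAAValueLatice(none,  C, Ty) == C       // refined
//   combineOptionalValuesInAAValueLatice(undef, C, Ty) == C       // refined
//   combineOptionalValuesInAAValueLatice(C,     C, Ty) == C       // stable
//   combineOptionalValuesInAAValueLatice(C,     D, Ty) == nullptr // conflict
//
// std::nullopt means "no value seen yet"; nullptr means "no single value".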
332
333template <bool IsLoad, typename Ty>
334static bool getPotentialCopiesOfMemoryValue(
335 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
336 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
337 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
338 bool OnlyExact) {
339 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
340 << " (only exact: " << OnlyExact << ")\n";);
341
342 Value &Ptr = *I.getPointerOperand();
343 // Containers to remember the pointer infos and new copies while we are not
344 // sure that we can find all of them. If we abort we want to avoid spurious
345 // dependences and potential copies in the provided container.
346 SmallVector<const AAPointerInfo *> PIs;
347 SmallVector<Value *> NewCopies;
348 SmallVector<Instruction *> NewCopyOrigins;
349
350 const auto *TLI =
351 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
352
353 auto Pred = [&](Value &Obj) {
354 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
355 if (isa<UndefValue>(&Obj))
356 return true;
357 if (isa<ConstantPointerNull>(&Obj)) {
358 // A null pointer access can be undefined but any offset from null may
359 // be OK. We do not try to optimize the latter.
360 if (!NullPointerIsDefined(I.getFunction(),
361 Ptr.getType()->getPointerAddressSpace()) &&
362 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
363 AA::Interprocedural) == &Obj)
364 return true;
365 LLVM_DEBUG(
366 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
367 return false;
368 }
369 // TODO: Use assumed noalias return.
370 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
371 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
372 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
373 << "\n";);
374 return false;
375 }
376 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
377 if (!GV->hasLocalLinkage() &&
378 !(GV->isConstant() && GV->hasInitializer())) {
379 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
380 "linkage, not supported yet: "
381 << Obj << "\n";);
382 return false;
383 }
384
385 bool NullOnly = true;
386 bool NullRequired = false;
387 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
388 bool IsExact) {
389 if (!V || *V == nullptr)
390 NullOnly = false;
391 else if (isa<UndefValue>(*V))
392 /* No op */;
393 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
394 NullRequired = !IsExact;
395 else
396 NullOnly = false;
397 };
398
399 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
400 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
401 return true;
402 if (IsLoad && Acc.isWrittenValueYetUndetermined())
403 return true;
404 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
405 if (OnlyExact && !IsExact && !NullOnly &&
406 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
407 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
408 << ", abort!\n");
409 return false;
410 }
411 if (NullRequired && !NullOnly) {
412 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
413 "one, however found non-null one: "
414 << *Acc.getRemoteInst() << ", abort!\n");
415 return false;
416 }
417 if (IsLoad) {
418 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
419 if (!Acc.isWrittenValueUnknown()) {
420 NewCopies.push_back(Acc.getWrittenValue());
421 NewCopyOrigins.push_back(Acc.getRemoteInst());
422 return true;
423 }
424 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
425 if (!SI) {
426 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
427 "instruction not supported yet: "
428 << *Acc.getRemoteInst() << "\n";);
429 return false;
430 }
431 NewCopies.push_back(SI->getValueOperand());
432 NewCopyOrigins.push_back(SI);
433 } else {
434 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
435 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
436 if (!LI && OnlyExact) {
437 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
438 "instruction not supported yet: "
439 << *Acc.getRemoteInst() << "\n";);
440 return false;
441 }
442 NewCopies.push_back(Acc.getRemoteInst());
443 }
444 return true;
445 };
446
447 // If the value has been written to we don't need the initial value of the
448 // object.
449 bool HasBeenWrittenTo = false;
450
451 AA::RangeTy Range;
452 auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
453 DepClassTy::NONE);
454 if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess,
455 HasBeenWrittenTo, Range)) {
456 LLVM_DEBUG(
457 dbgs()
458 << "Failed to verify all interfering accesses for underlying object: "
459 << Obj << "\n");
460 return false;
461 }
462
463 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
464 const DataLayout &DL = A.getDataLayout();
465 Value *InitialValue =
466 AA::getInitialValueForObj(Obj, *I.getType(), TLI, DL, &Range);
467 if (!InitialValue) {
468 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
469 "underlying object, abort!\n");
470 return false;
471 }
472 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
473 if (NullRequired && !NullOnly) {
474 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
475 "null or undef, abort!\n");
476 return false;
477 }
478
479 NewCopies.push_back(InitialValue);
480 NewCopyOrigins.push_back(nullptr);
481 }
482
483 PIs.push_back(&PI);
484
485 return true;
486 };
487
488 const auto &AAUO = A.getAAFor<AAUnderlyingObjects>(
489 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
490 if (!AAUO.forallUnderlyingObjects(Pred)) {
491 LLVM_DEBUG(
492 dbgs() << "Underlying objects stored into could not be determined\n";);
493 return false;
494 }
495
496 // Only if we successfully collected all potential copies do we record
497 // dependences (on non-fixed AAPointerInfo AAs) and modify the given
498 // PotentialCopies container.
499 for (const auto *PI : PIs) {
500 if (!PI->getState().isAtFixpoint())
501 UsedAssumedInformation = true;
502 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
503 }
504 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
505 PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
506
507 return true;
508}
509
510bool AA::getPotentiallyLoadedValues(
511 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
512 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
513 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
514 bool OnlyExact) {
515 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
516 A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
517 UsedAssumedInformation, OnlyExact);
518}
519
520bool AA::getPotentialCopiesOfStoredValue(
521 Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
522 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
523 bool OnlyExact) {
524 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
525 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
526 A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
527 UsedAssumedInformation, OnlyExact);
528}
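// Illustrative use (hypothetical caller): an abstract attribute that reasons
// about the values a load may observe first collects all potential copies:
//
//   SmallSetVector<Value *, 4> Values;
//   SmallSetVector<Instruction *, 4> Origins;
//   bool UsedAssumed = false;
//   if (AA::getPotentiallyLoadedValues(A, LI, Values, Origins, QueryingAA,
//                                      UsedAssumed, /* OnlyExact */ true)) {
//     // Values holds every value LI may read (based on assumed information),
//     // Origins the corresponding writes (nullptr stands for initial values).
//   } else {
//     // The set of potential copies could not be determined; be conservative.
//   }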
529
530static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
531 const AbstractAttribute &QueryingAA,
532 bool RequireReadNone, bool &IsKnown) {
533
534 IRPosition::Kind Kind = IRP.getPositionKind();
535 if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
536 const auto &MemLocAA =
537 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
538 if (MemLocAA.isAssumedReadNone()) {
539 IsKnown = MemLocAA.isKnownReadNone();
540 if (!IsKnown)
541 A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
542 return true;
543 }
544 }
545
546 const auto &MemBehaviorAA =
547 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
548 if (MemBehaviorAA.isAssumedReadNone() ||
549 (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
550 IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
551 : MemBehaviorAA.isKnownReadOnly();
552 if (!IsKnown)
553 A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
554 return true;
555 }
556
557 return false;
558}
559
560bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
561 const AbstractAttribute &QueryingAA, bool &IsKnown) {
562 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
563 /* RequireReadNone */ false, IsKnown);
564}
565bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
566 const AbstractAttribute &QueryingAA, bool &IsKnown) {
567 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
568 /* RequireReadNone */ true, IsKnown);
569}
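// Illustrative use (hypothetical caller): deductions often gate their pointer
// reasoning on memory behavior, e.g.
//
//   bool IsKnown = false;
//   if (AA::isAssumedReadOnly(A, IRPosition::function(F), QueryingAA, IsKnown)) {
//     // F is assumed not to write memory; IsKnown tells whether this is
//     // already proven (no dependence recorded) or only assumed so far.
//   }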
570
571static bool
572isPotentiallyReachable(Attributor &A, const Instruction &FromI,
573 const Instruction *ToI, const Function &ToFn,
574 const AbstractAttribute &QueryingAA,
575 const AA::InstExclusionSetTy *ExclusionSet,
576 std::function<bool(const Function &F)> GoBackwardsCB) {
577 LLVM_DEBUG({
578 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
579 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
580 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
581 << "]\n";
582 if (ExclusionSet)
583 for (auto *ES : *ExclusionSet)
584 dbgs() << *ES << "\n";
585 });
586
587 // If we can go arbitrarily backwards we will eventually reach an entry point
588 // that can reach ToI. Only if a set of blocks through which we cannot go is
589 // provided, or once we track internal functions not accessible from the
590 // outside, it makes sense to perform backwards analysis in the absence of a
591 // GoBackwardsCB.
592 if (!GoBackwardsCB && !ExclusionSet) {
593 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
594 << " is not checked backwards and does not have an "
595 "exclusion set, abort\n");
596 return true;
597 }
598
601 Worklist.push_back(&FromI);
602
603 while (!Worklist.empty()) {
604 const Instruction *CurFromI = Worklist.pop_back_val();
605 if (!Visited.insert(CurFromI).second)
606 continue;
607
608 const Function *FromFn = CurFromI->getFunction();
609 if (FromFn == &ToFn) {
610 if (!ToI)
611 return true;
612 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
613 << " intraprocedurally\n");
614 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
615 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
616 bool Result =
617 ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI, ExclusionSet);
618 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
619 << (Result ? "can potentially " : "cannot ") << "reach "
620 << *ToI << " [Intra]\n");
621 if (Result)
622 return true;
623 }
624
625 bool Result = true;
626 if (!ToFn.isDeclaration() && ToI) {
627 const auto &ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
628 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
629 const Instruction &EntryI = ToFn.getEntryBlock().front();
630 Result =
631 ToReachabilityAA.isAssumedReachable(A, EntryI, *ToI, ExclusionSet);
632 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
633 << " " << (Result ? "can potentially " : "cannot ")
634 << "reach @" << *ToI << " [ToFn]\n");
635 }
636
637 if (Result) {
638 // The entry of the ToFn can reach the instruction ToI. Check whether the
639 // current instruction is known to reach the ToFn at all.
640 const auto &FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
641 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
642 Result = FnReachabilityAA.instructionCanReach(A, *CurFromI, ToFn,
643 ExclusionSet);
644 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
645 << " " << (Result ? "can potentially " : "cannot ")
646 << "reach @" << ToFn.getName() << " [FromFn]\n");
647 if (Result)
648 return true;
649 }
650
651 // TODO: Check assumed nounwind.
652 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
653 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
654 auto ReturnInstCB = [&](Instruction &Ret) {
655 bool Result =
656 ReachabilityAA.isAssumedReachable(A, *CurFromI, Ret, ExclusionSet);
657 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
658 << (Result ? "can potentially " : "cannot ") << "reach "
659 << Ret << " [Intra]\n");
660 return !Result;
661 };
662
663 // Check if we can reach returns.
664 bool UsedAssumedInformation = false;
665 if (A.checkForAllInstructions(ReturnInstCB, FromFn, QueryingAA,
666 {Instruction::Ret}, UsedAssumedInformation)) {
667 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
668 continue;
669 }
670
671 if (!GoBackwardsCB) {
672 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
673 << " is not checked backwards, abort\n");
674 return true;
675 }
676
677 // If we do not go backwards from the FromFn we are done here and so far we
678 // could not find a way to reach ToFn/ToI.
679 if (!GoBackwardsCB(*FromFn))
680 continue;
681
682 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
683 << FromFn->getName() << "\n");
684
685 auto CheckCallSite = [&](AbstractCallSite ACS) {
686 CallBase *CB = ACS.getInstruction();
687 if (!CB)
688 return false;
689
690 if (isa<InvokeInst>(CB))
691 return false;
692
693 Instruction *Inst = CB->getNextNonDebugInstruction();
694 Worklist.push_back(Inst);
695 return true;
696 };
697
698 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
699 /* RequireAllCallSites */ true,
700 &QueryingAA, UsedAssumedInformation);
701 if (Result) {
702 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
703 << " in @" << FromFn->getName()
704 << " failed, give up\n");
705 return true;
706 }
707
708 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
709 << " in @" << FromFn->getName()
710 << " worklist size is: " << Worklist.size() << "\n");
711 }
712 return false;
713}
714
715bool AA::isPotentiallyReachable(
716 Attributor &A, const Instruction &FromI, const Instruction &ToI,
717 const AbstractAttribute &QueryingAA,
718 const AA::InstExclusionSetTy *ExclusionSet,
719 std::function<bool(const Function &F)> GoBackwardsCB) {
720 const Function *ToFn = ToI.getFunction();
721 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
722 ExclusionSet, GoBackwardsCB);
723}
724
725bool AA::isPotentiallyReachable(
726 Attributor &A, const Instruction &FromI, const Function &ToFn,
727 const AbstractAttribute &QueryingAA,
728 const AA::InstExclusionSetTy *ExclusionSet,
729 std::function<bool(const Function &F)> GoBackwardsCB) {
730 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
731 ExclusionSet, GoBackwardsCB);
732}
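// Illustrative query (hypothetical values): "can FromI reach a call into
// ToFn?", allowing backwards traversal only through internal functions:
//
//   auto GoBackwards = [](const Function &F) { return F.hasLocalLinkage(); };
//   bool MayReach = AA::isPotentiallyReachable(A, FromI, ToFn, QueryingAA,
//                                              /* ExclusionSet */ nullptr,
//                                              GoBackwards);
//
// A true result is conservative ("may reach"); only false is a guarantee.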
733
734bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
735 const AbstractAttribute &QueryingAA) {
736 if (isa<UndefValue>(Obj))
737 return true;
738 if (isa<AllocaInst>(Obj)) {
739 InformationCache &InfoCache = A.getInfoCache();
740 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
741 LLVM_DEBUG(
742 dbgs() << "[AA] Object '" << Obj
743 << "' is thread local; stack objects are thread local.\n");
744 return true;
745 }
746 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
747 QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL);
748 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
749 << (NoCaptureAA.isAssumedNoCapture() ? "" : "not")
750 << " thread local; "
751 << (NoCaptureAA.isAssumedNoCapture() ? "non-" : "")
752 << "captured stack object.\n");
753 return NoCaptureAA.isAssumedNoCapture();
754 }
755 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
756 if (GV->isConstant()) {
757 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
758 << "' is thread local; constant global\n");
759 return true;
760 }
761 if (GV->isThreadLocal()) {
762 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
763 << "' is thread local; thread local global\n");
764 return true;
765 }
766 }
767
768 if (A.getInfoCache().targetIsGPU()) {
769 if (Obj.getType()->getPointerAddressSpace() ==
770 (int)AA::GPUAddressSpace::Local) {
771 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
772 << "' is thread local; GPU local memory\n");
773 return true;
774 }
775 if (Obj.getType()->getPointerAddressSpace() ==
776 (int)AA::GPUAddressSpace::Constant) {
777 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
778 << "' is thread local; GPU constant memory\n");
779 return true;
780 }
781 }
782
783 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
784 return false;
785}
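// Illustrative behavior: non-captured stack objects, constant or thread_local
// globals, and GPU local/constant memory all count as thread local, e.g.
//
//   // AI is a hypothetical AllocaInst in the function under analysis.
//   bool ThreadLocal = AA::isAssumedThreadLocalObject(A, *AI, QueryingAA);
//   // True unless the alloca may be captured while the stack is accessible
//   // by other threads.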
786
787bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
788 const AbstractAttribute &QueryingAA) {
789 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
790 return false;
791
792 SmallSetVector<const Value *, 4> Ptrs;
793
794 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
795 if (!Loc || !Loc->Ptr) {
796 LLVM_DEBUG(
797 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
798 return false;
799 }
800 Ptrs.insert(Loc->Ptr);
801 return true;
802 };
803
804 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
805 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
806 return true;
807 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
808 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
809 return true;
810 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
811 return true;
812
813 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
814}
815
816bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
817 ArrayRef<const Value *> Ptrs,
818 const AbstractAttribute &QueryingAA,
819 const Instruction *CtxI) {
820 for (const Value *Ptr : Ptrs) {
821 if (!Ptr) {
822 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
823 return true;
824 }
825
826 auto Pred = [&](Value &Obj) {
827 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
828 return true;
829 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
830 << "'; -> requires barrier\n");
831 return false;
832 };
833
834 const auto &UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
835 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
836 if (!UnderlyingObjsAA.forallUnderlyingObjects(Pred))
837 return true;
838 }
839 return false;
840}
841
842/// Return true if \p New is equal or worse than \p Old.
843static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
844 if (!Old.isIntAttribute())
845 return true;
846
847 return Old.getValueAsInt() >= New.getValueAsInt();
848}
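// Illustrative semantics (deref(N) is shorthand for a hypothetical
// dereferenceable(N) attribute): the comparison is done on the integer value,
// from the point of view of the attribute that is already present:
//
//   isEqualOrWorse(/*New*/ deref(8),  /*Old*/ deref(16)); // true,  16 >= 8
//   isEqualOrWorse(/*New*/ deref(16), /*Old*/ deref(8));  // false,  8 < 16
//
// Non-int attributes are always treated as equal or worse.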
849
850/// Return true if the information provided by \p Attr was added to the
851/// attribute list \p Attrs. This is only the case if it was not already present
852/// in \p Attrs at the position described by \p PK and \p AttrIdx.
853static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
854 AttributeList &Attrs, int AttrIdx,
855 bool ForceReplace = false) {
856
857 if (Attr.isEnumAttribute()) {
858 Attribute::AttrKind Kind = Attr.getKindAsEnum();
859 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
860 if (!ForceReplace &&
861 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
862 return false;
863 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
864 return true;
865 }
866 if (Attr.isStringAttribute()) {
867 StringRef Kind = Attr.getKindAsString();
868 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
869 if (!ForceReplace &&
870 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
871 return false;
872 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
873 return true;
874 }
875 if (Attr.isIntAttribute()) {
876 Attribute::AttrKind Kind = Attr.getKindAsEnum();
877 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
878 if (!ForceReplace &&
879 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
880 return false;
881 Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
882 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
883 return true;
884 }
885
886 llvm_unreachable("Expected enum or string attribute!");
887}
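// Illustrative use (hypothetical values): manifesting `willreturn` on a
// function only if it is not already present:
//
//   AttributeList AL = F.getAttributes();
//   bool Added = addIfNotExistent(
//       F.getContext(), Attribute::get(F.getContext(), Attribute::WillReturn),
//       AL, AttributeList::FunctionIndex);
//   // Added is false if F already carried `willreturn`; otherwise AL was
//   // updated and still has to be written back with F.setAttributes(AL).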
888
889Argument *IRPosition::getAssociatedArgument() const {
890 if (getPositionKind() == IRP_ARGUMENT)
891 return cast<Argument>(&getAnchorValue());
892
893 // Not an Argument and no argument number means this is not a call site
894 // argument, thus we cannot find a callback argument to return.
895 int ArgNo = getCallSiteArgNo();
896 if (ArgNo < 0)
897 return nullptr;
898
899 // Use abstract call sites to make the connection between the call site
900 // values and the ones in callbacks. If a callback was found that makes use
901 // of the underlying call site operand, we want the corresponding callback
902 // callee argument and not the direct callee argument.
903 std::optional<Argument *> CBCandidateArg;
904 SmallVector<const Use *, 4> CallbackUses;
905 const auto &CB = cast<CallBase>(getAnchorValue());
906 AbstractCallSite::getCallbackUses(CB, CallbackUses);
907 for (const Use *U : CallbackUses) {
908 AbstractCallSite ACS(U);
909 assert(ACS && ACS.isCallbackCall());
910 if (!ACS.getCalledFunction())
911 continue;
912
913 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
914
915 // Test if the underlying call site operand is argument number u of the
916 // callback callee.
917 if (ACS.getCallArgOperandNo(u) != ArgNo)
918 continue;
919
920 assert(ACS.getCalledFunction()->arg_size() > u &&
921 "ACS mapped into var-args arguments!");
922 if (CBCandidateArg) {
923 CBCandidateArg = nullptr;
924 break;
925 }
926 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
927 }
928 }
929
930 // If we found a unique callback candidate argument, return it.
931 if (CBCandidateArg && *CBCandidateArg)
932 return *CBCandidateArg;
933
934 // If no callbacks were found, or none used the underlying call site operand
935 // exclusively, use the direct callee argument if available.
936 const Function *Callee = CB.getCalledFunction();
937 if (Callee && Callee->arg_size() > unsigned(ArgNo))
938 return Callee->getArg(ArgNo);
939
940 return nullptr;
941}
942
943ChangeStatus AbstractAttribute::update(Attributor &A) {
944 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
945 if (getState().isAtFixpoint())
946 return HasChanged;
947
948 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
949
950 HasChanged = updateImpl(A);
951
952 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
953 << "\n");
954
955 return HasChanged;
956}
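// Illustrative contract (a sketch of the surrounding driver, not code from
// this function): the Attributor keeps calling update() on attributes that
// are not at a fixpoint until a whole pass produces no CHANGED result or the
// -attributor-max-iterations limit is hit. Conceptually:
//
//   ChangeStatus CS = AA->update(A);
//   if (CS == ChangeStatus::CHANGED) {
//     // All attributes that recorded a dependence on AA are re-queued.
//   }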
957
958ChangeStatus IRAttributeManifest::manifestAttrs(Attributor &A,
959 const IRPosition &IRP,
960 const ArrayRef<Attribute> &DeducedAttrs,
961 bool ForceReplace) {
962 Function *ScopeFn = IRP.getAnchorScope();
963 IRPosition::Kind PK = IRP.getPositionKind();
964
965 // In the following some generic code that will manifest attributes in
966 // DeducedAttrs if they improve the current IR. Due to the different
967 // annotation positions we use the underlying AttributeList interface.
968
969 AttributeList Attrs;
970 switch (PK) {
971 case IRPosition::IRP_INVALID:
972 case IRPosition::IRP_FLOAT:
973 return ChangeStatus::UNCHANGED;
974 case IRPosition::IRP_ARGUMENT:
975 case IRPosition::IRP_FUNCTION:
976 case IRPosition::IRP_RETURNED:
977 Attrs = ScopeFn->getAttributes();
978 break;
979 case IRPosition::IRP_CALL_SITE:
980 case IRPosition::IRP_CALL_SITE_RETURNED:
981 case IRPosition::IRP_CALL_SITE_ARGUMENT:
982 Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
983 break;
984 }
985
986 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
987 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
988 for (const Attribute &Attr : DeducedAttrs) {
989 if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
990 continue;
991
992 HasChanged = ChangeStatus::CHANGED;
993 }
994
995 if (HasChanged == ChangeStatus::UNCHANGED)
996 return HasChanged;
997
998 switch (PK) {
999 case IRPosition::IRP_ARGUMENT:
1000 case IRPosition::IRP_FUNCTION:
1001 case IRPosition::IRP_RETURNED:
1002 ScopeFn->setAttributes(Attrs);
1003 break;
1004 case IRPosition::IRP_CALL_SITE:
1005 case IRPosition::IRP_CALL_SITE_RETURNED:
1006 case IRPosition::IRP_CALL_SITE_ARGUMENT:
1007 cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
1008 break;
1009 case IRPosition::IRP_INVALID:
1010 case IRPosition::IRP_FLOAT:
1011 break;
1012 }
1013
1014 return HasChanged;
1015}
1016
1017const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
1018const IRPosition
1019 IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
1020
1021SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
1022 IRPositions.emplace_back(IRP);
1023
1024 // Helper to determine if operand bundles on a call site are benign or
1025 // potentially problematic. We handle only llvm.assume for now.
1026 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1027 return (isa<IntrinsicInst>(CB) &&
1028 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
1029 };
1030
1031 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1032 switch (IRP.getPositionKind()) {
1033 case IRPosition::IRP_INVALID:
1034 case IRPosition::IRP_FLOAT:
1035 case IRPosition::IRP_FUNCTION:
1036 return;
1037 case IRPosition::IRP_ARGUMENT:
1038 case IRPosition::IRP_RETURNED:
1039 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1040 return;
1041 case IRPosition::IRP_CALL_SITE:
1042 assert(CB && "Expected call site!");
1043 // TODO: We need to look at the operand bundles similar to the redirection
1044 // in CallBase.
1045 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1046 if (const Function *Callee = CB->getCalledFunction())
1047 IRPositions.emplace_back(IRPosition::function(*Callee));
1048 return;
1049 case IRPosition::IRP_CALL_SITE_RETURNED:
1050 assert(CB && "Expected call site!");
1051 // TODO: We need to look at the operand bundles similar to the redirection
1052 // in CallBase.
1053 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1054 if (const Function *Callee = CB->getCalledFunction()) {
1055 IRPositions.emplace_back(IRPosition::returned(*Callee));
1056 IRPositions.emplace_back(IRPosition::function(*Callee));
1057 for (const Argument &Arg : Callee->args())
1058 if (Arg.hasReturnedAttr()) {
1059 IRPositions.emplace_back(
1060 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1061 IRPositions.emplace_back(
1062 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1063 IRPositions.emplace_back(IRPosition::argument(Arg));
1064 }
1065 }
1066 }
1067 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1068 return;
1069 case IRPosition::IRP_CALL_SITE_ARGUMENT: {
1070 assert(CB && "Expected call site!");
1071 // TODO: We need to look at the operand bundles similar to the redirection
1072 // in CallBase.
1073 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1074 const Function *Callee = CB->getCalledFunction();
1075 if (Callee) {
1076 if (Argument *Arg = IRP.getAssociatedArgument())
1077 IRPositions.emplace_back(IRPosition::argument(*Arg));
1078 IRPositions.emplace_back(IRPosition::function(*Callee));
1079 }
1080 }
1081 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1082 return;
1083 }
1084 }
1085}
1086
1087bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
1088 bool IgnoreSubsumingPositions, Attributor *A) const {
1089 SmallVector<Attribute, 4> Attrs;
1090 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1091 for (Attribute::AttrKind AK : AKs)
1092 if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
1093 return true;
1094 // The first position returned by the SubsumingPositionIterator is
1095 // always the position itself. If we ignore subsuming positions we
1096 // are done after the first iteration.
1097 if (IgnoreSubsumingPositions)
1098 break;
1099 }
1100 if (A)
1101 for (Attribute::AttrKind AK : AKs)
1102 if (getAttrsFromAssumes(AK, Attrs, *A))
1103 return true;
1104 return false;
1105}
1106
1107void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
1108 SmallVectorImpl<Attribute> &Attrs,
1109 bool IgnoreSubsumingPositions, Attributor *A) const {
1110 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1111 for (Attribute::AttrKind AK : AKs)
1112 EquivIRP.getAttrsFromIRAttr(AK, Attrs);
1113 // The first position returned by the SubsumingPositionIterator is
1114 // always the position itself. If we ignore subsuming positions we
1115 // are done after the first iteration.
1116 if (IgnoreSubsumingPositions)
1117 break;
1118 }
1119 if (A)
1120 for (Attribute::AttrKind AK : AKs)
1121 getAttrsFromAssumes(AK, Attrs, *A);
1122}
1123
1124bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
1125 SmallVectorImpl<Attribute> &Attrs) const {
1126 if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
1127 return false;
1128
1129 AttributeList AttrList;
1130 if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
1131 AttrList = CB->getAttributes();
1132 else
1133 AttrList = getAssociatedFunction()->getAttributes();
1134
1135 bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
1136 if (HasAttr)
1137 Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
1138 return HasAttr;
1139}
1140
1141bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
1142 SmallVectorImpl<Attribute> &Attrs,
1143 Attributor &A) const {
1144 assert(getPositionKind() != IRP_INVALID && "Did expect a valid position!");
1145 Value &AssociatedValue = getAssociatedValue();
1146
1147 const Assume2KnowledgeMap &A2K =
1148 A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1149
1150 // Check if we found any potential assume use, if not we don't need to create
1151 // explorer iterators.
1152 if (A2K.empty())
1153 return false;
1154
1155 LLVMContext &Ctx = AssociatedValue.getContext();
1156 unsigned AttrsSize = Attrs.size();
1157 MustBeExecutedContextExplorer &Explorer =
1158 A.getInfoCache().getMustBeExecutedContextExplorer();
1159 auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
1160 for (const auto &It : A2K)
1161 if (Explorer.findInContextOf(It.first, EIt, EEnd))
1162 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1163 return AttrsSize != Attrs.size();
1164}
1165
1166void IRPosition::verify() {
1167#ifdef EXPENSIVE_CHECKS
1168 switch (getPositionKind()) {
1169 case IRP_INVALID:
1170 assert((CBContext == nullptr) &&
1171 "Invalid position must not have CallBaseContext!");
1172 assert(!Enc.getOpaqueValue() &&
1173 "Expected a nullptr for an invalid position!");
1174 return;
1175 case IRP_FLOAT:
1176 assert((!isa<Argument>(&getAssociatedValue())) &&
1177 "Expected specialized kind for argument values!");
1178 return;
1179 case IRP_RETURNED:
1180 assert(isa<Function>(getAsValuePtr()) &&
1181 "Expected function for a 'returned' position!");
1182 assert(getAsValuePtr() == &getAssociatedValue() &&
1183 "Associated value mismatch!");
1184 return;
1185 case IRP_CALL_SITE_RETURNED:
1186 assert((CBContext == nullptr) &&
1187 "'call site returned' position must not have CallBaseContext!");
1188 assert((isa<CallBase>(getAsValuePtr())) &&
1189 "Expected call base for 'call site returned' position!");
1190 assert(getAsValuePtr() == &getAssociatedValue() &&
1191 "Associated value mismatch!");
1192 return;
1193 case IRP_CALL_SITE:
1194 assert((CBContext == nullptr) &&
1195 "'call site function' position must not have CallBaseContext!");
1196 assert((isa<CallBase>(getAsValuePtr())) &&
1197 "Expected call base for 'call site function' position!");
1198 assert(getAsValuePtr() == &getAssociatedValue() &&
1199 "Associated value mismatch!");
1200 return;
1201 case IRP_FUNCTION:
1202 assert(isa<Function>(getAsValuePtr()) &&
1203 "Expected function for a 'function' position!");
1204 assert(getAsValuePtr() == &getAssociatedValue() &&
1205 "Associated value mismatch!");
1206 return;
1207 case IRP_ARGUMENT:
1208 assert(isa<Argument>(getAsValuePtr()) &&
1209 "Expected argument for a 'argument' position!");
1210 assert(getAsValuePtr() == &getAssociatedValue() &&
1211 "Associated value mismatch!");
1212 return;
1213 case IRP_CALL_SITE_ARGUMENT: {
1214 assert((CBContext == nullptr) &&
1215 "'call site argument' position must not have CallBaseContext!");
1216 Use *U = getAsUsePtr();
1217 (void)U; // Silence unused variable warning.
1218 assert(U && "Expected use for a 'call site argument' position!");
1219 assert(isa<CallBase>(U->getUser()) &&
1220 "Expected call base user for a 'call site argument' position!");
1221 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1222 "Expected call base argument operand for a 'call site argument' "
1223 "position");
1224 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1225 unsigned(getCallSiteArgNo()) &&
1226 "Argument number mismatch!");
1227 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1228 return;
1229 }
1230 }
1231#endif
1232}
1233
1234std::optional<Constant *>
1235Attributor::getAssumedConstant(const IRPosition &IRP,
1236 const AbstractAttribute &AA,
1237 bool &UsedAssumedInformation) {
1238 // First check all callbacks provided by outside AAs. If any of them returns
1239 // a non-null value that is different from the associated value, or
1240 // std::nullopt, we assume it's simplified.
1241 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1242 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1243 if (!SimplifiedV)
1244 return std::nullopt;
1245 if (isa_and_nonnull<Constant>(*SimplifiedV))
1246 return cast<Constant>(*SimplifiedV);
1247 return nullptr;
1248 }
1249 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1250 return C;
1251 SmallVector<AA::ValueAndContext> Values;
1252 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1253 AA::ValueScope::Interprocedural,
1254 UsedAssumedInformation)) {
1255 if (Values.empty())
1256 return std::nullopt;
1257 if (auto *C = dyn_cast_or_null<Constant>(
1258 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1259 return C;
1260 }
1261 return nullptr;
1262}
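// Illustrative use (hypothetical caller): querying a simplified constant for
// an operand during deduction; the tri-state result distinguishes "not yet
// known" from "known to not be a single constant":
//
//   bool UsedAssumed = false;
//   std::optional<Constant *> C =
//       A.getAssumedConstant(IRPosition::value(*Op), QueryingAA, UsedAssumed);
//   if (!C.has_value()) {
//     // std::nullopt: still unknown, ask again in a later iteration.
//   } else if (*C) {
//     // A concrete constant that can be used for folding.
//   } else {
//     // nullptr: no single constant, give up on this simplification.
//   }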
1263
1265 const IRPosition &IRP, const AbstractAttribute *AA,
1266 bool &UsedAssumedInformation, AA::ValueScope S) {
1267 // First check all callbacks provided by outside AAs. If any of them returns
1268 // a non-null value that is different from the associated value, or
1269 // std::nullopt, we assume it's simplified.
1270 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1271 return CB(IRP, AA, UsedAssumedInformation);
1272
1274 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1275 return &IRP.getAssociatedValue();
1276 if (Values.empty())
1277 return std::nullopt;
1278 if (AA)
1279 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1280 return V;
1283 return nullptr;
1284 return &IRP.getAssociatedValue();
1285}
1286
1287bool Attributor::getAssumedSimplifiedValues(
1288 const IRPosition &IRP, const AbstractAttribute *AA,
1289 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1290 bool &UsedAssumedInformation) {
1291 // First check all callbacks provided by outside AAs. If any of them returns
1292 // a non-null value that is different from the associated value, or
1293 // std::nullopt, we assume it's simplified.
1294 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1295 for (const auto &CB : SimplificationCBs) {
1296 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1297 if (!CBResult.has_value())
1298 continue;
1299 Value *V = *CBResult;
1300 if (!V)
1301 return false;
1304 Values.push_back(AA::ValueAndContext{*V, nullptr});
1305 else
1306 return false;
1307 }
1308 if (!SimplificationCBs.empty())
1309 return true;
1310
1311 // If no high-level/outside simplification occurred, use AAPotentialValues.
1312 const auto &PotentialValuesAA =
1313 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1314 if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
1315 return false;
1316 UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
1317 return true;
1318}
1319
1320std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1321 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1322 bool &UsedAssumedInformation) {
1323 if (!V)
1324 return V;
1325 if (*V == nullptr || isa<Constant>(*V))
1326 return V;
1327 if (auto *Arg = dyn_cast<Argument>(*V))
1328 if (CB.getCalledFunction() == Arg->getParent())
1329 if (!Arg->hasPointeeInMemoryValueAttr())
1330 return getAssumedSimplified(
1331 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1332 UsedAssumedInformation, AA::Intraprocedural);
1333 return nullptr;
1334}
1335
1335Attributor::~Attributor() {
1336 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1338 // thus we cannot delete them. We can, and want to, destruct them though.
1339 for (auto &It : AAMap) {
1340 AbstractAttribute *AA = It.getSecond();
1341 AA->~AbstractAttribute();
1342 }
1343}
1344
1345bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1346 const AAIsDead *FnLivenessAA,
1347 bool &UsedAssumedInformation,
1348 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1349 const IRPosition &IRP = AA.getIRPosition();
1350 if (!Functions.count(IRP.getAnchorScope()))
1351 return false;
1352 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1353 CheckBBLivenessOnly, DepClass);
1354}
1355
1356bool Attributor::isAssumedDead(const Use &U,
1357 const AbstractAttribute *QueryingAA,
1358 const AAIsDead *FnLivenessAA,
1359 bool &UsedAssumedInformation,
1360 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1361 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1362 if (!UserI)
1363 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1364 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1365
1366 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1367 // For call site argument uses we can check if the argument is
1368 // unused/dead.
1369 if (CB->isArgOperand(&U)) {
1370 const IRPosition &CSArgPos =
1371 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1372 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1373 UsedAssumedInformation, CheckBBLivenessOnly,
1374 DepClass);
1375 }
1376 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1377 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1378 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1379 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1380 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1381 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1382 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1383 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1384 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1385 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1386 const IRPosition IRP = IRPosition::inst(*SI);
1387 const AAIsDead &IsDeadAA =
1388 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1389 if (IsDeadAA.isRemovableStore()) {
1390 if (QueryingAA)
1391 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1392 if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
1393 UsedAssumedInformation = true;
1394 return true;
1395 }
1396 }
1397 }
1398
1399 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1400 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1401}
1402
1403bool Attributor::isAssumedDead(const Instruction &I,
1404 const AbstractAttribute *QueryingAA,
1405 const AAIsDead *FnLivenessAA,
1406 bool &UsedAssumedInformation,
1407 bool CheckBBLivenessOnly, DepClassTy DepClass,
1408 bool CheckForDeadStore) {
1409 const IRPosition::CallBaseContext *CBCtx =
1410 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1411
1412 if (ManifestAddedBlocks.contains(I.getParent()))
1413 return false;
1414
1415 const Function &F = *I.getFunction();
1416 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1417 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1418 QueryingAA, DepClassTy::NONE);
1419
1420 // Don't use recursive reasoning.
1421 if (QueryingAA == FnLivenessAA)
1422 return false;
1423
1424 // If we have a context instruction and a liveness AA we use it.
1425 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1426 : FnLivenessAA->isAssumedDead(&I)) {
1427 if (QueryingAA)
1428 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1429 if (!FnLivenessAA->isKnownDead(&I))
1430 UsedAssumedInformation = true;
1431 return true;
1432 }
1433
1434 if (CheckBBLivenessOnly)
1435 return false;
1436
1437 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1438 const AAIsDead &IsDeadAA =
1439 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1440
1441 // Don't use recursive reasoning.
1442 if (QueryingAA == &IsDeadAA)
1443 return false;
1444
1445 if (IsDeadAA.isAssumedDead()) {
1446 if (QueryingAA)
1447 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1448 if (!IsDeadAA.isKnownDead())
1449 UsedAssumedInformation = true;
1450 return true;
1451 }
1452
1453 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA.isRemovableStore()) {
1454 if (QueryingAA)
1455 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1456 if (!IsDeadAA.isKnownDead())
1457 UsedAssumedInformation = true;
1458 return true;
1459 }
1460
1461 return false;
1462}
1463
1464bool Attributor::isAssumedDead(const IRPosition &IRP,
1465 const AbstractAttribute *QueryingAA,
1466 const AAIsDead *FnLivenessAA,
1467 bool &UsedAssumedInformation,
1468 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1469 // Don't check liveness for constants, e.g. functions, used as (floating)
1470 // values since the context instruction and such is meaningless here.
1471 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1472 isa<Constant>(IRP.getAssociatedValue())) {
1473 return false;
1474 }
1475
1476 Instruction *CtxI = IRP.getCtxI();
1477 if (CtxI &&
1478 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1479 /* CheckBBLivenessOnly */ true,
1480 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1481 return true;
1482
1483 if (CheckBBLivenessOnly)
1484 return false;
1485
1486 // If we haven't succeeded we query the specific liveness info for the IRP.
1487 const AAIsDead *IsDeadAA;
1488 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1489 IsDeadAA = &getOrCreateAAFor<AAIsDead>(
1490 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1491 QueryingAA, DepClassTy::NONE);
1492 else
1493 IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1494
1495 // Don't use recursive reasoning.
1496 if (QueryingAA == IsDeadAA)
1497 return false;
1498
1499 if (IsDeadAA->isAssumedDead()) {
1500 if (QueryingAA)
1501 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1502 if (!IsDeadAA->isKnownDead())
1503 UsedAssumedInformation = true;
1504 return true;
1505 }
1506
1507 return false;
1508}
1509
1510bool Attributor::isAssumedDead(const BasicBlock &BB,
1511 const AbstractAttribute *QueryingAA,
1512 const AAIsDead *FnLivenessAA,
1513 DepClassTy DepClass) {
1514 const Function &F = *BB.getParent();
1515 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1516 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1517 QueryingAA, DepClassTy::NONE);
1518
1519 // Don't use recursive reasoning.
1520 if (QueryingAA == FnLivenessAA)
1521 return false;
1522
1523 if (FnLivenessAA->isAssumedDead(&BB)) {
1524 if (QueryingAA)
1525 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1526 return true;
1527 }
1528
1529 return false;
1530}
1531
1532bool Attributor::checkForAllUses(
1533 function_ref<bool(const Use &, bool &)> Pred,
1534 const AbstractAttribute &QueryingAA, const Value &V,
1535 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1536 bool IgnoreDroppableUses,
1537 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1538
1539 // Check virtual uses first.
1540 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1541 if (!CB(*this, &QueryingAA))
1542 return false;
1543
1544 // Check the trivial case first as it catches void values.
1545 if (V.use_empty())
1546 return true;
1547
1548 const IRPosition &IRP = QueryingAA.getIRPosition();
1549
1550 SmallVector<const Use *, 16> Worklist;
1551 SmallPtrSet<const Use *, 16> Visited;
1552 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1553 for (const Use &UU : V.uses()) {
1554 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1555 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1556 "rejected by the equivalence call back: "
1557 << *UU << "!\n");
1558 return false;
1559 }
1560
1561 Worklist.push_back(&UU);
1562 }
1563 return true;
1564 };
1565
1566 AddUsers(V, /* OldUse */ nullptr);
1567
1568 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1569 << " initial uses to check\n");
1570
1571 const Function *ScopeFn = IRP.getAnchorScope();
1572 const auto *LivenessAA =
1573 ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1575 : nullptr;
1576
1577 while (!Worklist.empty()) {
1578 const Use *U = Worklist.pop_back_val();
1579 if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
1580 continue;
1581 LLVM_DEBUG({
1582 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1583 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1584 << "\n";
1585 else
1586 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1587 << "\n";
1588 });
1589 bool UsedAssumedInformation = false;
1590 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1591 CheckBBLivenessOnly, LivenessDepClass)) {
1592 LLVM_DEBUG(
1593 dbgs() << "[Attributor] Dead use, skip!\n");
1594 continue;
1595 }
1596 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1597 LLVM_DEBUG(
1598 dbgs() << "[Attributor] Droppable user, skip!\n");
1599 continue;
1600 }
1601
1602 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1603 if (&SI->getOperandUse(0) == U) {
1604 if (!Visited.insert(U).second)
1605 continue;
1606 SmallSetVector<Value *, 4> PotentialCopies;
1608 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1609 /* OnlyExact */ true)) {
1610 LLVM_DEBUG(
1611 dbgs()
1612 << "[Attributor] Value is stored, continue with "
1613 << PotentialCopies.size()
1614 << " potential copies instead!\n");
1615 for (Value *PotentialCopy : PotentialCopies)
1616 if (!AddUsers(*PotentialCopy, U))
1617 return false;
1618 continue;
1619 }
1620 }
1621 }
1622
1623 bool Follow = false;
1624 if (!Pred(*U, Follow))
1625 return false;
1626 if (!Follow)
1627 continue;
1628
1629 User &Usr = *U->getUser();
1630 AddUsers(Usr, /* OldUse */ nullptr);
1631
1632 auto *RI = dyn_cast<ReturnInst>(&Usr);
1633 if (!RI)
1634 continue;
1635
1636 Function &F = *RI->getFunction();
1637 auto CallSitePred = [&](AbstractCallSite ACS) {
1638 return AddUsers(*ACS.getInstruction(), U);
1639 };
1640 if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1641 &QueryingAA, UsedAssumedInformation)) {
1642 LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1643 "to all call sites: "
1644 << *RI << "\n");
1645 return false;
1646 }
1647 }
1648
1649 return true;
1650}
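// Illustrative use (hypothetical callback): visiting all uses of a value
// while letting the Attributor skip dead or droppable users and follow values
// that are stored to and reloaded from memory:
//
//   auto UsePred = [&](const Use &U, bool &Follow) {
//     Follow = true;                       // also visit the users of U's user
//     return !isa<ICmpInst>(U.getUser());  // fail on a use we cannot handle
//   };
//   bool AllUsesOk = A.checkForAllUses(UsePred, QueryingAA, V);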
1651
1652bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1653 const AbstractAttribute &QueryingAA,
1654 bool RequireAllCallSites,
1655 bool &UsedAssumedInformation) {
1656 // We can try to determine information from
1657 // the call sites. However, this is only possible if all call sites are
1658 // known, which is the case if the function has internal linkage.
1659 const IRPosition &IRP = QueryingAA.getIRPosition();
1660 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1661 if (!AssociatedFunction) {
1662 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1663 << "\n");
1664 return false;
1665 }
1666
1667 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1668 &QueryingAA, UsedAssumedInformation);
1669}
1670
1671bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1672 const Function &Fn,
1673 bool RequireAllCallSites,
1674 const AbstractAttribute *QueryingAA,
1675 bool &UsedAssumedInformation,
1676 bool CheckPotentiallyDead) {
1677 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1678 LLVM_DEBUG(
1679 dbgs()
1680 << "[Attributor] Function " << Fn.getName()
1681 << " has no internal linkage, hence not all call sites are known\n");
1682 return false;
1683 }
1684 // Check virtual uses first.
1685 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1686 if (!CB(*this, QueryingAA))
1687 return false;
1688
1690 for (unsigned u = 0; u < Uses.size(); ++u) {
1691 const Use &U = *Uses[u];
1692 LLVM_DEBUG({
1693 if (auto *Fn = dyn_cast<Function>(U))
1694 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1695 << *U.getUser() << "\n";
1696 else
1697 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1698 << "\n";
1699 });
1700 if (!CheckPotentiallyDead &&
1701 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1702 /* CheckBBLivenessOnly */ true)) {
1703 LLVM_DEBUG(
1704 dbgs() << "[Attributor] Dead use, skip!\n");
1705 continue;
1706 }
1707 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1708 if (CE->isCast() && CE->getType()->isPointerTy()) {
1709 LLVM_DEBUG({
1710 dbgs() << "[Attributor] Use, is constant cast expression, add "
1711 << CE->getNumUses() << " uses of that expression instead!\n";
1712 });
1713 for (const Use &CEU : CE->uses())
1714 Uses.push_back(&CEU);
1715 continue;
1716 }
1717 }
1718
1719 AbstractCallSite ACS(&U);
1720 if (!ACS) {
1721 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1722 << " has non call site use " << *U.get() << " in "
1723 << *U.getUser() << "\n");
1724 // BlockAddress users are allowed.
1725 if (isa<BlockAddress>(U.getUser()))
1726 continue;
1727 return false;
1728 }
1729
1730 const Use *EffectiveUse =
1731 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1732 if (!ACS.isCallee(EffectiveUse)) {
1733 if (!RequireAllCallSites) {
1734 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1735 << " is not a call of " << Fn.getName()
1736 << ", skip use\n");
1737 continue;
1738 }
1739 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1740 << " is an invalid use of " << Fn.getName() << "\n");
1741 return false;
1742 }
1743
1744 // Make sure the arguments that can be matched between the call site and the
1745 // callee agree on their type. It is unlikely they do not and it doesn't
1746 // make sense for all attributes to know/care about this.
1747 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1748 unsigned MinArgsParams =
1749 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1750 for (unsigned u = 0; u < MinArgsParams; ++u) {
1751 Value *CSArgOp = ACS.getCallArgOperand(u);
1752 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1753 LLVM_DEBUG(
1754 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1755 << u << "@" << Fn.getName() << ": "
1756 << *Fn.getArg(u)->getType() << " vs. "
1757 << *ACS.getCallArgOperand(u)->getType() << "\n");
1758 return false;
1759 }
1760 }
1761
1762 if (Pred(ACS))
1763 continue;
1764
1765 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1766 << *ACS.getInstruction() << "\n");
1767 return false;
1768 }
1769
1770 return true;
1771}
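// Illustrative use (hypothetical callback, ArgNo is assumed): an argument
// attribute inspecting every call site of its anchor function; the walk fails
// if a call site cannot be identified while RequireAllCallSites is set:
//
//   auto CallSitePred = [&](AbstractCallSite ACS) {
//     Value *ArgOp = ACS.getCallArgOperand(ArgNo);
//     return ArgOp && isa<Constant>(ArgOp);
//   };
//   bool UsedAssumed = false;
//   bool AllKnown = A.checkForAllCallSites(CallSitePred, Fn,
//                                          /* RequireAllCallSites */ true,
//                                          &QueryingAA, UsedAssumed);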
1772
1773bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1774 // TODO: Maintain a cache of Values that are
1775 // on the pathway from an Argument to an Instruction that would affect the
1776 // liveness/return state etc.
1778}
1779
1780bool Attributor::checkForAllReturnedValuesAndReturnInsts(
1781 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
1782 const AbstractAttribute &QueryingAA) {
1783
1784 const IRPosition &IRP = QueryingAA.getIRPosition();
1785 // Since we need to provide return instructions we have to have an exact
1786 // definition.
1787 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1788 if (!AssociatedFunction)
1789 return false;
1790
1791 // If this is a call site query we use the call site specific return values
1792 // and liveness information.
1793 // TODO: use the function scope once we have call site AAReturnedValues.
1794 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1795 const auto &AARetVal =
1796 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1797 if (!AARetVal.getState().isValidState())
1798 return false;
1799
1800 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
1801}
1802
1803bool Attributor::checkForAllReturnedValues(
1804 function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) {
1805
1806 const IRPosition &IRP = QueryingAA.getIRPosition();
1807 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1808 if (!AssociatedFunction)
1809 return false;
1810
1811 // TODO: use the function scope once we have call site AAReturnedValues.
1812 const IRPosition &QueryIRP = IRPosition::function(
1813 *AssociatedFunction, QueryingAA.getCallBaseContext());
1814 const auto &AARetVal =
1815 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1816 if (!AARetVal.getState().isValidState())
1817 return false;
1818
1819 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
1820 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
1821 return Pred(RV);
1822 });
1823}
1824
1825static bool checkForAllInstructionsImpl(
1826 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
1827 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
1828 const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
1829 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
1830 bool CheckPotentiallyDead = false) {
1831 for (unsigned Opcode : Opcodes) {
1832 // Check if we have instructions with this opcode at all first.
1833 auto *Insts = OpcodeInstMap.lookup(Opcode);
1834 if (!Insts)
1835 continue;
1836
1837 for (Instruction *I : *Insts) {
1838 // Skip dead instructions.
1839 if (A && !CheckPotentiallyDead &&
1840 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
1841 UsedAssumedInformation, CheckBBLivenessOnly)) {
1843 dbgs() << "[Attributor] Instruction " << *I
1844 << " is potentially dead, skip!\n";);
1845 continue;
1846 }
1847
1848 if (!Pred(*I))
1849 return false;
1850 }
1851 }
1852 return true;
1853}
1854
1855bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1856 const Function *Fn,
1857 const AbstractAttribute &QueryingAA,
1858 const ArrayRef<unsigned> &Opcodes,
1859 bool &UsedAssumedInformation,
1860 bool CheckBBLivenessOnly,
1861 bool CheckPotentiallyDead) {
1862 // Since we need to provide instructions we have to have an exact definition.
1863 if (!Fn || Fn->isDeclaration())
1864 return false;
1865
1866 // TODO: use the function scope once we have call site AAReturnedValues.
1867 const IRPosition &QueryIRP = IRPosition::function(*Fn);
1868 const auto *LivenessAA =
1869 (CheckBBLivenessOnly || CheckPotentiallyDead)
1870 ? nullptr
1871 : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
1872
1873 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
1874 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
1875 LivenessAA, Opcodes, UsedAssumedInformation,
1876 CheckBBLivenessOnly, CheckPotentiallyDead))
1877 return false;
1878
1879 return true;
1880}
1881
1882bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1883 const AbstractAttribute &QueryingAA,
1884 const ArrayRef<unsigned> &Opcodes,
1885 bool &UsedAssumedInformation,
1886 bool CheckBBLivenessOnly,
1887 bool CheckPotentiallyDead) {
1888 const IRPosition &IRP = QueryingAA.getIRPosition();
1889 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1890 return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
1891 UsedAssumedInformation, CheckBBLivenessOnly,
1892 CheckPotentiallyDead);
1893}
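//
// Editorial example (not part of the upstream source): an abstract attribute
// can restrict its scan to selected opcodes via checkForAllInstructions. The
// AA type AAMyProperty is hypothetical; the predicate mirrors the must-tail
// check used later in this file, and the trailing liveness flags are assumed
// to default to false as declared in Attributor.h.
//
// ChangeStatus AAMyProperty::updateImpl(Attributor &A) {
//   auto InstPred = [](Instruction &I) {
//     if (auto *CI = dyn_cast<CallInst>(&I))
//       return !CI->isMustTailCall();
//     return true;
//   };
//   bool UsedAssumedInformation = false;
//   if (!A.checkForAllInstructions(InstPred, *this,
//                                  {(unsigned)Instruction::Call},
//                                  UsedAssumedInformation))
//     return indicatePessimisticFixpoint();
//   return ChangeStatus::UNCHANGED;
// }
//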
1894
1895bool Attributor::checkForAllReadWriteInstructions(
1896 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
1897 bool &UsedAssumedInformation) {
1898
1899 const Function *AssociatedFunction =
1900 QueryingAA.getIRPosition().getAssociatedFunction();
1901 if (!AssociatedFunction)
1902 return false;
1903
1904 // TODO: use the function scope once we have call site AAReturnedValues.
1905 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1906 const auto &LivenessAA =
1907 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
1908
1909 for (Instruction *I :
1910 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
1911 // Skip dead instructions.
1912 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA,
1913 UsedAssumedInformation))
1914 continue;
1915
1916 if (!Pred(*I))
1917 return false;
1918 }
1919
1920 return true;
1921}
1922
1923void Attributor::runTillFixpoint() {
1924 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
1925 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
1926 << DG.SyntheticRoot.Deps.size()
1927 << " abstract attributes.\n");
1928
1929 // Now that all abstract attributes are collected and initialized we start
1930 // the abstract analysis.
1931
1932 unsigned IterationCounter = 1;
1933 unsigned MaxIterations =
1934 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
1935
1937 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
1938 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
1939
1940 do {
1941 // Remember the size to determine new attributes.
1942 size_t NumAAs = DG.SyntheticRoot.Deps.size();
1943 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
1944 << ", Worklist size: " << Worklist.size() << "\n");
1945
1946 // For invalid AAs we can fix dependent AAs that have a required dependence,
1947 // thereby folding long dependence chains in a single step without the need
1948 // to run updates.
1949 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
1950 AbstractAttribute *InvalidAA = InvalidAAs[u];
1951
1952 // Check the dependences to fast track invalidation.
1954 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
1955 << " has " << InvalidAA->Deps.size()
1956 << " required & optional dependences\n");
1957 while (!InvalidAA->Deps.empty()) {
1958 const auto &Dep = InvalidAA->Deps.back();
1959 InvalidAA->Deps.pop_back();
1960 AbstractAttribute *DepAA = cast<AbstractAttribute>(Dep.getPointer());
1961 if (Dep.getInt() == unsigned(DepClassTy::OPTIONAL)) {
1963 dbgs() << " - recompute: " << *DepAA);
1964 Worklist.insert(DepAA);
1965 continue;
1966 }
1968 << " - invalidate: " << *DepAA);
1970 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
1971 if (!DepAA->getState().isValidState())
1972 InvalidAAs.insert(DepAA);
1973 else
1974 ChangedAAs.push_back(DepAA);
1975 }
1976 }
1977
1978 // Add all abstract attributes that are potentially dependent on one that
1979 // changed to the work list.
1980 for (AbstractAttribute *ChangedAA : ChangedAAs)
1981 while (!ChangedAA->Deps.empty()) {
1982 Worklist.insert(
1983 cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
1984 ChangedAA->Deps.pop_back();
1985 }
1986
1987 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
1988 << ", Worklist+Dependent size: " << Worklist.size()
1989 << "\n");
1990
1991 // Reset the changed and invalid set.
1992 ChangedAAs.clear();
1993 InvalidAAs.clear();
1994
1995 // Update all abstract attribute in the work list and record the ones that
1996 // changed.
1997 for (AbstractAttribute *AA : Worklist) {
1998 const auto &AAState = AA->getState();
1999 if (!AAState.isAtFixpoint())
2000 if (updateAA(*AA) == ChangeStatus::CHANGED)
2001 ChangedAAs.push_back(AA);
2002
2003 // Use the InvalidAAs vector to propagate invalid states transitively and
2004 // quickly, without requiring updates.
2005 if (!AAState.isValidState())
2006 InvalidAAs.insert(AA);
2007 }
2008
2009 // Add attributes to the changed set if they have been created in the last
2010 // iteration.
2011 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2012 DG.SyntheticRoot.end());
2013
2014 // Reset the work list and repopulate with the changed abstract attributes.
2015 // Note that dependent ones are added above.
2016 Worklist.clear();
2017 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2018 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2019 QueryAAsAwaitingUpdate.end());
2020 QueryAAsAwaitingUpdate.clear();
2021
2022 } while (!Worklist.empty() &&
2023 (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
2024
2025 if (IterationCounter > MaxIterations && !Functions.empty()) {
2026 auto Remark = [&](OptimizationRemarkMissed ORM) {
2027 return ORM << "Attributor did not reach a fixpoint after "
2028 << ore::NV("Iterations", MaxIterations) << " iterations.";
2029 };
2030 Function *F = Functions.front();
2031 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2032 }
2033
2034 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2035 << IterationCounter << "/" << MaxIterations
2036 << " iterations\n");
2037
2038 // Reset abstract attributes not settled in a sound fixpoint by now. This
2039 // happens when we stopped the fixpoint iteration early. Note that only the
2040 // ones marked as "changed" *and* the ones transitively depending on them
2041 // need to be reverted to a pessimistic state. Others might not be in a
2042 // fixpoint state but we can use the optimistic results for them anyway.
2044 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2045 AbstractAttribute *ChangedAA = ChangedAAs[u];
2046 if (!Visited.insert(ChangedAA).second)
2047 continue;
2048
2049 AbstractState &State = ChangedAA->getState();
2050 if (!State.isAtFixpoint()) {
2051 State.indicatePessimisticFixpoint();
2052
2053 NumAttributesTimedOut++;
2054 }
2055
2056 while (!ChangedAA->Deps.empty()) {
2057 ChangedAAs.push_back(
2058 cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
2059 ChangedAA->Deps.pop_back();
2060 }
2061 }
2062
2063 LLVM_DEBUG({
2064 if (!Visited.empty())
2065 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2066 << " abstract attributes.\n";
2067 });
2068
2069 if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
2070 errs() << "\n[Attributor] Fixpoint iteration done after: "
2071 << IterationCounter << "/" << MaxIterations << " iterations\n";
2072 llvm_unreachable("The fixpoint was not reached with exactly the number of "
2073 "specified iterations!");
2074 }
2075}
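//
// Editorial note (not part of the upstream source): the iteration budget used
// above comes either from the pass configuration or from the command line
// option backing SetFixpointIterations. A hypothetical driver could bound the
// fixpoint iteration explicitly, e.g.:
//
//   AttributorConfig AC(CGUpdater);
//   AC.MaxFixpointIterations = 32; // overrides the command line default
//   Attributor A(Functions, InfoCache, AC);
//   A.run();
//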
2076
2077void Attributor::registerForUpdate(AbstractAttribute &AA) {
2078 assert(AA.isQueryAA() &&
2079 "Non-query AAs should not be required to register for updates!");
2080 QueryAAsAwaitingUpdate.insert(&AA);
2081}
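//
// Editorial example (not part of the upstream source): a query AA that wants
// to be revisited in the next iteration can register itself from its own
// update; AAMyQuery is hypothetical.
//
//   ChangeStatus AAMyQuery::updateImpl(Attributor &A) {
//     // ... gather information ...
//     A.registerForUpdate(*this); // only valid for query AAs (see assert above)
//     return ChangeStatus::UNCHANGED;
//   }
//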
2082
2083ChangeStatus Attributor::manifestAttributes() {
2084 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2085 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2086
2087 unsigned NumManifested = 0;
2088 unsigned NumAtFixpoint = 0;
2089 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2090 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2091 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2092 AbstractState &State = AA->getState();
2093
2094 // If there is not already a fixpoint reached, we can now take the
2095 // optimistic state. This is correct because we enforced a pessimistic one
2096 // on abstract attributes that were transitively dependent on a changed one
2097 // already above.
2098 if (!State.isAtFixpoint())
2099 State.indicateOptimisticFixpoint();
2100
2101 // We must not manifest attributes that use CallBase context information.
2102 if (AA->hasCallBaseContext())
2103 continue;
2104 // If the state is invalid, we do not try to manifest it.
2105 if (!State.isValidState())
2106 continue;
2107
2108 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2109 continue;
2110
2111 // Skip dead code.
2112 bool UsedAssumedInformation = false;
2113 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2114 /* CheckBBLivenessOnly */ true))
2115 continue;
2116 // Check the manifest debug counter, which allows skipping the manifestation
2117 // of AAs.
2118 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2119 continue;
2120 // Manifest the state and record if we changed the IR.
2121 ChangeStatus LocalChange = AA->manifest(*this);
2122 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2123 AA->trackStatistics();
2124 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2125 << "\n");
2126
2127 ManifestChange = ManifestChange | LocalChange;
2128
2129 NumAtFixpoint++;
2130 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2131 }
2132
2133 (void)NumManifested;
2134 (void)NumAtFixpoint;
2135 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2136 << " arguments while " << NumAtFixpoint
2137 << " were in a valid fixpoint state\n");
2138
2139 NumAttributesManifested += NumManifested;
2140 NumAttributesValidFixpoint += NumAtFixpoint;
2141
2142 (void)NumFinalAAs;
2143 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2144 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size(); ++u)
2145 errs() << "Unexpected abstract attribute: "
2146 << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
2147 << " :: "
2148 << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
2149 ->getIRPosition()
2150 .getAssociatedValue()
2151 << "\n";
2152 llvm_unreachable("Expected the final number of abstract attributes to "
2153 "remain unchanged!");
2154 }
2155 return ManifestChange;
2156}
2157
2158void Attributor::identifyDeadInternalFunctions() {
2159 // Early exit if we don't intend to delete functions.
2160 if (!Configuration.DeleteFns)
2161 return;
2162
2163 // To avoid triggering an assertion in the lazy call graph we will not delete
2164 // any internal library functions. We should modify the assertion though and
2165 // allow internals to be deleted.
2166 const auto *TLI =
2167 isModulePass()
2168 ? nullptr
2170 LibFunc LF;
2171
2172 // Identify dead internal functions and delete them. This happens outside
2173 // the other fixpoint analysis as we might treat potentially dead functions
2174 // as live to lower the number of iterations. If they happen to be dead, the
2175 // below fixpoint loop will identify and eliminate them.
2176
2177 SmallVector<Function *, 8> InternalFns;
2178 for (Function *F : Functions)
2179 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2180 InternalFns.push_back(F);
2181
2182 SmallPtrSet<Function *, 8> LiveInternalFns;
2183 bool FoundLiveInternal = true;
2184 while (FoundLiveInternal) {
2185 FoundLiveInternal = false;
2186 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
2187 Function *F = InternalFns[u];
2188 if (!F)
2189 continue;
2190
2191 bool UsedAssumedInformation = false;
2192 if (checkForAllCallSites(
2193 [&](AbstractCallSite ACS) {
2194 Function *Callee = ACS.getInstruction()->getFunction();
2195 return ToBeDeletedFunctions.count(Callee) ||
2196 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2197 !LiveInternalFns.count(Callee));
2198 },
2199 *F, true, nullptr, UsedAssumedInformation)) {
2200 continue;
2201 }
2202
2203 LiveInternalFns.insert(F);
2204 InternalFns[u] = nullptr;
2205 FoundLiveInternal = true;
2206 }
2207 }
2208
2209 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
2210 if (Function *F = InternalFns[u])
2211 ToBeDeletedFunctions.insert(F);
2212}
2213
2214ChangeStatus Attributor::cleanupIR() {
2215 TimeTraceScope TimeScope("Attributor::cleanupIR");
2216 // Delete stuff at the end to avoid invalid references and to keep a nice order.
2217 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2218 << ToBeDeletedFunctions.size() << " functions and "
2219 << ToBeDeletedBlocks.size() << " blocks and "
2220 << ToBeDeletedInsts.size() << " instructions and "
2221 << ToBeChangedValues.size() << " values and "
2222 << ToBeChangedUses.size() << " uses. To insert "
2223 << ToBeChangedToUnreachableInsts.size()
2224 << " unreachables.\n"
2225 << "Preserve manifest added " << ManifestAddedBlocks.size()
2226 << " blocks\n");
2227
2229 SmallVector<Instruction *, 32> TerminatorsToFold;
2230
2231 auto ReplaceUse = [&](Use *U, Value *NewV) {
2232 Value *OldV = U->get();
2233
2234 // If we plan to replace NewV we need to update it at this point.
2235 do {
2236 const auto &Entry = ToBeChangedValues.lookup(NewV);
2237 if (!get<0>(Entry))
2238 break;
2239 NewV = get<0>(Entry);
2240 } while (true);
2241
2242 Instruction *I = dyn_cast<Instruction>(U->getUser());
2243 assert((!I || isRunOn(*I->getFunction())) &&
2244 "Cannot replace an instruction outside the current SCC!");
2245
2246 // Do not replace uses in returns if the value is a must-tail call we will
2247 // not delete.
2248 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2249 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2250 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2251 return;
2252 // If we rewrite a return and the new value is not an argument, strip the
2253 // `returned` attribute as it is wrong now.
2254 if (!isa<Argument>(NewV))
2255 for (auto &Arg : RI->getFunction()->args())
2256 Arg.removeAttr(Attribute::Returned);
2257 }
2258
2259 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2260 << " instead of " << *OldV << "\n");
2261 U->set(NewV);
2262
2263 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2264 CGModifiedFunctions.insert(I->getFunction());
2265 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2266 isInstructionTriviallyDead(I))
2267 DeadInsts.push_back(I);
2268 }
2269 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2270 auto *CB = cast<CallBase>(U->getUser());
2271 if (CB->isArgOperand(U)) {
2272 unsigned Idx = CB->getArgOperandNo(U);
2273 CB->removeParamAttr(Idx, Attribute::NoUndef);
2274 Function *Fn = CB->getCalledFunction();
2275 if (Fn && Fn->arg_size() > Idx)
2276 Fn->removeParamAttr(Idx, Attribute::NoUndef);
2277 }
2278 }
2279 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2280 Instruction *UserI = cast<Instruction>(U->getUser());
2281 if (isa<UndefValue>(NewV)) {
2282 ToBeChangedToUnreachableInsts.insert(UserI);
2283 } else {
2284 TerminatorsToFold.push_back(UserI);
2285 }
2286 }
2287 };
2288
2289 for (auto &It : ToBeChangedUses) {
2290 Use *U = It.first;
2291 Value *NewV = It.second;
2292 ReplaceUse(U, NewV);
2293 }
2294
2296 for (auto &It : ToBeChangedValues) {
2297 Value *OldV = It.first;
2298 auto [NewV, Done] = It.second;
2299 Uses.clear();
2300 for (auto &U : OldV->uses())
2301 if (Done || !U.getUser()->isDroppable())
2302 Uses.push_back(&U);
2303 for (Use *U : Uses) {
2304 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2305 if (!isRunOn(*I->getFunction()))
2306 continue;
2307 ReplaceUse(U, NewV);
2308 }
2309 }
2310
2311 for (const auto &V : InvokeWithDeadSuccessor)
2312 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2313 assert(isRunOn(*II->getFunction()) &&
2314 "Cannot replace an invoke outside the current SCC!");
2315 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2316 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2317 bool Invoke2CallAllowed =
2318 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2319 assert((UnwindBBIsDead || NormalBBIsDead) &&
2320 "Invoke does not have dead successors!");
2321 BasicBlock *BB = II->getParent();
2322 BasicBlock *NormalDestBB = II->getNormalDest();
2323 if (UnwindBBIsDead) {
2324 Instruction *NormalNextIP = &NormalDestBB->front();
2325 if (Invoke2CallAllowed) {
2326 changeToCall(II);
2327 NormalNextIP = BB->getTerminator();
2328 }
2329 if (NormalBBIsDead)
2330 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2331 } else {
2332 assert(NormalBBIsDead && "Broken invariant!");
2333 if (!NormalDestBB->getUniquePredecessor())
2334 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2335 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2336 }
2337 }
2338 for (Instruction *I : TerminatorsToFold) {
2339 assert(isRunOn(*I->getFunction()) &&
2340 "Cannot replace a terminator outside the current SCC!");
2341 CGModifiedFunctions.insert(I->getFunction());
2342 ConstantFoldTerminator(I->getParent());
2343 }
2344 for (const auto &V : ToBeChangedToUnreachableInsts)
2345 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2346 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2347 << "\n");
2348 assert(isRunOn(*I->getFunction()) &&
2349 "Cannot replace an instruction outside the current SCC!");
2350 CGModifiedFunctions.insert(I->getFunction());
2351 changeToUnreachable(I);
2352 }
2353
2354 for (const auto &V : ToBeDeletedInsts) {
2355 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2356 if (auto *CB = dyn_cast<CallBase>(I)) {
2357 assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
2358 "Cannot delete an instruction outside the current SCC!");
2359 if (!isa<IntrinsicInst>(CB))
2360 Configuration.CGUpdater.removeCallSite(*CB);
2361 }
2362 I->dropDroppableUses();
2363 CGModifiedFunctions.insert(I->getFunction());
2364 if (!I->getType()->isVoidTy())
2365 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2366 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2367 DeadInsts.push_back(I);
2368 else
2369 I->eraseFromParent();
2370 }
2371 }
2372
2373 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2374
2375 LLVM_DEBUG({
2376 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2377 for (auto &I : DeadInsts)
2378 if (I)
2379 dbgs() << " - " << *I << "\n";
2380 });
2381
2383
2384 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2385 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2386 ToBeDeletedBBs.reserve(NumDeadBlocks);
2387 for (BasicBlock *BB : ToBeDeletedBlocks) {
2388 assert(isRunOn(*BB->getParent()) &&
2389 "Cannot delete a block outside the current SCC!");
2390 CGModifiedFunctions.insert(BB->getParent());
2391 // Do not delete BBs added during manifests of AAs.
2392 if (ManifestAddedBlocks.contains(BB))
2393 continue;
2394 ToBeDeletedBBs.push_back(BB);
2395 }
2396 // We do not actually delete the blocks but squash them into a single
2397 // unreachable instruction; untangling branches that jump here is something
2398 // we need to do in a more generic way.
2399 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2400 }
2401
2402 identifyDeadInternalFunctions();
2403
2404 // Rewrite the functions as requested during manifest.
2405 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2406
2407 for (Function *Fn : CGModifiedFunctions)
2408 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2409 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2410
2411 for (Function *Fn : ToBeDeletedFunctions) {
2412 if (!Functions.count(Fn))
2413 continue;
2414 Configuration.CGUpdater.removeFunction(*Fn);
2415 }
2416
2417 if (!ToBeChangedUses.empty())
2418 ManifestChange = ChangeStatus::CHANGED;
2419
2420 if (!ToBeChangedToUnreachableInsts.empty())
2421 ManifestChange = ChangeStatus::CHANGED;
2422
2423 if (!ToBeDeletedFunctions.empty())
2424 ManifestChange = ChangeStatus::CHANGED;
2425
2426 if (!ToBeDeletedBlocks.empty())
2427 ManifestChange = ChangeStatus::CHANGED;
2428
2429 if (!ToBeDeletedInsts.empty())
2430 ManifestChange = ChangeStatus::CHANGED;
2431
2432 if (!InvokeWithDeadSuccessor.empty())
2433 ManifestChange = ChangeStatus::CHANGED;
2434
2435 if (!DeadInsts.empty())
2436 ManifestChange = ChangeStatus::CHANGED;
2437
2438 NumFnDeleted += ToBeDeletedFunctions.size();
2439
2440 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2441 << " functions after manifest.\n");
2442
2443#ifdef EXPENSIVE_CHECKS
2444 for (Function *F : Functions) {
2445 if (ToBeDeletedFunctions.count(F))
2446 continue;
2447 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2448 }
2449#endif
2450
2451 return ManifestChange;
2452}
2453
2454ChangeStatus Attributor::run() {
2455 TimeTraceScope TimeScope("Attributor::run");
2456 AttributorCallGraph ACallGraph(*this);
2457
2458 if (PrintCallGraph)
2459 ACallGraph.populateAll();
2460
2461 Phase = AttributorPhase::UPDATE;
2462 runTillFixpoint();
2463
2464 // dump graphs on demand
2465 if (DumpDepGraph)
2466 DG.dumpGraph();
2467
2468 if (ViewDepGraph)
2469 DG.viewGraph();
2470
2472 DG.print();
2473
2474 Phase = AttributorPhase::MANIFEST;
2475 ChangeStatus ManifestChange = manifestAttributes();
2476
2477 Phase = AttributorPhase::CLEANUP;
2478 ChangeStatus CleanupChange = cleanupIR();
2479
2480 if (PrintCallGraph)
2481 ACallGraph.print();
2482
2483 return ManifestChange | CleanupChange;
2484}
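//
// Editorial example (not part of the upstream source): the typical driver
// sequence around run(), condensed from the pass boilerplate at the end of
// this file.
//
//   AttributorConfig AC(CGUpdater);
//   AC.IsModulePass = true;
//   Attributor A(Functions, InfoCache, AC);
//   for (Function *F : Functions)
//     A.identifyDefaultAbstractAttributes(*F);
//   bool Changed = A.run() == ChangeStatus::CHANGED;
//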
2485
2486ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2487 TimeTraceScope TimeScope(
2488 AA.getName() + std::to_string(AA.getIRPosition().getPositionKind()) +
2489 "::updateAA");
2490 assert(Phase == AttributorPhase::UPDATE &&
2491 "We can update AA only in the update stage!");
2492
2493 // Use a new dependence vector for this update.
2494 DependenceVector DV;
2495 DependenceStack.push_back(&DV);
2496
2497 auto &AAState = AA.getState();
2498 ChangeStatus CS = ChangeStatus::UNCHANGED;
2499 bool UsedAssumedInformation = false;
2500 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2501 /* CheckBBLivenessOnly */ true))
2502 CS = AA.update(*this);
2503
2504 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2505 // If the AA did not rely on outside information but changed, we run it
2506 // again to see if it found a fixpoint. Most AAs do but we don't require
2507 // them to. Hence, it might take the AA multiple iterations to get to a
2508 // fixpoint even if it does not rely on outside information, which is fine.
2509 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2510 if (CS == ChangeStatus::CHANGED)
2511 RerunCS = AA.update(*this);
2512
2513 // If the attribute did not change during the run or rerun, and it still did
2514 // not query any non-fix information, the state will not change and we can
2515 // indicate that right at this point.
2516 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2517 AAState.indicateOptimisticFixpoint();
2518 }
2519
2520 if (!AAState.isAtFixpoint())
2521 rememberDependences();
2522
2523 // Verify the stack was used properly, that is, we pop the dependence vector
2524 // we put there earlier.
2525 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2526 (void)PoppedDV;
2527 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2528
2529 return CS;
2530}
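//
// Editorial example (not part of the upstream source): dependences are
// usually recorded implicitly while an AA updates, simply by querying another
// AA through the Attributor; AAMyProperty is hypothetical.
//
//   ChangeStatus AAMyProperty::updateImpl(Attributor &A) {
//     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
//         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
//     // If NoUnwindAA later changes or becomes invalid, this AA is re-run (or
//     // invalidated) because of the recorded REQUIRED dependence.
//     (void)NoUnwindAA;
//     return ChangeStatus::UNCHANGED;
//   }
//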
2531
2532void Attributor::createShallowWrapper(Function &F) {
2533 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2534
2535 Module &M = *F.getParent();
2536 LLVMContext &Ctx = M.getContext();
2537 FunctionType *FnTy = F.getFunctionType();
2538
2539 Function *Wrapper =
2540 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2541 F.setName(""); // set the inside function anonymous
2542 M.getFunctionList().insert(F.getIterator(), Wrapper);
2543
2544 F.setLinkage(GlobalValue::InternalLinkage);
2545
2546 F.replaceAllUsesWith(Wrapper);
2547 assert(F.use_empty() && "Uses remained after wrapper was created!");
2548
2549 // Move the COMDAT section to the wrapper.
2550 // TODO: Check if we need to keep it for F as well.
2551 Wrapper->setComdat(F.getComdat());
2552 F.setComdat(nullptr);
2553
2554 // Copy all metadata and attributes but keep them on F as well.
2556 F.getAllMetadata(MDs);
2557 for (auto MDIt : MDs)
2558 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2559 Wrapper->setAttributes(F.getAttributes());
2560
2561 // Create the call in the wrapper.
2562 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2563
2565 Argument *FArgIt = F.arg_begin();
2566 for (Argument &Arg : Wrapper->args()) {
2567 Args.push_back(&Arg);
2568 Arg.setName((FArgIt++)->getName());
2569 }
2570
2571 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2572 CI->setTailCall(true);
2573 CI->addFnAttr(Attribute::NoInline);
2574 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2575
2576 NumFnShallowWrappersCreated++;
2577}
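//
// Editorial note (not part of the upstream source): conceptually, for a
// non-exact definition `foo`, the wrapper created above takes over the name,
// linkage, and attributes of `foo`, while the original body becomes an
// anonymous internal function that the wrapper tail-calls:
//
//   wrapper (old name, old linkage):
//     entry:
//       %r = tail call <old body, now internal> (args...)
//       ret %r
//
// External callers keep working, while the internal copy can be analyzed with
// full knowledge of its (single) call site.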
2578
2579bool Attributor::isInternalizable(Function &F) {
2580 if (F.isDeclaration() || F.hasLocalLinkage() ||
2581 GlobalValue::isInterposableLinkage(F.getLinkage()))
2582 return false;
2583 return true;
2584}
2585
2586Function *Attributor::internalizeFunction(Function &F, bool Force) {
2587 if (!AllowDeepWrapper && !Force)
2588 return nullptr;
2589 if (!isInternalizable(F))
2590 return nullptr;
2591
2592 SmallPtrSet<Function *, 2> FnSet = {&F};
2593 DenseMap<Function *, Function *> InternalizedFns;
2594 internalizeFunctions(FnSet, InternalizedFns);
2595
2596 return InternalizedFns[&F];
2597}
2598
2599bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2600 DenseMap<Function *, Function *> &FnMap) {
2601 for (Function *F : FnSet)
2602 if (!isInternalizable(*F))
2603 return false;
2604
2605 FnMap.clear();
2606 // Generate the internalized version of each function.
2607 for (Function *F : FnSet) {
2608 Module &M = *F->getParent();
2609 FunctionType *FnTy = F->getFunctionType();
2610
2611 // Create a copy of the current function
2612 Function *Copied =
2613 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2614 F->getName() + ".internalized");
2615 ValueToValueMapTy VMap;
2616 auto *NewFArgIt = Copied->arg_begin();
2617 for (auto &Arg : F->args()) {
2618 auto ArgName = Arg.getName();
2619 NewFArgIt->setName(ArgName);
2620 VMap[&Arg] = &(*NewFArgIt++);
2621 }
2623
2624 // Copy the body of the original function to the new one
2625 CloneFunctionInto(Copied, F, VMap,
2627
2628 // Set the linkage and visibility late as CloneFunctionInto has some
2629 // implicit requirements.
2632
2633 // Copy metadata
2635 F->getAllMetadata(MDs);
2636 for (auto MDIt : MDs)
2637 if (!Copied->hasMetadata())
2638 Copied->addMetadata(MDIt.first, *MDIt.second);
2639
2640 M.getFunctionList().insert(F->getIterator(), Copied);
2641 Copied->setDSOLocal(true);
2642 FnMap[F] = Copied;
2643 }
2644
2645 // Replace all uses of the old function with the new internalized function
2646 // unless the caller is a function that was just internalized.
2647 for (Function *F : FnSet) {
2648 auto &InternalizedFn = FnMap[F];
2649 auto IsNotInternalized = [&](Use &U) -> bool {
2650 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2651 return !FnMap.lookup(CB->getCaller());
2652 return false;
2653 };
2654 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2655 }
2656
2657 return true;
2658}
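//
// Editorial example (not part of the upstream source): a single function can
// be internalized through the convenience wrapper above; a null result means
// the function could not (or was not allowed to) be internalized.
//
//   if (Function *Copy = Attributor::internalizeFunction(*F)) {
//     // `Copy` is the ".internalized" clone; eligible call sites were
//     // redirected to it by replaceUsesWithIf above.
//     Functions.insert(Copy);
//   }
//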
2659
2660bool Attributor::isValidFunctionSignatureRewrite(
2661 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2662
2663 if (!Configuration.RewriteSignatures)
2664 return false;
2665
2666 Function *Fn = Arg.getParent();
2667 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2668 // Forbid call sites that cast the function return type. If we needed to
2669 // rewrite such functions we would have to re-create a cast for the new call
2670 // site (if the old one had uses).
2671 if (!ACS.getCalledFunction() ||
2672 ACS.getInstruction()->getType() !=
2674 return false;
2675 if (ACS.getCalledOperand()->getType() != Fn->getType())
2676 return false;
2677 // Forbid must-tail calls for now.
2678 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2679 };
2680
2681 // Avoid var-arg functions for now.
2682 if (Fn->isVarArg()) {
2683 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2684 return false;
2685 }
2686
2687 // Avoid functions with complicated argument passing semantics.
2688 AttributeList FnAttributeList = Fn->getAttributes();
2689 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2690 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2691 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2692 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2693 LLVM_DEBUG(
2694 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2695 return false;
2696 }
2697
2698 // Avoid callbacks for now.
2699 bool UsedAssumedInformation = false;
2700 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2701 UsedAssumedInformation)) {
2702 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2703 return false;
2704 }
2705
2706 auto InstPred = [](Instruction &I) {
2707 if (auto *CI = dyn_cast<CallInst>(&I))
2708 return !CI->isMustTailCall();
2709 return true;
2710 };
2711
2712 // Forbid must-tail calls for now.
2713 // TODO:
2714 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2715 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2716 nullptr, {Instruction::Call},
2717 UsedAssumedInformation)) {
2718 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2719 return false;
2720 }
2721
2722 return true;
2723}
2724
2725bool Attributor::registerFunctionSignatureRewrite(
2726 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2729 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2730 << Arg.getParent()->getName() << " with "
2731 << ReplacementTypes.size() << " replacements\n");
2732 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2733 "Cannot register an invalid rewrite");
2734
2735 Function *Fn = Arg.getParent();
2737 ArgumentReplacementMap[Fn];
2738 if (ARIs.empty())
2739 ARIs.resize(Fn->arg_size());
2740
2741 // If we already have a replacement with at most as many new arguments,
2742 // ignore this request.
2743 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2744 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2745 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2746 return false;
2747 }
2748
2749 // If we have a replacement already but we like the new one better, delete
2750 // the old.
2751 ARI.reset();
2752
2753 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2754 << Arg.getParent()->getName() << " with "
2755 << ReplacementTypes.size() << " replacements\n");
2756
2757 // Remember the replacement.
2758 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2759 std::move(CalleeRepairCB),
2760 std::move(ACSRepairCB)));
2761
2762 return true;
2763}
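//
// Editorial example (not part of the upstream source): an AA's manifest could
// request that argument `Arg` be replaced by two i32 values. `I32Ty` is a
// placeholder for a type obtained from the LLVMContext, and both repair
// callbacks are left empty, which rewriteFunctionSignatures below treats as
// optional.
//
//   SmallVector<Type *, 2> ReplacementTypes = {I32Ty, I32Ty};
//   if (A.isValidFunctionSignatureRewrite(Arg, ReplacementTypes))
//     A.registerFunctionSignatureRewrite(Arg, ReplacementTypes,
//                                        /* CalleeRepairCB */ {},
//                                        /* ACSRepairCB */ {});
//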
2764
2765bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2766 bool Result = true;
2767#ifndef NDEBUG
2768 if (SeedAllowList.size() != 0)
2770 Function *Fn = AA.getAnchorScope();
2771 if (FunctionSeedAllowList.size() != 0 && Fn)
2773#endif
2774 return Result;
2775}
2776
2777ChangeStatus Attributor::rewriteFunctionSignatures(
2778 SmallSetVector<Function *, 8> &ModifiedFns) {
2779 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2780
2781 for (auto &It : ArgumentReplacementMap) {
2782 Function *OldFn = It.getFirst();
2783
2784 // Deleted functions do not require rewrites.
2785 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2786 continue;
2787
2789 It.getSecond();
2790 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2791
2792 SmallVector<Type *, 16> NewArgumentTypes;
2793 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2794
2795 // Collect replacement argument types and copy over existing attributes.
2796 AttributeList OldFnAttributeList = OldFn->getAttributes();
2797 for (Argument &Arg : OldFn->args()) {
2798 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2799 ARIs[Arg.getArgNo()]) {
2800 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2801 ARI->ReplacementTypes.end());
2802 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2803 AttributeSet());
2804 } else {
2805 NewArgumentTypes.push_back(Arg.getType());
2806 NewArgumentAttributes.push_back(
2807 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2808 }
2809 }
2810
2811 uint64_t LargestVectorWidth = 0;
2812 for (auto *I : NewArgumentTypes)
2813 if (auto *VT = dyn_cast<llvm::VectorType>(I))
2814 LargestVectorWidth =
2815 std::max(LargestVectorWidth,
2816 VT->getPrimitiveSizeInBits().getKnownMinValue());
2817
2818 FunctionType *OldFnTy = OldFn->getFunctionType();
2819 Type *RetTy = OldFnTy->getReturnType();
2820
2821 // Construct the new function type using the new arguments types.
2822 FunctionType *NewFnTy =
2823 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
2824
2825 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
2826 << "' from " << *OldFn->getFunctionType() << " to "
2827 << *NewFnTy << "\n");
2828
2829 // Create the new function body and insert it into the module.
2830 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
2831 OldFn->getAddressSpace(), "");
2832 Functions.insert(NewFn);
2833 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
2834 NewFn->takeName(OldFn);
2835 NewFn->copyAttributesFrom(OldFn);
2836
2837 // Patch the pointer to LLVM function in debug info descriptor.
2838 NewFn->setSubprogram(OldFn->getSubprogram());
2839 OldFn->setSubprogram(nullptr);
2840
2841 // Recompute the parameter attributes list based on the new arguments for
2842 // the function.
2843 LLVMContext &Ctx = OldFn->getContext();
2845 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
2846 NewArgumentAttributes));
2847 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
2848
2849 // Since we have now created the new function, splice the body of the old
2850 // function right into the new function, leaving the old rotting hulk of the
2851 // function empty.
2852 NewFn->splice(NewFn->begin(), OldFn);
2853
2854 // Fixup block addresses to reference new function.
2855 SmallVector<BlockAddress *, 8u> BlockAddresses;
2856 for (User *U : OldFn->users())
2857 if (auto *BA = dyn_cast<BlockAddress>(U))
2858 BlockAddresses.push_back(BA);
2859 for (auto *BA : BlockAddresses)
2860 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2861
2862 // Set of all "call-like" instructions that invoke the old function mapped
2863 // to their new replacements.
2865
2866 // Callback to create a new "call-like" instruction for a given one.
2867 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2868 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2869 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2870
2871 // Collect the new argument operands for the replacement call site.
2872 SmallVector<Value *, 16> NewArgOperands;
2873 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2874 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2875 unsigned NewFirstArgNum = NewArgOperands.size();
2876 (void)NewFirstArgNum; // only used inside assert.
2877 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2878 ARIs[OldArgNum]) {
2879 if (ARI->ACSRepairCB)
2880 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2881 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
2882 NewArgOperands.size() &&
2883 "ACS repair callback did not provide as many operand as new "
2884 "types were registered!");
2885 // TODO: Expose the attribute set to the ACS repair callback
2886 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
2887 AttributeSet());
2888 } else {
2889 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
2890 NewArgOperandAttributes.push_back(
2891 OldCallAttributeList.getParamAttrs(OldArgNum));
2892 }
2893 }
2894
2895 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
2896 "Mismatch # argument operands vs. # argument operand attributes!");
2897 assert(NewArgOperands.size() == NewFn->arg_size() &&
2898 "Mismatch # argument operands vs. # function arguments!");
2899
2900 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
2901 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
2902
2903 // Create a new call or invoke instruction to replace the old one.
2904 CallBase *NewCB;
2905 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
2906 NewCB =
2907 InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
2908 NewArgOperands, OperandBundleDefs, "", OldCB);
2909 } else {
2910 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
2911 "", OldCB);
2912 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
2913 NewCB = NewCI;
2914 }
2915
2916 // Copy over various properties and the new attributes.
2917 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
2918 NewCB->setCallingConv(OldCB->getCallingConv());
2919 NewCB->takeName(OldCB);
2921 Ctx, OldCallAttributeList.getFnAttrs(),
2922 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
2923
2925 LargestVectorWidth);
2926
2927 CallSitePairs.push_back({OldCB, NewCB});
2928 return true;
2929 };
2930
2931 // Use the CallSiteReplacementCreator to create replacement call sites.
2932 bool UsedAssumedInformation = false;
2933 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
2934 true, nullptr, UsedAssumedInformation,
2935 /* CheckPotentiallyDead */ true);
2936 (void)Success;
2937 assert(Success && "Assumed call site replacement to succeed!");
2938
2939 // Rewire the arguments.
2940 Argument *OldFnArgIt = OldFn->arg_begin();
2941 Argument *NewFnArgIt = NewFn->arg_begin();
2942 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
2943 ++OldArgNum, ++OldFnArgIt) {
2944 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2945 ARIs[OldArgNum]) {
2946 if (ARI->CalleeRepairCB)
2947 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
2948 if (ARI->ReplacementTypes.empty())
2949 OldFnArgIt->replaceAllUsesWith(
2950 PoisonValue::get(OldFnArgIt->getType()));
2951 NewFnArgIt += ARI->ReplacementTypes.size();
2952 } else {
2953 NewFnArgIt->takeName(&*OldFnArgIt);
2954 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
2955 ++NewFnArgIt;
2956 }
2957 }
2958
2959 // Eliminate the instructions *after* we visited all of them.
2960 for (auto &CallSitePair : CallSitePairs) {
2961 CallBase &OldCB = *CallSitePair.first;
2962 CallBase &NewCB = *CallSitePair.second;
2963 assert(OldCB.getType() == NewCB.getType() &&
2964 "Cannot handle call sites with different types!");
2965 ModifiedFns.insert(OldCB.getFunction());
2966 Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
2967 OldCB.replaceAllUsesWith(&NewCB);
2968 OldCB.eraseFromParent();
2969 }
2970
2971 // Replace the function in the call graph (if any).
2972 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
2973
2974 // If the old function was modified and needed to be reanalyzed, the new one
2975 // does now.
2976 if (ModifiedFns.remove(OldFn))
2977 ModifiedFns.insert(NewFn);
2978
2979 Changed = ChangeStatus::CHANGED;
2980 }
2981
2982 return Changed;
2983}
2984
2985void InformationCache::initializeInformationCache(const Function &CF,
2986 FunctionInfo &FI) {
2987 // As we do not modify the function here we can remove the const
2988 // without breaking implicit assumptions. At the end of the day, we could
2989 // initialize the cache eagerly which would look the same to the users.
2990 Function &F = const_cast<Function &>(CF);
2991
2992 // Walk all instructions to find interesting instructions that might be
2993 // queried by abstract attributes during their initialization or update.
2994 // This has to happen before we create attributes.
2995
2997
2998 // Add \p V to the assume uses map, which tracks the number of uses outside
2999 // of "visited" assumes. If no outside uses are left, the value is added to
3000 // the assume-only-use vector.
3001 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3003 if (auto *I = dyn_cast<Instruction>(&V))
3004 Worklist.push_back(I);
3005 while (!Worklist.empty()) {
3006 const Instruction *I = Worklist.pop_back_val();
3007 std::optional<short> &NumUses = AssumeUsesMap[I];
3008 if (!NumUses)
3009 NumUses = I->getNumUses();
3010 NumUses = *NumUses - /* this assume */ 1;
3011 if (*NumUses != 0)
3012 continue;
3013 AssumeOnlyValues.insert(I);
3014 for (const Value *Op : I->operands())
3015 if (auto *OpI = dyn_cast<Instruction>(Op))
3016 Worklist.push_back(OpI);
3017 }
3018 };
3019
3020 for (Instruction &I : instructions(&F)) {
3021 bool IsInterestingOpcode = false;
3022
3023 // To allow easy access to all instructions in a function with a given
3024 // opcode we store them in the InfoCache. As not all opcodes are interesting
3025 // to concrete attributes we only cache the ones that are, as identified in
3026 // the following switch.
3027 // Note: There are no concrete attributes now so this is initially empty.
3028 switch (I.getOpcode()) {
3029 default:
3030 assert(!isa<CallBase>(&I) &&
3031 "New call base instruction type needs to be known in the "
3032 "Attributor.");
3033 break;
3034 case Instruction::Call:
3035 // Calls are interesting on their own, additionally:
3036 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3037 // For `must-tail` calls we remember the caller and callee.
3038 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3039 AssumeOnlyValues.insert(Assume);
3040 fillMapFromAssume(*Assume, KnowledgeMap);
3041 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3042 } else if (cast<CallInst>(I).isMustTailCall()) {
3043 FI.ContainsMustTailCall = true;
3044 if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
3045 getFunctionInfo(*Callee).CalledViaMustTail = true;
3046 }
3047 [[fallthrough]];
3048 case Instruction::CallBr:
3049 case Instruction::Invoke:
3050 case Instruction::CleanupRet:
3051 case Instruction::CatchSwitch:
3052 case Instruction::AtomicRMW:
3053 case Instruction::AtomicCmpXchg:
3054 case Instruction::Br:
3055 case Instruction::Resume:
3056 case Instruction::Ret:
3057 case Instruction::Load:
3058 // The alignment of a pointer is interesting for loads.
3059 case Instruction::Store:
3060 // The alignment of a pointer is interesting for stores.
3061 case Instruction::Alloca:
3062 case Instruction::AddrSpaceCast:
3063 IsInterestingOpcode = true;
3064 }
3065 if (IsInterestingOpcode) {
3066 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3067 if (!Insts)
3068 Insts = new (Allocator) InstructionVectorTy();
3069 Insts->push_back(&I);
3070 }
3071 if (I.mayReadOrWriteMemory())
3072 FI.RWInsts.push_back(&I);
3073 }
3074
3075 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3077 InlineableFunctions.insert(&F);
3078}
3079
3081 return AG.getAnalysis<AAManager>(F);
3082}
3083
3084InformationCache::FunctionInfo::~FunctionInfo() {
3085 // The instruction vectors are allocated using a BumpPtrAllocator, so we need
3086 // to destroy them manually.
3087 for (auto &It : OpcodeInstMap)
3088 It.getSecond()->~InstructionVectorTy();
3089}
3090
3091void Attributor::recordDependence(const AbstractAttribute &FromAA,
3092 const AbstractAttribute &ToAA,
3093 DepClassTy DepClass) {
3094 if (DepClass == DepClassTy::NONE)
3095 return;
3096 // If we are outside of an update, thus before the actual fixpoint iteration
3097 // started (= when we create AAs), we do not track dependences because we will
3098 // put all AAs into the initial worklist anyway.
3099 if (DependenceStack.empty())
3100 return;
3101 if (FromAA.getState().isAtFixpoint())
3102 return;
3103 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3104}
3105
3106void Attributor::rememberDependences() {
3107 assert(!DependenceStack.empty() && "No dependences to remember!");
3108
3109 for (DepInfo &DI : *DependenceStack.back()) {
3110 assert((DI.DepClass == DepClassTy::REQUIRED ||
3111 DI.DepClass == DepClassTy::OPTIONAL) &&
3112 "Expected required or optional dependence (1 bit)!");
3113 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3114 DepAAs.push_back(AbstractAttribute::DepTy(
3115 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3116 }
3117}
3118
3119void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3120 if (!VisitedFunctions.insert(&F).second)
3121 return;
3122 if (F.isDeclaration())
3123 return;
3124
3125 // In non-module runs we need to look at the call sites of a function to
3126 // determine if it is part of a must-tail call edge. This will influence what
3127 // attributes we can derive.
3128 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3129 if (!isModulePass() && !FI.CalledViaMustTail) {
3130 for (const Use &U : F.uses())
3131 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3132 if (CB->isCallee(&U) && CB->isMustTailCall())
3133 FI.CalledViaMustTail = true;
3134 }
3135
3136 IRPosition FPos = IRPosition::function(F);
3137
3138 // Check for dead BasicBlocks in every function.
3139 // We need dead instruction detection because we do not want to deal with
3140 // broken IR in which SSA rules do not apply.
3141 getOrCreateAAFor<AAIsDead>(FPos);
3142
3143 // Every function might be "will-return".
3144 getOrCreateAAFor<AAWillReturn>(FPos);
3145
3146 // Every function might contain instructions that cause "undefined behavior".
3147 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3148
3149 // Every function can be nounwind.
3150 getOrCreateAAFor<AANoUnwind>(FPos);
3151
3152 // Every function might be marked "nosync"
3153 getOrCreateAAFor<AANoSync>(FPos);
3154
3155 // Every function might be "no-free".
3156 getOrCreateAAFor<AANoFree>(FPos);
3157
3158 // Every function might be "no-return".
3159 getOrCreateAAFor<AANoReturn>(FPos);
3160
3161 // Every function might be "no-recurse".
3162 getOrCreateAAFor<AANoRecurse>(FPos);
3163
3164 // Every function might be "readnone/readonly/writeonly/...".
3165 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3166
3167 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3168 getOrCreateAAFor<AAMemoryLocation>(FPos);
3169
3170 // Every function can track active assumptions.
3171 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3172
3173 // Every function might be applicable for Heap-To-Stack conversion.
3174 if (EnableHeapToStack)
3175 getOrCreateAAFor<AAHeapToStack>(FPos);
3176
3177 // Return attributes are only appropriate if the return type is non void.
3178 Type *ReturnType = F.getReturnType();
3179 if (!ReturnType->isVoidTy()) {
3180 // Argument attribute "returned" --- Create only one per function even
3181 // though it is an argument attribute.
3182 getOrCreateAAFor<AAReturnedValues>(FPos);
3183
3184 IRPosition RetPos = IRPosition::returned(F);
3185
3186 // Every returned value might be dead.
3187 getOrCreateAAFor<AAIsDead>(RetPos);
3188
3189 // Every function might be simplified.
3190 bool UsedAssumedInformation = false;
3191 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3193
3194 // Every returned value might be marked noundef.
3195 getOrCreateAAFor<AANoUndef>(RetPos);
3196
3197 if (ReturnType->isPointerTy()) {
3198
3199 // Every function with pointer return type might be marked align.
3200 getOrCreateAAFor<AAAlign>(RetPos);
3201
3202 // Every function with pointer return type might be marked nonnull.
3203 getOrCreateAAFor<AANonNull>(RetPos);
3204
3205 // Every function with pointer return type might be marked noalias.
3206 getOrCreateAAFor<AANoAlias>(RetPos);
3207
3208 // Every function with pointer return type might be marked
3209 // dereferenceable.
3210 getOrCreateAAFor<AADereferenceable>(RetPos);
3211 }
3212 }
3213
3214 for (Argument &Arg : F.args()) {
3215 IRPosition ArgPos = IRPosition::argument(Arg);
3216
3217 // Every argument might be simplified. We have to go through the Attributor
3218 // interface though as outside AAs can register custom simplification
3219 // callbacks.
3220 bool UsedAssumedInformation = false;
3221 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3223
3224 // Every argument might be dead.
3225 getOrCreateAAFor<AAIsDead>(ArgPos);
3226
3227 // Every argument might be marked noundef.
3228 getOrCreateAAFor<AANoUndef>(ArgPos);
3229
3230 if (Arg.getType()->isPointerTy()) {
3231 // Every argument with pointer type might be marked nonnull.
3232 getOrCreateAAFor<AANonNull>(ArgPos);
3233
3234 // Every argument with pointer type might be marked noalias.
3235 getOrCreateAAFor<AANoAlias>(ArgPos);
3236
3237 // Every argument with pointer type might be marked dereferenceable.
3238 getOrCreateAAFor<AADereferenceable>(ArgPos);
3239
3240 // Every argument with pointer type might be marked align.
3241 getOrCreateAAFor<AAAlign>(ArgPos);
3242
3243 // Every argument with pointer type might be marked nocapture.
3244 getOrCreateAAFor<AANoCapture>(ArgPos);
3245
3246 // Every argument with pointer type might be marked
3247 // "readnone/readonly/writeonly/..."
3248 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3249
3250 // Every argument with pointer type might be marked nofree.
3251 getOrCreateAAFor<AANoFree>(ArgPos);
3252
3253 // Every argument with pointer type might be privatizable (or promotable)
3254 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3255 }
3256 }
3257
3258 auto CallSitePred = [&](Instruction &I) -> bool {
3259 auto &CB = cast<CallBase>(I);
3260 IRPosition CBInstPos = IRPosition::inst(CB);
3261 IRPosition CBFnPos = IRPosition::callsite_function(CB);
3262
3263 // Call sites might be dead if they do not have side effects and no live
3264 // users. The return value might be dead if there are no live users.
3265 getOrCreateAAFor<AAIsDead>(CBInstPos);
3266
3267 Function *Callee = CB.getCalledFunction();
3268 // TODO: Even if the callee is not known now we might be able to simplify
3269 // the call/callee.
3270 if (!Callee)
3271 return true;
3272
3273 // Every call site can track active assumptions.
3274 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3275
3276 // Skip declarations except if annotations on their call sites were
3277 // explicitly requested.
3278 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3279 !Callee->hasMetadata(LLVMContext::MD_callback))
3280 return true;
3281
3282 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3283
3285 bool UsedAssumedInformation = false;
3286 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3288 }
3289
3290 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3291
3292 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3293
3294 // Every call site argument might be dead.
3295 getOrCreateAAFor<AAIsDead>(CBArgPos);
3296
3297 // Call site argument might be simplified. We have to go through the
3298 // Attributor interface though as outside AAs can register custom
3299 // simplification callbacks.
3300 bool UsedAssumedInformation = false;
3301 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3303
3304 // Every call site argument might be marked "noundef".
3305 getOrCreateAAFor<AANoUndef>(CBArgPos);
3306
3307 if (!CB.getArgOperand(I)->getType()->isPointerTy())
3308 continue;
3309
3310 // Call site argument attribute "non-null".
3311 getOrCreateAAFor<AANonNull>(CBArgPos);
3312
3313 // Call site argument attribute "nocapture".
3314 getOrCreateAAFor<AANoCapture>(CBArgPos);
3315
3316 // Call site argument attribute "no-alias".
3317 getOrCreateAAFor<AANoAlias>(CBArgPos);
3318
3319 // Call site argument attribute "dereferenceable".
3320 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3321
3322 // Call site argument attribute "align".
3323 getOrCreateAAFor<AAAlign>(CBArgPos);
3324
3325 // Call site argument attribute
3326 // "readnone/readonly/writeonly/..."
3327 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3328
3329 // Call site argument attribute "nofree".
3330 getOrCreateAAFor<AANoFree>(CBArgPos);
3331 }
3332 return true;
3333 };
3334
3335 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3336 bool Success;
3337 bool UsedAssumedInformation = false;
3338 Success = checkForAllInstructionsImpl(
3339 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3340 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3341 (unsigned)Instruction::Call},
3342 UsedAssumedInformation);
3343 (void)Success;
3344 assert(Success && "Expected the check call to be successful!");
3345
3346 auto LoadStorePred = [&](Instruction &I) -> bool {
3347 if (isa<LoadInst>(I)) {
3348 getOrCreateAAFor<AAAlign>(
3349 IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
3350 if (SimplifyAllLoads)
3351 getAssumedSimplified(IRPosition::value(I), nullptr,
3352 UsedAssumedInformation, AA::Intraprocedural);
3353 } else {
3354 auto &SI = cast<StoreInst>(I);
3355 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3356 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3357 UsedAssumedInformation, AA::Intraprocedural);
3358 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3359 }
3360 return true;
3361 };
3362 Success = checkForAllInstructionsImpl(
3363 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3364 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3365 UsedAssumedInformation);
3366 (void)Success;
3367 assert(Success && "Expected the check call to be successful!");
3368}
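//
// Editorial example (not part of the upstream source): besides the default
// seeding above, a client can request additional abstract attributes for
// specific positions before calling run(); the loop below is hypothetical.
//
//   for (Argument &Arg : F.args())
//     if (Arg.getType()->isPointerTy())
//       A.getOrCreateAAFor<AANoCapture>(IRPosition::argument(Arg));
//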
3369
3370/// Helpers to ease debugging through output streams and print calls.
3371///
3372///{
3374 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3375}
3376
3378 switch (AP) {
3379 case IRPosition::IRP_INVALID:
3380 return OS << "inv";
3381 case IRPosition::IRP_FLOAT:
3382 return OS << "flt";
3383 case IRPosition::IRP_RETURNED:
3384 return OS << "fn_ret";
3385 case IRPosition::IRP_CALL_SITE_RETURNED:
3386 return OS << "cs_ret";
3387 case IRPosition::IRP_FUNCTION:
3388 return OS << "fn";
3389 case IRPosition::IRP_CALL_SITE:
3390 return OS << "cs";
3391 case IRPosition::IRP_ARGUMENT:
3392 return OS << "arg";
3393 case IRPosition::IRP_CALL_SITE_ARGUMENT:
3394 return OS << "cs_arg";
3395 }
3396 llvm_unreachable("Unknown attribute position!");
3397}
3398
3400 const Value &AV = Pos.getAssociatedValue();
3401 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3402 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3403
3404 if (Pos.hasCallBaseContext())
3405 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3406 return OS << "}";
3407}
3408
3410 OS << "range-state(" << S.getBitWidth() << ")<";
3411 S.getKnown().print(OS);
3412 OS << " / ";
3413 S.getAssumed().print(OS);
3414 OS << ">";
3415
3416 return OS << static_cast<const AbstractState &>(S);
3417}
3418
3420 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3421}
3422
3424 AA.print(OS);
3425 return OS;
3426}
3427
3430 OS << "set-state(< {";
3431 if (!S.isValidState())
3432 OS << "full-set";
3433 else {
3434 for (const auto &It : S.getAssumedSet())
3435 OS << It << ", ";
3436 if (S.undefIsContained())
3437 OS << "undef ";
3438 }
3439 OS << "} >)";
3440
3441 return OS;
3442}
3443
3444raw_ostream &llvm::operator<<(raw_ostream &OS,
3445 const PotentialLLVMValuesState &S) {
3446 OS << "set-state(< {";
3447 if (!S.isValidState())
3448 OS << "full-set";
3449 else {
3450 for (const auto &It : S.getAssumedSet()) {
3451 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3452 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3453 else
3454 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3455 }
3456 if (S.undefIsContained())
3457 OS << "undef ";
3458 }
3459 OS << "} >)";
3460
3461 return OS;
3462}
3463
3464void AbstractAttribute::print(raw_ostream &OS) const {
3465 OS << "[";
3466 OS << getName();
3467 OS << "] for CtxI ";
3468
3469 if (auto *I = getCtxI()) {
3470 OS << "'";
3471 I->print(OS);
3472 OS << "'";
3473 } else
3474 OS << "<<null inst>>";
3475
3476 OS << " at position " << getIRPosition() << " with state " << getAsStr()
3477 << '\n';
3478}
3479
3480void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3481 print(OS);
3482
3483 for (const auto &DepAA : Deps) {
3484 auto *AA = DepAA.getPointer();
3485 OS << " updates ";
3486 AA->print(OS);
3487 }
3488
3489 OS << '\n';
3490}
3491
3492raw_ostream &llvm::operator<<(raw_ostream &OS,
3493 const AAPointerInfo::Access &Acc) {
3494 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3495 if (Acc.getLocalInst() != Acc.getRemoteInst())
3496 OS << " via " << *Acc.getLocalInst();
3497 if (Acc.getContent()) {
3498 if (*Acc.getContent())
3499 OS << " [" << **Acc.getContent() << "]";
3500 else
3501 OS << " [ <unknown> ]";
3502 }
3503 return OS;
3504}
3505///}
3506
3507/// ----------------------------------------------------------------------------
3508/// Pass (Manager) Boilerplate
3509/// ----------------------------------------------------------------------------
3510
3511static bool runAttributorOnFunctions(InformationCache &InfoCache,
3512 SetVector<Function *> &Functions,
3513 AnalysisGetter &AG,
3514 CallGraphUpdater &CGUpdater,
3515 bool DeleteFns, bool IsModulePass) {
3516 if (Functions.empty())
3517 return false;
3518
3519 LLVM_DEBUG({
3520 dbgs() << "[Attributor] Run on module with " << Functions.size()
3521 << " functions:\n";
3522 for (Function *Fn : Functions)
3523 dbgs() << " - " << Fn->getName() << "\n";
3524 });
3525
3526 // Create an Attributor and initially empty information cache that is filled
3527 // while we identify default attribute opportunities.
3528 AttributorConfig AC(CGUpdater);
3529 AC.IsModulePass = IsModulePass;
3530 AC.DeleteFns = DeleteFns;
3531 Attributor A(Functions, InfoCache, AC);
3532
3533 // Create shallow wrappers for all functions that are not IPO amendable
3534 if (AllowShallowWrappers)
3535 for (Function *F : Functions)
3536 if (!A.isFunctionIPOAmendable(*F))
3537 Attributor::createShallowWrapper(*F);
3538
3539 // Internalize non-exact functions
3540 // TODO: for now we eagerly internalize functions without calculating the
3541 // cost, we need a cost interface to determine whether internalizing
3542 // a function is "beneficial"
3543 if (AllowDeepWrapper) {
3544 unsigned FunSize = Functions.size();
3545 for (unsigned u = 0; u < FunSize; u++) {
3546 Function *F = Functions[u];
3547 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3548 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3549 Function *NewF = Attributor::internalizeFunction(*F);
3550 assert(NewF && "Could not internalize function.");
3551 Functions.insert(NewF);
3552
3553 // Update call graph
3554 CGUpdater.replaceFunctionWith(*F, *NewF);
3555 for (const Use &U : NewF->uses())
3556 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3557 auto *CallerF = CB->getCaller();
3558 CGUpdater.reanalyzeFunction(*CallerF);
3559 }
3560 }
3561 }
3562 }
3563
3564 for (Function *F : Functions) {
3565 if (F->hasExactDefinition())
3566 NumFnWithExactDefinition++;
3567 else
3568 NumFnWithoutExactDefinition++;
3569
3570 // We look at internal functions only on-demand but if any use is not a
3571 // direct call or outside the current set of analyzed functions, we have
3572 // to do it eagerly.
3573 if (F->hasLocalLinkage()) {
3574 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3575 const auto *CB = dyn_cast<CallBase>(U.getUser());
3576 return CB && CB->isCallee(&U) &&
3577 Functions.count(const_cast<Function *>(CB->getCaller()));
3578 }))
3579 continue;
3580 }
3581
3582 // Populate the Attributor with abstract attribute opportunities in the
3583 // function and the information cache with IR information.
3584 A.identifyDefaultAbstractAttributes(*F);
3585 }
3586
3587 ChangeStatus Changed = A.run();
3588
3589 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3590 << " functions, result: " << Changed << ".\n");
3591 return Changed == ChangeStatus::CHANGED;
3592}
3593
3594void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3595
3596void AADepGraph::dumpGraph() {
3597 static std::atomic<int> CallTimes;
3598 std::string Prefix;
3599
3600 if (!DepGraphDotFileNamePrefix.empty())
3601 Prefix = DepGraphDotFileNamePrefix;
3602 else
3603 Prefix = "dep_graph";
3604 std::string Filename =
3605 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
3606
3607 outs() << "Dependency graph dump to " << Filename << ".\n";
3608
3609 std::error_code EC;
3610
3611 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
3612 if (!EC)
3613 llvm::WriteGraph(File, this);
3614
3615 CallTimes++;
3616}
3617
3618void AADepGraph::print() {
3619 for (auto DepAA : SyntheticRoot.Deps)
3620 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
3621}
3622
3623PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
3624 FunctionAnalysisManager &FAM =
3625 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
3626 AnalysisGetter AG(FAM);
3627
3628 SetVector<Function *> Functions;
3629 for (Function &F : M)
3630 Functions.insert(&F);
3631
3632 CallGraphUpdater CGUpdater;
3633 BumpPtrAllocator Allocator;
3634 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3635 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3636 /* DeleteFns */ true, /* IsModulePass */ true)) {
3637 // FIXME: Think about passes we will preserve and add them here.
3638 return PreservedAnalyses::none();
3639 }
3640 return PreservedAnalyses::all();
3641}
3642
3643PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
3644 CGSCCAnalysisManager &AM,
3645 LazyCallGraph &CG,
3646 CGSCCUpdateResult &UR) {
3647 FunctionAnalysisManager &FAM =
3648 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
3649 AnalysisGetter AG(FAM);
3650
3651 SetVector<Function *> Functions;
3652 for (LazyCallGraph::Node &N : C)
3653 Functions.insert(&N.getFunction());
3654
3655 if (Functions.empty())
3656 return PreservedAnalyses::all();
3657
3658 Module &M = *Functions.back()->getParent();
3659 CallGraphUpdater CGUpdater;
3660 CGUpdater.initialize(CG, C, AM, UR);
3661 BumpPtrAllocator Allocator;
3662 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3663 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3664 /* DeleteFns */ false,
3665 /* IsModulePass */ false)) {
3666 // FIXME: Think about passes we will preserve and add them here.
3667 PreservedAnalyses PA;
3668 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
3669 return PA;
3670 }
3671 return PreservedAnalyses::all();
3672}
3673
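For context, the two run() methods above are what the new pass manager invokes. A minimal driver sketch, assuming the standard PassBuilder analysis registration; the function name runAttributorViaNewPM is hypothetical and this is not part of Attributor.cpp:

#include "llvm/Passes/PassBuilder.h"

static void runAttributorViaNewPM(Module &M) {
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PassBuilder PB;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  // AttributorPass is the module pass whose run() is defined above.
  ModulePassManager MPM;
  MPM.addPass(AttributorPass());
  MPM.run(M, MAM);
}

The same passes are reachable from opt via -passes=attributor and -passes=attributor-cgscc.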
3674namespace llvm {
3675
3676template <> struct GraphTraits<AADepGraphNode *> {
3677 using NodeRef = AADepGraphNode *;
3678 using DepTy = PointerIntPair<AADepGraphNode *, 1>;
3679 using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
3680
3681 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
3682 static NodeRef DepGetVal(DepTy &DT) { return DT.getPointer(); }
3683
3684 using ChildIteratorType =
3685 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
3686 using ChildEdgeIteratorType = TinyPtrVector<DepTy>::iterator;
3687
3688 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
3689
3690 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
3691};
3692
3693template <>
3694struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
3695 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
3696
3697 using nodes_iterator =
3698 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
3699
3700 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
3701
3702 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
3703};
3704
3705template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
3706 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3707
3708 static std::string getNodeLabel(const AADepGraphNode *Node,
3709 const AADepGraph *DG) {
3710 std::string AAString;
3711 raw_string_ostream O(AAString);
3712 Node->print(O);
3713 return AAString;
3714 }
3715};
3716
3717} // end namespace llvm
3718
3719namespace {
3720
3721struct AttributorLegacyPass : public ModulePass {
3722 static char ID;
3723
3724 AttributorLegacyPass() : ModulePass(ID) {
3725 initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
3726 }
3727
3728 bool runOnModule(Module &M) override {
3729 if (skipModule(M))
3730 return false;
3731
3732 AnalysisGetter AG;
3733 SetVector<Function *> Functions;
3734 for (Function &F : M)
3735 Functions.insert(&F);
3736
3737 CallGraphUpdater CGUpdater;
3738 BumpPtrAllocator Allocator;
3739 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3740 return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3741 /* DeleteFns*/ true,
3742 /* IsModulePass */ true);
3743 }
3744
3745 void getAnalysisUsage(AnalysisUsage &AU) const override {
3746 // FIXME: Think about passes we will preserve and add them here.
3747 AU.addRequired<TargetLibraryInfoWrapperPass>();
3748 }
3749};
3750
3751struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
3752 static char ID;
3753
3754 AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
3755 initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
3756 }
3757
3758 bool runOnSCC(CallGraphSCC &SCC) override {
3759 if (skipSCC(SCC))
3760 return false;
3761
3762 SetVector<Function *> Functions;
3763 for (CallGraphNode *CGN : SCC)
3764 if (Function *Fn = CGN->getFunction())
3765 if (!Fn->isDeclaration())
3766 Functions.insert(Fn);
3767
3768 if (Functions.empty())
3769 return false;
3770
3771 AnalysisGetter AG;
3772 CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
3773 CallGraphUpdater CGUpdater;
3774 CGUpdater.initialize(CG, SCC);
3775 Module &M = *Functions.back()->getParent();
3776 BumpPtrAllocator Allocator;
3777 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3778 return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3779 /* DeleteFns */ false,
3780 /* IsModulePass */ false);
3781 }
3782
3783 void getAnalysisUsage(AnalysisUsage &AU) const override {
3784 // FIXME: Think about passes we will preserve and add them here.
3785 AU.addRequired<TargetLibraryInfoWrapperPass>();
3786 CallGraphSCCPass::getAnalysisUsage(AU);
3787 }
3788};
3789
3790} // end anonymous namespace
3791
3792Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
3793Pass *llvm::createAttributorCGSCCLegacyPass() {
3794 return new AttributorCGSCCLegacyPass();
3795}
3796
3797char AttributorLegacyPass::ID = 0;
3798char AttributorCGSCCLegacyPass::ID = 0;
3799
3800INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
3801 "Deduce and propagate attributes", false, false)
3802INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3803INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
3804 "Deduce and propagate attributes", false, false)
3805INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
3806 "Deduce and propagate attributes (CGSCC pass)", false,
3807 false)
3808INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
3809INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3810INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
3811 "Deduce and propagate attributes (CGSCC pass)", false,
3812 false)
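The legacy wrappers registered above are normally scheduled through the factory functions. A minimal sketch, assuming the pass registry has been initialized the way the LLVM tools do it; the driver name runAttributorViaLegacyPM is hypothetical and not part of Attributor.cpp:

#include "llvm/IR/LegacyPassManager.h"

static bool runAttributorViaLegacyPM(Module &M) {
  legacy::PassManager PM;
  PM.add(createAttributorLegacyPass()); // factory defined in this file
  return PM.run(M);
}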