1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
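//
// Illustrative usage (editorial sketch, not upstream text): the module pass
// implemented here is registered with the new pass manager as "attributor"
// (and "attributor-cgscc" for the CGSCC flavor), so it can be exercised via
//
//   opt -passes=attributor -S input.ll -o output.ll
//
// The cl::opt switches defined below (e.g. -attributor-max-iterations) tune
// the fixpoint iteration described above.
//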
15
16#include "llvm/Transforms/IPO/Attributor.h"
17
18#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/GlobalValue.h"
37#include "llvm/IR/Instruction.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/ValueHandle.h"
44#include "llvm/Support/Debug.h"
48#include "llvm/Support/ModRef.h"
53#include <cstdint>
54#include <memory>
55
56#ifdef EXPENSIVE_CHECKS
57#include "llvm/IR/Verifier.h"
58#endif
59
60#include <cassert>
61#include <optional>
62#include <string>
63
64using namespace llvm;
65
66#define DEBUG_TYPE "attributor"
67#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
68
69DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
70 "Determine what attributes are manifested in the IR");
71
72STATISTIC(NumFnDeleted, "Number of functions deleted");
73STATISTIC(NumFnWithExactDefinition,
74 "Number of functions with exact definitions");
75STATISTIC(NumFnWithoutExactDefinition,
76 "Number of functions without exact definitions");
77STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
78STATISTIC(NumAttributesTimedOut,
79 "Number of abstract attributes timed out before fixpoint");
80STATISTIC(NumAttributesValidFixpoint,
81 "Number of abstract attributes in a valid fixpoint state");
82STATISTIC(NumAttributesManifested,
83 "Number of abstract attributes manifested in IR");
84
85// TODO: Determine a good default value.
86//
87// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
88// (when run with the first 5 abstract attributes). The results also indicate
89// that we never reach 32 iterations but always find a fixpoint sooner.
90//
91// This will become more evolved once we perform two interleaved fixpoint
92// iterations: bottom-up and top-down.
93static cl::opt<unsigned>
94 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
95 cl::desc("Maximal number of fixpoint iterations."),
96 cl::init(32));
97
98static cl::opt<unsigned>
99 MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
100 cl::Hidden,
101 cl::desc("Maximal number of callees specialized for "
102 "a call base"),
103 cl::init(UINT32_MAX));
104
106 "attributor-max-initialization-chain-length", cl::Hidden,
107 cl::desc(
108 "Maximal number of chained initializations (to avoid stack overflows)"),
109 cl::location(MaxInitializationChainLength), cl::init(1024));
110unsigned llvm::MaxInitializationChainLength;
111
113 "attributor-annotate-decl-cs", cl::Hidden,
114 cl::desc("Annotate call sites of function declarations."), cl::init(false));
115
116static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
117 cl::init(true), cl::Hidden);
118
119static cl::opt<bool>
120 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
121 cl::desc("Allow the Attributor to create shallow "
122 "wrappers for non-exact definitions."),
123 cl::init(false));
124
125static cl::opt<bool>
126 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
127 cl::desc("Allow the Attributor to use IP information "
128 "derived from non-exact functions via cloning"),
129 cl::init(false));
130
131// These options can only be used in debug builds.
132#ifndef NDEBUG
133static cl::list<std::string>
134 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
135 cl::desc("Comma separated list of attribute names that are "
136 "allowed to be seeded."),
137 cl::CommaSeparated);
138
140 "attributor-function-seed-allow-list", cl::Hidden,
141 cl::desc("Comma seperated list of function names that are "
142 "allowed to be seeded."),
143 cl::CommaSeparated);
144#endif
145
146static cl::opt<bool>
147 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
148 cl::desc("Dump the dependency graph to dot files."),
149 cl::init(false));
150
152 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
153 cl::desc("The prefix used for the CallGraph dot file names."));
154
155static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
156 cl::desc("View the dependency graph."),
157 cl::init(false));
158
159static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
160 cl::desc("Print attribute dependencies"),
161 cl::init(false));
162
164 "attributor-enable-call-site-specific-deduction", cl::Hidden,
165 cl::desc("Allow the Attributor to do call site specific analysis"),
166 cl::init(false));
167
168static cl::opt<bool>
169 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
170 cl::desc("Print Attributor's internal call graph"),
171 cl::init(false));
172
173static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
175 cl::desc("Try to simplify all loads."),
176 cl::init(true));
177
179 "attributor-assume-closed-world", cl::Hidden,
180 cl::desc("Should a closed world be assumed, or not. Default if not set."));
181
182/// Logic operators for the change status enum class.
183///
184///{
185ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
186 return L == ChangeStatus::CHANGED ? L : R;
187}
188ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
189 L = L | R;
190 return L;
191}
192ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
193 return L == ChangeStatus::UNCHANGED ? L : R;
194}
195ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
196 L = L & R;
197 return L;
198}
199///}
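// Illustrative sketch (editorial, assuming only the operators defined above):
// CHANGED is "sticky" under |= and UNCHANGED is "sticky" under &=, which is
// how update and manifest results are typically accumulated:
//
//   ChangeStatus Changed = ChangeStatus::UNCHANGED;
//   Changed |= ChangeStatus::UNCHANGED; // still UNCHANGED
//   Changed |= ChangeStatus::CHANGED;   // CHANGED from here on
//
//   ChangeStatus All = ChangeStatus::CHANGED;
//   All &= ChangeStatus::UNCHANGED;     // UNCHANGED, and it stays UNCHANGED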
200
201bool AA::isGPU(const Module &M) {
202 Triple T(M.getTargetTriple());
203 return T.isAMDGPU() || T.isNVPTX();
204}
205
206bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
207 const AbstractAttribute &QueryingAA) {
208 // We are looking for volatile instructions or non-relaxed atomics.
209 if (const auto *CB = dyn_cast<CallBase>(&I)) {
210 if (CB->hasFnAttr(Attribute::NoSync))
211 return true;
212
213 // Non-convergent and readnone imply nosync.
214 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
215 return true;
216
217 if (AANoSync::isNoSyncIntrinsic(&I))
218 return true;
219
220 bool IsKnownNoSync;
221 return AA::hasAssumedIRAttr<Attribute::NoSync>(
222 A, &QueryingAA, IRPosition::callsite_function(*CB),
223 DepClassTy::OPTIONAL, IsKnownNoSync);
224 }
225
226 if (!I.mayReadOrWriteMemory())
227 return true;
228
229 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
230}
231
232bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
233 const Value &V, bool ForAnalysisOnly) {
234 // TODO: See the AAInstanceInfo class comment.
235 if (!ForAnalysisOnly)
236 return false;
237 auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
238 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
239 return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
240}
241
242Constant *
243AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
244 Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
245 const DataLayout &DL, AA::RangeTy *RangePtr) {
246 if (isa<AllocaInst>(Obj))
247 return UndefValue::get(&Ty);
248 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
249 return Init;
250 auto *GV = dyn_cast<GlobalVariable>(&Obj);
251 if (!GV)
252 return nullptr;
253
254 bool UsedAssumedInformation = false;
255 Constant *Initializer = nullptr;
256 if (A.hasGlobalVariableSimplificationCallback(*GV)) {
257 auto AssumedGV = A.getAssumedInitializerFromCallBack(
258 *GV, &QueryingAA, UsedAssumedInformation);
259 Initializer = *AssumedGV;
260 if (!Initializer)
261 return nullptr;
262 } else {
263 if (!GV->hasLocalLinkage() &&
264 (GV->isInterposable() || !(GV->isConstant() && GV->hasInitializer())))
265 return nullptr;
266 if (!GV->hasInitializer())
267 return UndefValue::get(&Ty);
268
269 if (!Initializer)
270 Initializer = GV->getInitializer();
271 }
272
273 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
274 APInt Offset = APInt(64, RangePtr->Offset);
275 return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
276 }
277
278 return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
279}
280
281bool AA::isValidInScope(const Value &V, const Function *Scope) {
282 if (isa<Constant>(V))
283 return true;
284 if (auto *I = dyn_cast<Instruction>(&V))
285 return I->getFunction() == Scope;
286 if (auto *A = dyn_cast<Argument>(&V))
287 return A->getParent() == Scope;
288 return false;
289}
290
291bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
292 InformationCache &InfoCache) {
293 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
294 return true;
295 const Function *Scope = nullptr;
296 const Instruction *CtxI = VAC.getCtxI();
297 if (CtxI)
298 Scope = CtxI->getFunction();
299 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
300 return A->getParent() == Scope;
301 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
302 if (I->getFunction() == Scope) {
303 if (const DominatorTree *DT =
304 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
305 *Scope))
306 return DT->dominates(I, CtxI);
307 // Local dominance check mostly for the old PM passes.
308 if (CtxI && I->getParent() == CtxI->getParent())
309 return llvm::any_of(
310 make_range(I->getIterator(), I->getParent()->end()),
311 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
312 }
313 }
314 return false;
315}
316
317Value *AA::getWithType(Value &V, Type &Ty) {
318 if (V.getType() == &Ty)
319 return &V;
320 if (isa<PoisonValue>(V))
321 return PoisonValue::get(&Ty);
322 if (isa<UndefValue>(V))
323 return UndefValue::get(&Ty);
324 if (auto *C = dyn_cast<Constant>(&V)) {
325 if (C->isNullValue())
326 return Constant::getNullValue(&Ty);
327 if (C->getType()->isPointerTy() && Ty.isPointerTy())
328 return ConstantExpr::getPointerCast(C, &Ty);
329 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
330 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
331 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
332 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
333 return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
334 }
335 }
336 return nullptr;
337}
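// Illustrative example (editorial; Int64Ty, Int1Ty and PtrTy are assumed to
// name the obvious LLVM types): only value-preserving retyping is performed,
// anything else yields nullptr.
//
//   Constant *Zero = ConstantInt::get(Int64Ty, 0);
//   Value *V = AA::getWithType(*Zero, *Int1Ty);  // i1 0, via the null-value path
//   Value *U = AA::getWithType(*UndefValue::get(Int64Ty), *PtrTy); // ptr undef
//   // Widening (e.g. a non-null i1 constant to i64) is rejected -> nullptr.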
338
339std::optional<Value *>
340AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
341 const std::optional<Value *> &B,
342 Type *Ty) {
343 if (A == B)
344 return A;
345 if (!B)
346 return A;
347 if (*B == nullptr)
348 return nullptr;
349 if (!A)
350 return Ty ? getWithType(**B, *Ty) : nullptr;
351 if (*A == nullptr)
352 return nullptr;
353 if (!Ty)
354 Ty = (*A)->getType();
355 if (isa_and_nonnull<UndefValue>(*A))
356 return getWithType(**B, *Ty);
357 if (isa<UndefValue>(*B))
358 return A;
359 if (*A && *B && *A == getWithType(**B, *Ty))
360 return A;
361 return nullptr;
362}
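// Lattice intuition for the combination above (editorial summary): a
// std::nullopt operand means "not yet known" (optimistic), a non-null Value*
// is a concrete simplification, and a nullptr Value* is the pessimistic
// "multiple/unknown values" state. For example:
//
//   combine(%x, nullopt) == %x      // "not yet known" defers to the known side
//   combine(undef, %x)   == %x      // undef defers to a concrete value
//   combine(%x, %y)      == nullptr // no single value, go pessimistic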
363
364template <bool IsLoad, typename Ty>
365static bool getPotentialCopiesOfMemoryValue(
366 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
367 SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
368 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
369 bool OnlyExact) {
370 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
371 << " (only exact: " << OnlyExact << ")\n";);
372
373 Value &Ptr = *I.getPointerOperand();
374 // Containers to remember the pointer infos and new copies while we are not
375 // sure that we can find all of them. If we abort we want to avoid spurious
376 // dependences and potential copies in the provided container.
377 SmallVector<const AAPointerInfo *> PIs;
378 SmallSetVector<Value *, 8> NewCopies;
379 SmallSetVector<Instruction *, 8> NewCopyOrigins;
380
381 const auto *TLI =
382 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
383
384 auto Pred = [&](Value &Obj) {
385 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
386 if (isa<UndefValue>(&Obj))
387 return true;
388 if (isa<ConstantPointerNull>(&Obj)) {
389 // A null pointer access can be undefined but any offset from null may
390 // be OK. We do not try to optimize the latter.
391 if (!NullPointerIsDefined(I.getFunction(),
392 Ptr.getType()->getPointerAddressSpace()) &&
393 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
394 AA::Interprocedural) == &Obj)
395 return true;
397 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
398 return false;
399 }
400 // TODO: Use assumed noalias return.
401 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
402 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
403 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
404 << "\n";);
405 return false;
406 }
407 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
408 if (!GV->hasLocalLinkage() &&
409 !(GV->isConstant() && GV->hasInitializer())) {
410 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
411 "linkage, not supported yet: "
412 << Obj << "\n";);
413 return false;
414 }
415
416 bool NullOnly = true;
417 bool NullRequired = false;
418 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
419 bool IsExact) {
420 if (!V || *V == nullptr)
421 NullOnly = false;
422 else if (isa<UndefValue>(*V))
423 /* No op */;
424 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
425 NullRequired = !IsExact;
426 else
427 NullOnly = false;
428 };
429
430 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
431 Value &V) {
432 Value *AdjV = AA::getWithType(V, *I.getType());
433 if (!AdjV) {
434 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
435 "cannot be converted to read type: "
436 << *Acc.getRemoteInst() << " : " << *I.getType()
437 << "\n";);
438 }
439 return AdjV;
440 };
441
442 auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
443 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
444 return true;
445 if (IsLoad) {
446 if (Acc.isWrittenValueYetUndetermined())
447 return true;
448 if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
449 return false;
450 if (!Acc.isWrittenValueUnknown())
451 if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
452 if (NewCopies.count(V)) {
453 NewCopyOrigins.insert(Acc.getRemoteInst());
454 return true;
455 }
456 if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
457 if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
458 if (NewCopies.count(V)) {
459 NewCopyOrigins.insert(Acc.getRemoteInst());
460 return true;
461 }
462 }
463 return false;
464 };
465
466 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
467 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
468 return true;
469 if (IsLoad && Acc.isWrittenValueYetUndetermined())
470 return true;
471 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
472 if (OnlyExact && !IsExact && !NullOnly &&
473 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
474 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
475 << ", abort!\n");
476 return false;
477 }
478 if (NullRequired && !NullOnly) {
479 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
480 "one, however found non-null one: "
481 << *Acc.getRemoteInst() << ", abort!\n");
482 return false;
483 }
484 if (IsLoad) {
485 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
486 if (!Acc.isWrittenValueUnknown()) {
487 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
488 if (!V)
489 return false;
490 NewCopies.insert(V);
491 if (PotentialValueOrigins)
492 NewCopyOrigins.insert(Acc.getRemoteInst());
493 return true;
494 }
495 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
496 if (!SI) {
497 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
498 "instruction not supported yet: "
499 << *Acc.getRemoteInst() << "\n";);
500 return false;
501 }
502 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
503 if (!V)
504 return false;
505 NewCopies.insert(V);
506 if (PotentialValueOrigins)
507 NewCopyOrigins.insert(SI);
508 } else {
509 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
510 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
511 if (!LI && OnlyExact) {
512 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
513 "instruction not supported yet: "
514 << *Acc.getRemoteInst() << "\n";);
515 return false;
516 }
517 NewCopies.insert(Acc.getRemoteInst());
518 }
519 return true;
520 };
521
522 // If the value has been written to we don't need the initial value of the
523 // object.
524 bool HasBeenWrittenTo = false;
525
526 AA::RangeTy Range;
527 auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
528 DepClassTy::NONE);
529 if (!PI || !PI->forallInterferingAccesses(
530 A, QueryingAA, I,
531 /* FindInterferingWrites */ IsLoad,
532 /* FindInterferingReads */ !IsLoad, CheckAccess,
533 HasBeenWrittenTo, Range, SkipCB)) {
534 LLVM_DEBUG(
535 dbgs()
536 << "Failed to verify all interfering accesses for underlying object: "
537 << Obj << "\n");
538 return false;
539 }
540
541 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
542 const DataLayout &DL = A.getDataLayout();
543 Value *InitialValue = AA::getInitialValueForObj(
544 A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
545 if (!InitialValue) {
546 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
547 "underlying object, abort!\n");
548 return false;
549 }
550 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
551 if (NullRequired && !NullOnly) {
552 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
553 "null or undef, abort!\n");
554 return false;
555 }
556
557 NewCopies.insert(InitialValue);
558 if (PotentialValueOrigins)
559 NewCopyOrigins.insert(nullptr);
560 }
561
562 PIs.push_back(PI);
563
564 return true;
565 };
566
567 const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
568 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
569 if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
571 dbgs() << "Underlying objects stored into could not be determined\n";);
572 return false;
573 }
574
575 // Only if we successfully collected all potential copies do we record
576 // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
577 // given PotentialCopies container.
578 for (const auto *PI : PIs) {
579 if (!PI->getState().isAtFixpoint())
580 UsedAssumedInformation = true;
581 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
582 }
583 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
584 if (PotentialValueOrigins)
585 PotentialValueOrigins->insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
586
587 return true;
588}
589
590bool AA::getPotentiallyLoadedValues(
591 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
592 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
593 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
594 bool OnlyExact) {
595 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
596 A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
597 UsedAssumedInformation, OnlyExact);
598}
599
600bool AA::getPotentialCopiesOfStoredValue(
601 Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
602 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
603 bool OnlyExact) {
604 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
605 A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
606 OnlyExact);
607}
608
609static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
610 const AbstractAttribute &QueryingAA,
611 bool RequireReadNone, bool &IsKnown) {
612 if (RequireReadNone) {
613 if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
614 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
615 /* IgnoreSubsumingPositions */ true))
616 return true;
617 } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
618 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
619 /* IgnoreSubsumingPositions */ true))
620 return true;
621
622
623 if (IRP.getPositionKind() == IRPosition::IRP_FUNCTION) {
624 const auto *MemLocAA =
625 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
626 if (MemLocAA && MemLocAA->isAssumedReadNone()) {
627 IsKnown = MemLocAA->isKnownReadNone();
628 if (!IsKnown)
629 A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
630 return true;
631 }
632 }
633
634 const auto *MemBehaviorAA =
635 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
636 if (MemBehaviorAA &&
637 (MemBehaviorAA->isAssumedReadNone() ||
638 (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
639 IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
640 : MemBehaviorAA->isKnownReadOnly();
641 if (!IsKnown)
642 A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
643 return true;
644 }
645
646 return false;
647}
648
649bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
650 const AbstractAttribute &QueryingAA, bool &IsKnown) {
651 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
652 /* RequireReadNone */ false, IsKnown);
653}
654bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
655 const AbstractAttribute &QueryingAA, bool &IsKnown) {
656 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
657 /* RequireReadNone */ true, IsKnown);
658}
659
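// Usage sketch (editorial; a hypothetical caller, not code from this file):
// an abstract attribute that wants to treat a call site CB as non-writing
// would typically query the helpers above as
//
//   bool IsKnown;
//   if (AA::isAssumedReadOnly(A, IRPosition::callsite_function(CB),
//                             *this, IsKnown)) {
//     // CB is assumed not to write memory; IsKnown is true only if this
//     // holds without relying on assumed (non-fixpoint) information.
//   }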
660static bool
661isPotentiallyReachable(Attributor &A, const Instruction &FromI,
662 const Instruction *ToI, const Function &ToFn,
663 const AbstractAttribute &QueryingAA,
664 const AA::InstExclusionSetTy *ExclusionSet,
665 std::function<bool(const Function &F)> GoBackwardsCB) {
667 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
668 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
669 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
670 << "]\n";
671 if (ExclusionSet)
672 for (auto *ES : *ExclusionSet)
673 dbgs() << *ES << "\n";
674 });
675
676 // We know kernels (generally) cannot be called from within the module. Thus,
677 // for reachability we would need to step back from a kernel which would allow
678 // us to reach anything anyway. Even if a kernel is invoked from another
679 // kernel, values like allocas and shared memory are not accessible. We
680 // implicitly check for this situation to avoid costly lookups.
681 if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
682 !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
683 FromI.getFunction()->hasFnAttribute("kernel")) {
684 LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
685 "module; success\n";);
686 return false;
687 }
688
689 // If we can go arbitrarily backwards we will eventually reach an entry point
690 // that can reach ToI. Only if a set of blocks through which we cannot go is
691 // provided, or once we track internal functions not accessible from the
692 // outside, it makes sense to perform backwards analysis in the absence of a
693 // GoBackwardsCB.
694 if (!GoBackwardsCB && !ExclusionSet) {
695 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
696 << " is not checked backwards and does not have an "
697 "exclusion set, abort\n");
698 return true;
699 }
700
701 SmallPtrSet<const Instruction *, 8> Visited;
702 SmallVector<const Instruction *, 16> Worklist;
703 Worklist.push_back(&FromI);
704
705 while (!Worklist.empty()) {
706 const Instruction *CurFromI = Worklist.pop_back_val();
707 if (!Visited.insert(CurFromI).second)
708 continue;
709
710 const Function *FromFn = CurFromI->getFunction();
711 if (FromFn == &ToFn) {
712 if (!ToI)
713 return true;
714 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
715 << " intraprocedurally\n");
716 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
717 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
718 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
719 A, *CurFromI, *ToI, ExclusionSet);
720 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
721 << (Result ? "can potentially " : "cannot ") << "reach "
722 << *ToI << " [Intra]\n");
723 if (Result)
724 return true;
725 }
726
727 bool Result = true;
728 if (!ToFn.isDeclaration() && ToI) {
729 const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
730 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
731 const Instruction &EntryI = ToFn.getEntryBlock().front();
732 Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
733 A, EntryI, *ToI, ExclusionSet);
734 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
735 << " " << (Result ? "can potentially " : "cannot ")
736 << "reach @" << *ToI << " [ToFn]\n");
737 }
738
739 if (Result) {
740 // The entry of the ToFn can reach the instruction ToI. If the current
741 // instruction is already known to reach the ToFn.
742 const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
743 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
744 Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
745 A, *CurFromI, ToFn, ExclusionSet);
746 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
747 << " " << (Result ? "can potentially " : "cannot ")
748 << "reach @" << ToFn.getName() << " [FromFn]\n");
749 if (Result)
750 return true;
751 }
752
753 // TODO: Check assumed nounwind.
754 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
755 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
756 auto ReturnInstCB = [&](Instruction &Ret) {
757 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
758 A, *CurFromI, Ret, ExclusionSet);
759 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
760 << (Result ? "can potentially " : "cannot ") << "reach "
761 << Ret << " [Intra]\n");
762 return !Result;
763 };
764
765 // Check if we can reach returns.
766 bool UsedAssumedInformation = false;
767 if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
768 {Instruction::Ret}, UsedAssumedInformation)) {
769 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
770 continue;
771 }
772
773 if (!GoBackwardsCB) {
774 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
775 << " is not checked backwards, abort\n");
776 return true;
777 }
778
779 // If we do not go backwards from the FromFn we are done here and so far we
780 // could not find a way to reach ToFn/ToI.
781 if (!GoBackwardsCB(*FromFn))
782 continue;
783
784 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
785 << FromFn->getName() << "\n");
786
787 auto CheckCallSite = [&](AbstractCallSite ACS) {
788 CallBase *CB = ACS.getInstruction();
789 if (!CB)
790 return false;
791
792 if (isa<InvokeInst>(CB))
793 return false;
794
795 Instruction *Inst = CB->getNextNonDebugInstruction();
796 Worklist.push_back(Inst);
797 return true;
798 };
799
800 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
801 /* RequireAllCallSites */ true,
802 &QueryingAA, UsedAssumedInformation);
803 if (Result) {
804 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
805 << " in @" << FromFn->getName()
806 << " failed, give up\n");
807 return true;
808 }
809
810 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
811 << " in @" << FromFn->getName()
812 << " worklist size is: " << Worklist.size() << "\n");
813 }
814 return false;
815}
816
817bool AA::isPotentiallyReachable(
818 Attributor &A, const Instruction &FromI, const Instruction &ToI,
819 const AbstractAttribute &QueryingAA,
820 const AA::InstExclusionSetTy *ExclusionSet,
821 std::function<bool(const Function &F)> GoBackwardsCB) {
822 const Function *ToFn = ToI.getFunction();
823 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
824 ExclusionSet, GoBackwardsCB);
825}
826
827bool AA::isPotentiallyReachable(
828 Attributor &A, const Instruction &FromI, const Function &ToFn,
829 const AbstractAttribute &QueryingAA,
830 const AA::InstExclusionSetTy *ExclusionSet,
831 std::function<bool(const Function &F)> GoBackwardsCB) {
832 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
833 ExclusionSet, GoBackwardsCB);
834}
835
836bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
837 const AbstractAttribute &QueryingAA) {
838 if (isa<UndefValue>(Obj))
839 return true;
840 if (isa<AllocaInst>(Obj)) {
841 InformationCache &InfoCache = A.getInfoCache();
842 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
844 dbgs() << "[AA] Object '" << Obj
845 << "' is thread local; stack objects are thread local.\n");
846 return true;
847 }
848 bool IsKnownNoCapture;
849 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
850 A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
851 IsKnownNoCapture);
852 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
853 << (IsAssumedNoCapture ? "" : "not") << " thread local; "
854 << (IsAssumedNoCapture ? "non-" : "")
855 << "captured stack object.\n");
856 return IsAssumedNoCapture;
857 }
858 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
859 if (GV->isConstant()) {
860 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
861 << "' is thread local; constant global\n");
862 return true;
863 }
864 if (GV->isThreadLocal()) {
865 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
866 << "' is thread local; thread local global\n");
867 return true;
868 }
869 }
870
871 if (A.getInfoCache().targetIsGPU()) {
872 if (Obj.getType()->getPointerAddressSpace() ==
873 (int)AA::GPUAddressSpace::Local) {
874 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
875 << "' is thread local; GPU local memory\n");
876 return true;
877 }
878 if (Obj.getType()->getPointerAddressSpace() ==
879 (int)AA::GPUAddressSpace::Constant) {
880 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
881 << "' is thread local; GPU constant memory\n");
882 return true;
883 }
884 }
885
886 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
887 return false;
888}
889
890bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
891 const AbstractAttribute &QueryingAA) {
892 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
893 return false;
894
895 SmallSetVector<const Value *, 4> Ptrs;
896
897 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
898 if (!Loc || !Loc->Ptr) {
900 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
901 return false;
902 }
903 Ptrs.insert(Loc->Ptr);
904 return true;
905 };
906
907 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
908 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
909 return true;
910 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
911 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
912 return true;
913 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
914 return true;
915
916 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
917}
918
919bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
920 ArrayRef<const Value *> Ptrs,
921 const AbstractAttribute &QueryingAA,
922 const Instruction *CtxI) {
923 for (const Value *Ptr : Ptrs) {
924 if (!Ptr) {
925 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
926 return true;
927 }
928
929 auto Pred = [&](Value &Obj) {
930 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
931 return true;
932 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
933 << "'; -> requires barrier\n");
934 return false;
935 };
936
937 const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
938 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
939 if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
940 return true;
941 }
942 return false;
943}
944
945/// Return true if \p New is equal or worse than \p Old.
946static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
947 if (!Old.isIntAttribute())
948 return true;
949
950 return Old.getValueAsInt() >= New.getValueAsInt();
951}
952
953/// Return true if the information provided by \p Attr was added to the
954/// attribute set \p AttrSet. This is only the case if it was not already
955/// present in \p AttrSet.
956static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
957 AttributeSet AttrSet, bool ForceReplace,
958 AttrBuilder &AB) {
959
960 if (Attr.isEnumAttribute()) {
961 Attribute::AttrKind Kind = Attr.getKindAsEnum();
962 if (AttrSet.hasAttribute(Kind))
963 return false;
964 AB.addAttribute(Kind);
965 return true;
966 }
967 if (Attr.isStringAttribute()) {
968 StringRef Kind = Attr.getKindAsString();
969 if (AttrSet.hasAttribute(Kind)) {
970 if (!ForceReplace)
971 return false;
972 }
973 AB.addAttribute(Kind, Attr.getValueAsString());
974 return true;
975 }
976 if (Attr.isIntAttribute()) {
977 Attribute::AttrKind Kind = Attr.getKindAsEnum();
978 if (!ForceReplace && Kind == Attribute::Memory) {
979 MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
980 if (ME == AttrSet.getMemoryEffects())
981 return false;
982 AB.addMemoryAttr(ME);
983 return true;
984 }
985 if (AttrSet.hasAttribute(Kind)) {
986 if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
987 return false;
988 }
989 AB.addAttribute(Attr);
990 return true;
991 }
992
993 llvm_unreachable("Expected enum or string attribute!");
994}
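// Illustrative behavior of addIfNotExistent (editorial summary): enum
// attributes are only added when missing, string attributes are overwritten
// only when ForceReplace is set, integer attributes keep the stronger value,
// and memory effects are intersected, e.g.
//
//   existing dereferenceable(16), new dereferenceable(8) -> unchanged
//   existing memory(readwrite),   new memory(read)       -> memory(read)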
995
996Argument *IRPosition::getAssociatedArgument() const {
997 if (getPositionKind() == IRP_ARGUMENT)
998 return cast<Argument>(&getAnchorValue());
999
1000 // Not an Argument and no argument number means this is not a call site
1001 // argument, thus we cannot find a callback argument to return.
1002 int ArgNo = getCallSiteArgNo();
1003 if (ArgNo < 0)
1004 return nullptr;
1005
1006 // Use abstract call sites to make the connection between the call site
1007 // values and the ones in callbacks. If a callback was found that makes use
1008 // of the underlying call site operand, we want the corresponding callback
1009 // callee argument and not the direct callee argument.
1010 std::optional<Argument *> CBCandidateArg;
1011 SmallVector<const Use *, 4> CallbackUses;
1012 const auto &CB = cast<CallBase>(getAnchorValue());
1013 AbstractCallSite::getCallbackUses(CB, CallbackUses);
1014 for (const Use *U : CallbackUses) {
1015 AbstractCallSite ACS(U);
1016 assert(ACS && ACS.isCallbackCall());
1017 if (!ACS.getCalledFunction())
1018 continue;
1019
1020 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
1021
1022 // Test if the underlying call site operand is argument number u of the
1023 // callback callee.
1024 if (ACS.getCallArgOperandNo(u) != ArgNo)
1025 continue;
1026
1027 assert(ACS.getCalledFunction()->arg_size() > u &&
1028 "ACS mapped into var-args arguments!");
1029 if (CBCandidateArg) {
1030 CBCandidateArg = nullptr;
1031 break;
1032 }
1033 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
1034 }
1035 }
1036
1037 // If we found a unique callback candidate argument, return it.
1038 if (CBCandidateArg && *CBCandidateArg)
1039 return *CBCandidateArg;
1040
1041 // If no callbacks were found, or none used the underlying call site operand
1042 // exclusively, use the direct callee argument if available.
1043 auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
1044 if (Callee && Callee->arg_size() > unsigned(ArgNo))
1045 return Callee->getArg(ArgNo);
1046
1047 return nullptr;
1048}
1049
1050ChangeStatus AbstractAttribute::update(Attributor &A) {
1051 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1052 if (getState().isAtFixpoint())
1053 return HasChanged;
1054
1055 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
1056
1057 HasChanged = updateImpl(A);
1058
1059 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1060 << "\n");
1061
1062 return HasChanged;
1063}
1064
1065Attributor::Attributor(SetVector<Function *> &Functions,
1066 InformationCache &InfoCache,
1067 AttributorConfig Configuration)
1068 : Allocator(InfoCache.Allocator), Functions(Functions),
1069 InfoCache(InfoCache), Configuration(Configuration) {
1070 if (!isClosedWorldModule())
1071 return;
1072 for (Function *Fn : Functions)
1073 if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
1074 /*IgnoreCallbackUses=*/false,
1075 /*IgnoreAssumeLikeCalls=*/true,
1076 /*IgnoreLLVMUsed=*/true,
1077 /*IgnoreARCAttachedCall=*/false,
1078 /*IgnoreCastedDirectCall=*/true))
1079 InfoCache.IndirectlyCallableFunctions.push_back(Fn);
1080}
1081
1086 "Did expect a valid position!");
1087 MustBeExecutedContextExplorer *Explorer =
1088 getInfoCache().getMustBeExecutedContextExplorer();
1089 if (!Explorer)
1090 return false;
1091
1092 Value &AssociatedValue = IRP.getAssociatedValue();
1093
1094 const Assume2KnowledgeMap &A2K =
1095 getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1096
1097 // Check if we found any potential assume use, if not we don't need to create
1098 // explorer iterators.
1099 if (A2K.empty())
1100 return false;
1101
1102 LLVMContext &Ctx = AssociatedValue.getContext();
1103 unsigned AttrsSize = Attrs.size();
1104 auto EIt = Explorer->begin(IRP.getCtxI()),
1105 EEnd = Explorer->end(IRP.getCtxI());
1106 for (const auto &It : A2K)
1107 if (Explorer->findInContextOf(It.first, EIt, EEnd))
1108 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1109 return AttrsSize != Attrs.size();
1110}
1111
1112template <typename DescTy>
1113ChangeStatus
1114Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
1115 function_ref<bool(const DescTy &, AttributeSet,
1116 AttributeMask &, AttrBuilder &)>
1117 CB) {
1118 if (AttrDescs.empty())
1119 return ChangeStatus::UNCHANGED;
1120 switch (IRP.getPositionKind()) {
1121 case IRPosition::IRP_INVALID:
1122 case IRPosition::IRP_FLOAT:
1123 return ChangeStatus::UNCHANGED;
1124 default:
1125 break;
1126 };
1127
1128 AttributeList AL;
1129 Value *AttrListAnchor = IRP.getAttrListAnchor();
1130 auto It = AttrsMap.find(AttrListAnchor);
1131 if (It == AttrsMap.end())
1132 AL = IRP.getAttrList();
1133 else
1134 AL = It->getSecond();
1135
1136 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1137 auto AttrIdx = IRP.getAttrIdx();
1138 AttributeSet AS = AL.getAttributes(AttrIdx);
1139 AttributeMask AM;
1140 AttrBuilder AB(Ctx);
1141
1143 for (const DescTy &AttrDesc : AttrDescs)
1144 if (CB(AttrDesc, AS, AM, AB))
1145 HasChanged = ChangeStatus::CHANGED;
1146
1147 if (HasChanged == ChangeStatus::UNCHANGED)
1148 return ChangeStatus::UNCHANGED;
1149
1150 AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
1151 AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
1152 AttrsMap[AttrListAnchor] = AL;
1153 return ChangeStatus::CHANGED;
1154}
1155
1156bool Attributor::hasAttr(const IRPosition &IRP,
1157 ArrayRef<Attribute::AttrKind> AttrKinds,
1158 bool IgnoreSubsumingPositions,
1159 Attribute::AttrKind ImpliedAttributeKind) {
1160 bool Implied = false;
1161 bool HasAttr = false;
1162 auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1163 AttributeMask &, AttrBuilder &) {
1164 if (AttrSet.hasAttribute(Kind)) {
1165 Implied |= Kind != ImpliedAttributeKind;
1166 HasAttr = true;
1167 }
1168 return false;
1169 };
1170 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1171 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
1172 if (HasAttr)
1173 break;
1174 // The first position returned by the SubsumingPositionIterator is
1175 // always the position itself. If we ignore subsuming positions we
1176 // are done after the first iteration.
1177 if (IgnoreSubsumingPositions)
1178 break;
1179 Implied = true;
1180 }
1181 if (!HasAttr) {
1182 Implied = true;
1183 SmallVector<Attribute> Attrs;
1184 for (Attribute::AttrKind AK : AttrKinds)
1185 if (getAttrsFromAssumes(IRP, AK, Attrs)) {
1186 HasAttr = true;
1187 break;
1188 }
1189 }
1190
1191 // Check if we should manifest the implied attribute kind at the IRP.
1192 if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
1193 manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
1194 ImpliedAttributeKind)});
1195 return HasAttr;
1196}
1197
1198void Attributor::getAttrs(const IRPosition &IRP,
1199 ArrayRef<Attribute::AttrKind> AttrKinds,
1200 SmallVectorImpl<Attribute> &Attrs,
1201 bool IgnoreSubsumingPositions) {
1202 auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
1203 AttributeSet AttrSet, AttributeMask &,
1204 AttrBuilder &) {
1205 if (AttrSet.hasAttribute(Kind))
1206 Attrs.push_back(AttrSet.getAttribute(Kind));
1207 return false;
1208 };
1209 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1210 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
1211 // The first position returned by the SubsumingPositionIterator is
1212 // always the position itself. If we ignore subsuming positions we
1213 // are done after the first iteration.
1214 if (IgnoreSubsumingPositions)
1215 break;
1216 }
1217 for (Attribute::AttrKind AK : AttrKinds)
1218 getAttrsFromAssumes(IRP, AK, Attrs);
1219}
1220
1221ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1222 ArrayRef<Attribute::AttrKind> AttrKinds) {
1223 auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1224 AttributeMask &AM, AttrBuilder &) {
1225 if (!AttrSet.hasAttribute(Kind))
1226 return false;
1227 AM.addAttribute(Kind);
1228 return true;
1229 };
1230 return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
1231}
1232
1233ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1234 ArrayRef<StringRef> Attrs) {
1235 auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
1236 AttributeMask &AM, AttrBuilder &) -> bool {
1237 if (!AttrSet.hasAttribute(Attr))
1238 return false;
1239 AM.addAttribute(Attr);
1240 return true;
1241 };
1242
1243 return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
1244}
1245
1246ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
1247 ArrayRef<Attribute> Attrs,
1248 bool ForceReplace) {
1249 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1250 auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
1251 AttributeMask &, AttrBuilder &AB) {
1252 return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
1253 };
1254 return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
1255}
1256
1256
1257const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
1258const IRPosition
1259 IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
1260
1261SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
1262 IRPositions.emplace_back(IRP);
1263
1264 // Helper to determine if operand bundles on a call site are benign or
1265 // potentially problematic. We handle only llvm.assume for now.
1266 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1267 return (isa<IntrinsicInst>(CB) &&
1268 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1269 };
1270
1271 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1272 switch (IRP.getPositionKind()) {
1273 case IRPosition::IRP_INVALID:
1274 case IRPosition::IRP_FLOAT:
1275 case IRPosition::IRP_FUNCTION:
1276 return;
1277 case IRPosition::IRP_ARGUMENT:
1278 case IRPosition::IRP_RETURNED:
1279 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1280 return;
1282 assert(CB && "Expected call site!");
1283 // TODO: We need to look at the operand bundles similar to the redirection
1284 // in CallBase.
1285 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1286 if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
1287 IRPositions.emplace_back(IRPosition::function(*Callee));
1288 return;
1290 assert(CB && "Expected call site!");
1291 // TODO: We need to look at the operand bundles similar to the redirection
1292 // in CallBase.
1293 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1294 if (auto *Callee =
1295 dyn_cast_if_present<Function>(CB->getCalledOperand())) {
1296 IRPositions.emplace_back(IRPosition::returned(*Callee));
1297 IRPositions.emplace_back(IRPosition::function(*Callee));
1298 for (const Argument &Arg : Callee->args())
1299 if (Arg.hasReturnedAttr()) {
1300 IRPositions.emplace_back(
1301 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1302 IRPositions.emplace_back(
1303 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1304 IRPositions.emplace_back(IRPosition::argument(Arg));
1305 }
1306 }
1307 }
1308 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1309 return;
1311 assert(CB && "Expected call site!");
1312 // TODO: We need to look at the operand bundles similar to the redirection
1313 // in CallBase.
1314 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1315 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
1316 if (Callee) {
1317 if (Argument *Arg = IRP.getAssociatedArgument())
1318 IRPositions.emplace_back(IRPosition::argument(*Arg));
1319 IRPositions.emplace_back(IRPosition::function(*Callee));
1320 }
1321 }
1322 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1323 return;
1324 }
1325 }
1326}
1327
1328void IRPosition::verify() {
1329#ifdef EXPENSIVE_CHECKS
1330 switch (getPositionKind()) {
1331 case IRP_INVALID:
1332 assert((CBContext == nullptr) &&
1333 "Invalid position must not have CallBaseContext!");
1334 assert(!Enc.getOpaqueValue() &&
1335 "Expected a nullptr for an invalid position!");
1336 return;
1337 case IRP_FLOAT:
1338 assert((!isa<Argument>(&getAssociatedValue())) &&
1339 "Expected specialized kind for argument values!");
1340 return;
1341 case IRP_RETURNED:
1342 assert(isa<Function>(getAsValuePtr()) &&
1343 "Expected function for a 'returned' position!");
1344 assert(getAsValuePtr() == &getAssociatedValue() &&
1345 "Associated value mismatch!");
1346 return;
1347 case IRP_CALL_SITE_RETURNED:
1348 assert((CBContext == nullptr) &&
1349 "'call site returned' position must not have CallBaseContext!");
1350 assert((isa<CallBase>(getAsValuePtr())) &&
1351 "Expected call base for 'call site returned' position!");
1352 assert(getAsValuePtr() == &getAssociatedValue() &&
1353 "Associated value mismatch!");
1354 return;
1355 case IRP_CALL_SITE:
1356 assert((CBContext == nullptr) &&
1357 "'call site function' position must not have CallBaseContext!");
1358 assert((isa<CallBase>(getAsValuePtr())) &&
1359 "Expected call base for 'call site function' position!");
1360 assert(getAsValuePtr() == &getAssociatedValue() &&
1361 "Associated value mismatch!");
1362 return;
1363 case IRP_FUNCTION:
1364 assert(isa<Function>(getAsValuePtr()) &&
1365 "Expected function for a 'function' position!");
1366 assert(getAsValuePtr() == &getAssociatedValue() &&
1367 "Associated value mismatch!");
1368 return;
1369 case IRP_ARGUMENT:
1370 assert(isa<Argument>(getAsValuePtr()) &&
1371 "Expected argument for a 'argument' position!");
1372 assert(getAsValuePtr() == &getAssociatedValue() &&
1373 "Associated value mismatch!");
1374 return;
1375 case IRP_CALL_SITE_ARGUMENT: {
1376 assert((CBContext == nullptr) &&
1377 "'call site argument' position must not have CallBaseContext!");
1378 Use *U = getAsUsePtr();
1379 (void)U; // Silence unused variable warning.
1380 assert(U && "Expected use for a 'call site argument' position!");
1381 assert(isa<CallBase>(U->getUser()) &&
1382 "Expected call base user for a 'call site argument' position!");
1383 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1384 "Expected call base argument operand for a 'call site argument' "
1385 "position");
1386 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1387 unsigned(getCallSiteArgNo()) &&
1388 "Argument number mismatch!");
1389 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1390 return;
1391 }
1392 }
1393#endif
1394}
1395
1396std::optional<Constant *>
1398 const AbstractAttribute &AA,
1399 bool &UsedAssumedInformation) {
1400 // First check all callbacks provided by outside AAs. If any of them returns
1401 // a non-null value that is different from the associated value, or
1402 // std::nullopt, we assume it's simplified.
1403 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1404 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1405 if (!SimplifiedV)
1406 return std::nullopt;
1407 if (isa_and_nonnull<Constant>(*SimplifiedV))
1408 return cast<Constant>(*SimplifiedV);
1409 return nullptr;
1410 }
1411 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1412 return C;
1413 SmallVector<AA::ValueAndContext> Values;
1414 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1415 AA::Interprocedural,
1416 UsedAssumedInformation)) {
1417 if (Values.empty())
1418 return std::nullopt;
1419 if (auto *C = dyn_cast_or_null<Constant>(
1420 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1421 return C;
1422 }
1423 return nullptr;
1424}
1425
1426std::optional<Value *> Attributor::getAssumedSimplified(
1427 const IRPosition &IRP, const AbstractAttribute *AA,
1428 bool &UsedAssumedInformation, AA::ValueScope S) {
1429 // First check all callbacks provided by outside AAs. If any of them returns
1430 // a non-null value that is different from the associated value, or
1431 // std::nullopt, we assume it's simplified.
1432 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1433 return CB(IRP, AA, UsedAssumedInformation);
1434
1435 SmallVector<AA::ValueAndContext> Values;
1436 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1437 return &IRP.getAssociatedValue();
1438 if (Values.empty())
1439 return std::nullopt;
1440 if (AA)
1441 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1442 return V;
1443 if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1444 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1445 return nullptr;
1446 return &IRP.getAssociatedValue();
1447}
1448
1449bool Attributor::getAssumedSimplifiedValues(
1450 const IRPosition &InitialIRP, const AbstractAttribute *AA,
1451 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1452 bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1453 SmallPtrSet<Value *, 8> Seen;
1454 SmallVector<IRPosition, 8> Worklist;
1455 Worklist.push_back(InitialIRP);
1456 while (!Worklist.empty()) {
1457 const IRPosition &IRP = Worklist.pop_back_val();
1458
1459 // First check all callbacks provided by outside AAs. If any of them returns
1460 // a non-null value that is different from the associated value, or
1461 // std::nullopt, we assume it's simplified.
1462 int NV = Values.size();
1463 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1464 for (const auto &CB : SimplificationCBs) {
1465 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1466 if (!CBResult.has_value())
1467 continue;
1468 Value *V = *CBResult;
1469 if (!V)
1470 return false;
1471 if ((S & AA::ValueScope::Interprocedural) ||
1472 AA::isValidInScope(*V, IRP.getAnchorScope()))
1473 Values.push_back(AA::ValueAndContext{*V, nullptr});
1474 else
1475 return false;
1476 }
1477 if (SimplificationCBs.empty()) {
1478 // If no high-level/outside simplification occurred, use
1479 // AAPotentialValues.
1480 const auto *PotentialValuesAA =
1481 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1482 if (PotentialValuesAA && PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1483 UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1484 } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1485 Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1486 } else {
1487 // TODO: We could visit all returns and add the operands.
1488 return false;
1489 }
1490 }
1491
1492 if (!RecurseForSelectAndPHI)
1493 break;
1494
1495 for (int I = NV, E = Values.size(); I < E; ++I) {
1496 Value *V = Values[I].getValue();
1497 if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1498 continue;
1499 if (!Seen.insert(V).second)
1500 continue;
1501 // Move the last element to this slot.
1502 Values[I] = Values[E - 1];
1503 // Eliminate the last slot, adjust the indices.
1504 Values.pop_back();
1505 --E;
1506 --I;
1507 // Add a new value (select or phi) to the worklist.
1508 Worklist.push_back(IRPosition::value(*V));
1509 }
1510 }
1511 return true;
1512}
1513
1514std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1515 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1516 bool &UsedAssumedInformation) {
1517 if (!V)
1518 return V;
1519 if (*V == nullptr || isa<Constant>(*V))
1520 return V;
1521 if (auto *Arg = dyn_cast<Argument>(*V))
1522 if (CB.getCalledOperand() == Arg->getParent() &&
1523 CB.arg_size() > Arg->getArgNo())
1524 if (!Arg->hasPointeeInMemoryValueAttr())
1525 return getAssumedSimplified(
1526 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1527 UsedAssumedInformation, AA::Intraprocedural);
1528 return nullptr;
1529}
1530
1531Attributor::~Attributor() {
1532 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1533 // thus we cannot delete them. We can, and want to, destruct them though.
1534 for (auto &It : AAMap) {
1535 AbstractAttribute *AA = It.getSecond();
1536 AA->~AbstractAttribute();
1537 }
1538}
1539
1540bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1541 const AAIsDead *FnLivenessAA,
1542 bool &UsedAssumedInformation,
1543 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1544 if (!Configuration.UseLiveness)
1545 return false;
1546 const IRPosition &IRP = AA.getIRPosition();
1547 if (!Functions.count(IRP.getAnchorScope()))
1548 return false;
1549 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1550 CheckBBLivenessOnly, DepClass);
1551}
1552
1553bool Attributor::isAssumedDead(const Use &U,
1554 const AbstractAttribute *QueryingAA,
1555 const AAIsDead *FnLivenessAA,
1556 bool &UsedAssumedInformation,
1557 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1558 if (!Configuration.UseLiveness)
1559 return false;
1560 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1561 if (!UserI)
1562 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1563 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1564
1565 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1566 // For call site argument uses we can check if the argument is
1567 // unused/dead.
1568 if (CB->isArgOperand(&U)) {
1569 const IRPosition &CSArgPos =
1570 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1571 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1572 UsedAssumedInformation, CheckBBLivenessOnly,
1573 DepClass);
1574 }
1575 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1576 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1577 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1578 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1579 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1580 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1581 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1582 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1583 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1584 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1585 const IRPosition IRP = IRPosition::inst(*SI);
1586 const AAIsDead *IsDeadAA =
1587 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1588 if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1589 if (QueryingAA)
1590 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1591 if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1592 UsedAssumedInformation = true;
1593 return true;
1594 }
1595 }
1596 }
1597
1598 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1599 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1600}
1601
1602bool Attributor::isAssumedDead(const Instruction &I,
1603 const AbstractAttribute *QueryingAA,
1604 const AAIsDead *FnLivenessAA,
1605 bool &UsedAssumedInformation,
1606 bool CheckBBLivenessOnly, DepClassTy DepClass,
1607 bool CheckForDeadStore) {
1608 if (!Configuration.UseLiveness)
1609 return false;
1610 const IRPosition::CallBaseContext *CBCtx =
1611 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1612
1613 if (ManifestAddedBlocks.contains(I.getParent()))
1614 return false;
1615
1616 const Function &F = *I.getFunction();
1617 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1618 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1619 QueryingAA, DepClassTy::NONE);
1620
1621 // Don't use recursive reasoning.
1622 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1623 return false;
1624
1625 // If we have a context instruction and a liveness AA we use it.
1626 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1627 : FnLivenessAA->isAssumedDead(&I)) {
1628 if (QueryingAA)
1629 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1630 if (!FnLivenessAA->isKnownDead(&I))
1631 UsedAssumedInformation = true;
1632 return true;
1633 }
1634
1635 if (CheckBBLivenessOnly)
1636 return false;
1637
1638 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1639 const AAIsDead *IsDeadAA =
1640 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1641
1642 // Don't use recursive reasoning.
1643 if (!IsDeadAA || QueryingAA == IsDeadAA)
1644 return false;
1645
1646 if (IsDeadAA->isAssumedDead()) {
1647 if (QueryingAA)
1648 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1649 if (!IsDeadAA->isKnownDead())
1650 UsedAssumedInformation = true;
1651 return true;
1652 }
1653
1654 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1655 if (QueryingAA)
1656 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1657 if (!IsDeadAA->isKnownDead())
1658 UsedAssumedInformation = true;
1659 return true;
1660 }
1661
1662 return false;
1663}
1664
1665bool Attributor::isAssumedDead(const IRPosition &IRP,
1666 const AbstractAttribute *QueryingAA,
1667 const AAIsDead *FnLivenessAA,
1668 bool &UsedAssumedInformation,
1669 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1670 if (!Configuration.UseLiveness)
1671 return false;
1672 // Don't check liveness for constants, e.g. functions, used as (floating)
1673 // values since the context instruction and such is here meaningless.
1674 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1675 isa<Constant>(IRP.getAssociatedValue())) {
1676 return false;
1677 }
1678
1679 Instruction *CtxI = IRP.getCtxI();
1680 if (CtxI &&
1681 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1682 /* CheckBBLivenessOnly */ true,
1683 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1684 return true;
1685
1686 if (CheckBBLivenessOnly)
1687 return false;
1688
1689 // If we haven't succeeded we query the specific liveness info for the IRP.
1690 const AAIsDead *IsDeadAA;
1691 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1692 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1693 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1694 QueryingAA, DepClassTy::NONE);
1695 else
1696 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1697
1698 // Don't use recursive reasoning.
1699 if (!IsDeadAA || QueryingAA == IsDeadAA)
1700 return false;
1701
1702 if (IsDeadAA->isAssumedDead()) {
1703 if (QueryingAA)
1704 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1705 if (!IsDeadAA->isKnownDead())
1706 UsedAssumedInformation = true;
1707 return true;
1708 }
1709
1710 return false;
1711}
1712
1713bool Attributor::isAssumedDead(const BasicBlock &BB,
1714 const AbstractAttribute *QueryingAA,
1715 const AAIsDead *FnLivenessAA,
1716 DepClassTy DepClass) {
1717 if (!Configuration.UseLiveness)
1718 return false;
1719 const Function &F = *BB.getParent();
1720 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1721 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1722 QueryingAA, DepClassTy::NONE);
1723
1724 // Don't use recursive reasoning.
1725 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1726 return false;
1727
1728 if (FnLivenessAA->isAssumedDead(&BB)) {
1729 if (QueryingAA)
1730 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1731 return true;
1732 }
1733
1734 return false;
1735}
1736
1737bool Attributor::checkForAllCallees(
1738 function_ref<bool(ArrayRef<const Function *>)> Pred,
1739 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1740 if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1741 return Pred(Callee);
1742
1743 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1744 QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
1745 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1746 return false;
1747
1748 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1749 return Pred(Callees.getArrayRef());
1750}
1751
1752bool Attributor::checkForAllUses(
1753 function_ref<bool(const Use &, bool &)> Pred,
1754 const AbstractAttribute &QueryingAA, const Value &V,
1755 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1756 bool IgnoreDroppableUses,
1757 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1758
1759 // Check virtual uses first.
1760 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1761 if (!CB(*this, &QueryingAA))
1762 return false;
1763
1764 // Check the trivial case first as it catches void values.
1765 if (V.use_empty())
1766 return true;
1767
1768 const IRPosition &IRP = QueryingAA.getIRPosition();
1769 SmallVector<const Use *, 16> Worklist;
1770 SmallPtrSet<const Use *, 16> Visited;
1771
1772 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1773 for (const Use &UU : V.uses()) {
1774 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1775 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1776 "rejected by the equivalence call back: "
1777 << *UU << "!\n");
1778 return false;
1779 }
1780
1781 Worklist.push_back(&UU);
1782 }
1783 return true;
1784 };
1785
1786 AddUsers(V, /* OldUse */ nullptr);
1787
1788 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1789 << " initial uses to check\n");
1790
1791 const Function *ScopeFn = IRP.getAnchorScope();
1792 const auto *LivenessAA =
1793 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1795 : nullptr;
1796
1797 while (!Worklist.empty()) {
1798 const Use *U = Worklist.pop_back_val();
1799 if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
1800 continue;
1802 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1803 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1804 << "\n";
1805 else
1806 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1807 << "\n";
1808 });
1809 bool UsedAssumedInformation = false;
1810 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1811 CheckBBLivenessOnly, LivenessDepClass)) {
1813 dbgs() << "[Attributor] Dead use, skip!\n");
1814 continue;
1815 }
1816 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1818 dbgs() << "[Attributor] Droppable user, skip!\n");
1819 continue;
1820 }
1821
1822 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1823 if (&SI->getOperandUse(0) == U) {
1824 if (!Visited.insert(U).second)
1825 continue;
1826 SmallSetVector<Value *, 4> PotentialCopies;
1828 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1829 /* OnlyExact */ true)) {
1831 dbgs()
1832 << "[Attributor] Value is stored, continue with "
1833 << PotentialCopies.size()
1834 << " potential copies instead!\n");
1835 for (Value *PotentialCopy : PotentialCopies)
1836 if (!AddUsers(*PotentialCopy, U))
1837 return false;
1838 continue;
1839 }
1840 }
1841 }
1842
1843 bool Follow = false;
1844 if (!Pred(*U, Follow))
1845 return false;
1846 if (!Follow)
1847 continue;
1848
1849 User &Usr = *U->getUser();
1850 AddUsers(Usr, /* OldUse */ nullptr);
1851
1852 auto *RI = dyn_cast<ReturnInst>(&Usr);
1853 if (!RI)
1854 continue;
1855
1856 Function &F = *RI->getFunction();
1857 auto CallSitePred = [&](AbstractCallSite ACS) {
1858 return AddUsers(*ACS.getInstruction(), U);
1859 };
1860 if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1861 &QueryingAA, UsedAssumedInformation)) {
1862 LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1863 "to all call sites: "
1864 << *RI << "\n");
1865 return false;
1866 }
1867 }
1868
1869 return true;
1870}
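// Usage sketch (hypothetical predicate, written from the perspective of an AA
// with a pointer-valued IR position): walk all transitive uses of the
// associated value, following users we consider transparent:
//
//   auto UsePred = [&](const Use &U, bool &Follow) {
//     if (isa<GetElementPtrInst>(U.getUser()) ||
//         isa<BitCastInst>(U.getUser())) {
//       Follow = true; // also visit the uses of this user
//       return true;
//     }
//     return !isa<PtrToIntInst>(U.getUser()); // give up if the address escapes
//   };
//   bool AllUsesBenign = A.checkForAllUses(UsePred, *this, getAssociatedValue());
//
// Dead and droppable uses are filtered out by default, and values written to
// memory are chased through their potential copies as implemented above.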
1871
1872bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1873 const AbstractAttribute &QueryingAA,
1874 bool RequireAllCallSites,
1875 bool &UsedAssumedInformation) {
1876 // We can try to determine information from the call sites. However, this
1877 // is only possible if all call sites are known, hence the function must
1878 // have internal linkage.
1879 const IRPosition &IRP = QueryingAA.getIRPosition();
1880 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1881 if (!AssociatedFunction) {
1882 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1883 << "\n");
1884 return false;
1885 }
1886
1887 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1888 &QueryingAA, UsedAssumedInformation);
1889}
1890
1891bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1892 const Function &Fn,
1893 bool RequireAllCallSites,
1894 const AbstractAttribute *QueryingAA,
1895 bool &UsedAssumedInformation,
1896 bool CheckPotentiallyDead) {
1897 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1898 LLVM_DEBUG(
1899 dbgs()
1900 << "[Attributor] Function " << Fn.getName()
1901 << " has no internal linkage, hence not all call sites are known\n");
1902 return false;
1903 }
1904 // Check virtual uses first.
1905 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1906 if (!CB(*this, QueryingAA))
1907 return false;
1908
1909 SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1910 for (unsigned u = 0; u < Uses.size(); ++u) {
1911 const Use &U = *Uses[u];
1913 if (auto *Fn = dyn_cast<Function>(U))
1914 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1915 << *U.getUser() << "\n";
1916 else
1917 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1918 << "\n";
1919 });
1920 if (!CheckPotentiallyDead &&
1921 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1922 /* CheckBBLivenessOnly */ true)) {
1924 dbgs() << "[Attributor] Dead use, skip!\n");
1925 continue;
1926 }
1927 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1928 if (CE->isCast() && CE->getType()->isPointerTy()) {
1930 dbgs() << "[Attributor] Use, is constant cast expression, add "
1931 << CE->getNumUses() << " uses of that expression instead!\n";
1932 });
1933 for (const Use &CEU : CE->uses())
1934 Uses.push_back(&CEU);
1935 continue;
1936 }
1937 }
1938
1939 AbstractCallSite ACS(&U);
1940 if (!ACS) {
1941 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1942 << " has non call site use " << *U.get() << " in "
1943 << *U.getUser() << "\n");
1944 // BlockAddress users are allowed.
1945 if (isa<BlockAddress>(U.getUser()))
1946 continue;
1947 return false;
1948 }
1949
1950 const Use *EffectiveUse =
1951 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1952 if (!ACS.isCallee(EffectiveUse)) {
1953 if (!RequireAllCallSites) {
1954 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1955 << " is not a call of " << Fn.getName()
1956 << ", skip use\n");
1957 continue;
1958 }
1959 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1960 << " is an invalid use of " << Fn.getName() << "\n");
1961 return false;
1962 }
1963
1964 // Make sure the arguments that can be matched between the call site and
1965 // the callee agree on their type. It is unlikely they do not, and it
1966 // doesn't make sense for all attributes to know/care about this.
1967 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1968 unsigned MinArgsParams =
1969 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1970 for (unsigned u = 0; u < MinArgsParams; ++u) {
1971 Value *CSArgOp = ACS.getCallArgOperand(u);
1972 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1973 LLVM_DEBUG(
1974 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1975 << u << "@" << Fn.getName() << ": "
1976 << *Fn.getArg(u)->getType() << " vs. "
1977 << *ACS.getCallArgOperand(u)->getType() << "\n");
1978 return false;
1979 }
1980 }
1981
1982 if (Pred(ACS))
1983 continue;
1984
1985 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1986 << *ACS.getInstruction() << "\n");
1987 return false;
1988 }
1989
1990 return true;
1991}
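// Usage sketch (ArgNo and the predicate are illustrative assumptions): an
// argument attribute can require a property of the corresponding operand at
// every known call site:
//
//   auto CallSitePred = [&](AbstractCallSite ACS) {
//     Value *CSArg = ACS.getCallArgOperand(ArgNo);
//     return CSArg && !isa<ConstantPointerNull>(CSArg->stripPointerCasts());
//   };
//   bool UsedAssumedInformation = false;
//   bool HoldsAtAllSites = A.checkForAllCallSites(
//       CallSitePred, *this, /* RequireAllCallSites */ true,
//       UsedAssumedInformation);
//
// With RequireAllCallSites set, the query only succeeds for functions whose
// call sites are all known, as enforced by the local-linkage check above.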
1992
1993bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1994 // TODO: Maintain a cache of Values that are on the pathway from an
1995 // Argument to an Instruction that would affect the liveness/return state,
1996 // etc.
1997 return EnableCallSiteSpecific;
1998}
1999
2000bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
2001 const AbstractAttribute &QueryingAA,
2002 AA::ValueScope S,
2003 bool RecurseForSelectAndPHI) {
2004
2005 const IRPosition &IRP = QueryingAA.getIRPosition();
2006 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2007 if (!AssociatedFunction)
2008 return false;
2009
2010 bool UsedAssumedInformation = false;
2011 SmallVector<AA::ValueAndContext> Values;
2012 if (!getAssumedSimplifiedValues(
2013 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2014 UsedAssumedInformation, RecurseForSelectAndPHI))
2015 return false;
2016
2017 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2018 return Pred(*VAC.getValue());
2019 });
2020}
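// Usage sketch (illustrative predicate): a return-position attribute can check
// a property for every value potentially returned by its associated function:
//
//   bool NoUndefReturns = A.checkForAllReturnedValues(
//       [](Value &RV) { return !isa<UndefValue>(RV); }, *this,
//       AA::ValueScope::Intraprocedural,
//       /* RecurseForSelectAndPHI */ true);
//
// The returned-value set comes from the simplification machinery, so a
// returned select or PHI can be decomposed into its incoming values first.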
2021
2022static bool checkForAllInstructionsImpl(
2023 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2024 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2025 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2026 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2027 bool CheckPotentiallyDead = false) {
2028 for (unsigned Opcode : Opcodes) {
2029 // Check if we have instructions with this opcode at all first.
2030 auto *Insts = OpcodeInstMap.lookup(Opcode);
2031 if (!Insts)
2032 continue;
2033
2034 for (Instruction *I : *Insts) {
2035 // Skip dead instructions.
2036 if (A && !CheckPotentiallyDead &&
2037 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2038 UsedAssumedInformation, CheckBBLivenessOnly)) {
2040 dbgs() << "[Attributor] Instruction " << *I
2041 << " is potentially dead, skip!\n";);
2042 continue;
2043 }
2044
2045 if (!Pred(*I))
2046 return false;
2047 }
2048 }
2049 return true;
2050}
2051
2052bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2053 const Function *Fn,
2054 const AbstractAttribute *QueryingAA,
2055 ArrayRef<unsigned> Opcodes,
2056 bool &UsedAssumedInformation,
2057 bool CheckBBLivenessOnly,
2058 bool CheckPotentiallyDead) {
2059 // Since we need to provide instructions we have to have an exact definition.
2060 if (!Fn || Fn->isDeclaration())
2061 return false;
2062
2063 const IRPosition &QueryIRP = IRPosition::function(*Fn);
2064 const auto *LivenessAA =
2065 CheckPotentiallyDead && QueryingAA
2066 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2067 : nullptr;
2068
2069 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2070 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2071 LivenessAA, Opcodes, UsedAssumedInformation,
2072 CheckBBLivenessOnly, CheckPotentiallyDead))
2073 return false;
2074
2075 return true;
2076}
2077
2078bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2079 const AbstractAttribute &QueryingAA,
2080 ArrayRef<unsigned> Opcodes,
2081 bool &UsedAssumedInformation,
2082 bool CheckBBLivenessOnly,
2083 bool CheckPotentiallyDead) {
2084 const IRPosition &IRP = QueryingAA.getIRPosition();
2085 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2086 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2087 UsedAssumedInformation, CheckBBLivenessOnly,
2088 CheckPotentiallyDead);
2089}
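// Usage sketch (illustrative): attributes typically visit only the opcodes
// they care about, e.g. all call-like instructions of the associated function:
//
//   bool UsedAssumedInformation = false;
//   bool NoConvergentCalls = A.checkForAllInstructions(
//       [](Instruction &I) { return !cast<CallBase>(I).isConvergent(); },
//       *this,
//       {(unsigned)Instruction::Call, (unsigned)Instruction::Invoke,
//        (unsigned)Instruction::CallBr},
//       UsedAssumedInformation);
//
// Instructions that are assumed dead are skipped unless CheckPotentiallyDead
// is set, mirroring the implementation above.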
2090
2091bool Attributor::checkForAllReadWriteInstructions(
2092 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2093 bool &UsedAssumedInformation) {
2094 TimeTraceScope TS("checkForAllReadWriteInstructions");
2095
2096 const Function *AssociatedFunction =
2097 QueryingAA.getIRPosition().getAssociatedFunction();
2098 if (!AssociatedFunction)
2099 return false;
2100
2101 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2102 const auto *LivenessAA =
2103 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2104
2105 for (Instruction *I :
2106 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2107 // Skip dead instructions.
2108 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2109 UsedAssumedInformation))
2110 continue;
2111
2112 if (!Pred(*I))
2113 return false;
2114 }
2115
2116 return true;
2117}
2118
2119void Attributor::runTillFixpoint() {
2120 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2121 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2122 << DG.SyntheticRoot.Deps.size()
2123 << " abstract attributes.\n");
2124
2125 // Now that all abstract attributes are collected and initialized we start
2126 // the abstract analysis.
2127
2128 unsigned IterationCounter = 1;
2129 unsigned MaxIterations =
2130 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2131
2132 SmallVector<AbstractAttribute *, 32> ChangedAAs;
2133 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2134 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
2135
2136 do {
2137 // Remember the size to determine new attributes.
2138 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2139 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2140 << ", Worklist size: " << Worklist.size() << "\n");
2141
2142 // For invalid AAs we can fix dependent AAs that have a required dependence,
2143 // thereby folding long dependence chains in a single step without the need
2144 // to run updates.
2145 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2146 AbstractAttribute *InvalidAA = InvalidAAs[u];
2147
2148 // Check the dependences to fast track invalidation.
2150 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2151 << " has " << InvalidAA->Deps.size()
2152 << " required & optional dependences\n");
2153 for (auto &DepIt : InvalidAA->Deps) {
2154 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2155 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2157 dbgs() << " - recompute: " << *DepAA);
2158 Worklist.insert(DepAA);
2159 continue;
2160 }
2162 << " - invalidate: " << *DepAA);
2164 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2165 if (!DepAA->getState().isValidState())
2166 InvalidAAs.insert(DepAA);
2167 else
2168 ChangedAAs.push_back(DepAA);
2169 }
2170 InvalidAA->Deps.clear();
2171 }
2172
2173 // Add all abstract attributes that are potentially dependent on one that
2174 // changed to the work list.
2175 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2176 for (auto &DepIt : ChangedAA->Deps)
2177 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2178 ChangedAA->Deps.clear();
2179 }
2180
2181 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2182 << ", Worklist+Dependent size: " << Worklist.size()
2183 << "\n");
2184
2185 // Reset the changed and invalid set.
2186 ChangedAAs.clear();
2187 InvalidAAs.clear();
2188
2189 // Update all abstract attribute in the work list and record the ones that
2190 // changed.
2191 for (AbstractAttribute *AA : Worklist) {
2192 const auto &AAState = AA->getState();
2193 if (!AAState.isAtFixpoint())
2194 if (updateAA(*AA) == ChangeStatus::CHANGED)
2195 ChangedAAs.push_back(AA);
2196
2197 // Use the InvalidAAs vector to quickly propagate invalid states
2198 // transitively without requiring updates.
2199 if (!AAState.isValidState())
2200 InvalidAAs.insert(AA);
2201 }
2202
2203 // Add attributes to the changed set if they have been created in the last
2204 // iteration.
2205 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2206 DG.SyntheticRoot.end());
2207
2208 // Reset the work list and repopulate with the changed abstract attributes.
2209 // Note that dependent ones are added above.
2210 Worklist.clear();
2211 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2212 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2213 QueryAAsAwaitingUpdate.end());
2214 QueryAAsAwaitingUpdate.clear();
2215
2216 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2217
2218 if (IterationCounter > MaxIterations && !Functions.empty()) {
2219 auto Remark = [&](OptimizationRemarkMissed ORM) {
2220 return ORM << "Attributor did not reach a fixpoint after "
2221 << ore::NV("Iterations", MaxIterations) << " iterations.";
2222 };
2223 Function *F = Functions.front();
2224 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2225 }
2226
2227 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2228 << IterationCounter << "/" << MaxIterations
2229 << " iterations\n");
2230
2231 // Reset abstract attributes not settled in a sound fixpoint by now. This
2232 // happens when we stopped the fixpoint iteration early. Note that only the
2233 // ones marked as "changed" *and* the ones transitively depending on them
2234 // need to be reverted to a pessimistic state. Others might not be in a
2235 // fixpoint state but we can use the optimistic results for them anyway.
2237 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2238 AbstractAttribute *ChangedAA = ChangedAAs[u];
2239 if (!Visited.insert(ChangedAA).second)
2240 continue;
2241
2242 AbstractState &State = ChangedAA->getState();
2243 if (!State.isAtFixpoint()) {
2244 State.indicatePessimisticFixpoint();
2245
2246 NumAttributesTimedOut++;
2247 }
2248
2249 for (auto &DepIt : ChangedAA->Deps)
2250 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2251 ChangedAA->Deps.clear();
2252 }
2253
2254 LLVM_DEBUG({
2255 if (!Visited.empty())
2256 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2257 << " abstract attributes.\n";
2258 });
2259}
2260
2261void Attributor::registerForUpdate(AbstractAttribute &AA) {
2262 assert(AA.isQueryAA() &&
2263 "Non-query AAs should not be required to register for updates!");
2264 QueryAAsAwaitingUpdate.insert(&AA);
2265}
2266
2267ChangeStatus Attributor::manifestAttributes() {
2268 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2269 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2270
2271 unsigned NumManifested = 0;
2272 unsigned NumAtFixpoint = 0;
2273 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2274 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2275 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2276 AbstractState &State = AA->getState();
2277
2278 // If a fixpoint was not reached already, we can now take the
2279 // optimistic state. This is correct because we enforced a pessimistic one
2280 // on abstract attributes that were transitively dependent on a changed one
2281 // already above.
2282 if (!State.isAtFixpoint())
2283 State.indicateOptimisticFixpoint();
2284
2285 // We must not manifest Attributes that use CallBase info.
2286 if (AA->hasCallBaseContext())
2287 continue;
2288 // If the state is invalid, we do not try to manifest it.
2289 if (!State.isValidState())
2290 continue;
2291
2292 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2293 continue;
2294
2295 // Skip dead code.
2296 bool UsedAssumedInformation = false;
2297 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2298 /* CheckBBLivenessOnly */ true))
2299 continue;
2300 // Check the manifest debug counter that allows skipping the manifestation
2301 // of individual AAs.
2302 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2303 continue;
2304 // Manifest the state and record if we changed the IR.
2305 ChangeStatus LocalChange = AA->manifest(*this);
2306 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2307 AA->trackStatistics();
2308 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2309 << "\n");
2310
2311 ManifestChange = ManifestChange | LocalChange;
2312
2313 NumAtFixpoint++;
2314 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2315 }
2316
2317 (void)NumManifested;
2318 (void)NumAtFixpoint;
2319 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2320 << " arguments while " << NumAtFixpoint
2321 << " were in a valid fixpoint state\n");
2322
2323 NumAttributesManifested += NumManifested;
2324 NumAttributesValidFixpoint += NumAtFixpoint;
2325
2326 (void)NumFinalAAs;
2327 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2328 auto DepIt = DG.SyntheticRoot.Deps.begin();
2329 for (unsigned u = 0; u < NumFinalAAs; ++u)
2330 ++DepIt;
2331 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2332 ++u, ++DepIt) {
2333 errs() << "Unexpected abstract attribute: "
2334 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2335 << cast<AbstractAttribute>(DepIt->getPointer())
2336 ->getIRPosition()
2337 .getAssociatedValue()
2338 << "\n";
2339 }
2340 llvm_unreachable("Expected the final number of abstract attributes to "
2341 "remain unchanged!");
2342 }
2343
2344 for (auto &It : AttrsMap) {
2345 AttributeList &AL = It.getSecond();
2346 const IRPosition &IRP =
2347 isa<Function>(It.getFirst())
2348 ? IRPosition::function(*cast<Function>(It.getFirst()))
2349 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2350 IRP.setAttrList(AL);
2351 }
2352
2353 return ManifestChange;
2354}
2355
2356void Attributor::identifyDeadInternalFunctions() {
2357 // Early exit if we don't intend to delete functions.
2358 if (!Configuration.DeleteFns)
2359 return;
2360
2361 // To avoid triggering an assertion in the lazy call graph we will not delete
2362 // any internal library functions. We should modify the assertion though and
2363 // allow internals to be deleted.
2364 const auto *TLI =
2365 isModulePass()
2366 ? nullptr
2368 LibFunc LF;
2369
2370 // Identify dead internal functions and delete them. This happens outside
2371 // the other fixpoint analysis as we might treat potentially dead functions
2372 // as live to lower the number of iterations. If they happen to be dead, the
2373 // below fixpoint loop will identify and eliminate them.
2374
2375 SmallVector<Function *, 8> InternalFns;
2376 for (Function *F : Functions)
2377 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2378 InternalFns.push_back(F);
2379
2380 SmallPtrSet<Function *, 8> LiveInternalFns;
2381 bool FoundLiveInternal = true;
2382 while (FoundLiveInternal) {
2383 FoundLiveInternal = false;
2384 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
2385 Function *F = InternalFns[u];
2386 if (!F)
2387 continue;
2388
2389 bool UsedAssumedInformation = false;
2391 [&](AbstractCallSite ACS) {
2393 return ToBeDeletedFunctions.count(Callee) ||
2394 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2395 !LiveInternalFns.count(Callee));
2396 },
2397 *F, true, nullptr, UsedAssumedInformation)) {
2398 continue;
2399 }
2400
2401 LiveInternalFns.insert(F);
2402 InternalFns[u] = nullptr;
2403 FoundLiveInternal = true;
2404 }
2405 }
2406
2407 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
2408 if (Function *F = InternalFns[u])
2409 ToBeDeletedFunctions.insert(F);
2410}
2411
2412ChangeStatus Attributor::cleanupIR() {
2413 TimeTraceScope TimeScope("Attributor::cleanupIR");
2414 // Delete stuff at the end to avoid invalid references and to get a nice order.
2415 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2416 << ToBeDeletedFunctions.size() << " functions and "
2417 << ToBeDeletedBlocks.size() << " blocks and "
2418 << ToBeDeletedInsts.size() << " instructions and "
2419 << ToBeChangedValues.size() << " values and "
2420 << ToBeChangedUses.size() << " uses. To insert "
2421 << ToBeChangedToUnreachableInsts.size()
2422 << " unreachables.\n"
2423 << "Preserve manifest added " << ManifestAddedBlocks.size()
2424 << " blocks\n");
2425
2427 SmallVector<Instruction *, 32> TerminatorsToFold;
2428
2429 auto ReplaceUse = [&](Use *U, Value *NewV) {
2430 Value *OldV = U->get();
2431
2432 // If we plan to replace NewV we need to update it at this point.
2433 do {
2434 const auto &Entry = ToBeChangedValues.lookup(NewV);
2435 if (!get<0>(Entry))
2436 break;
2437 NewV = get<0>(Entry);
2438 } while (true);
2439
2440 Instruction *I = dyn_cast<Instruction>(U->getUser());
2441 assert((!I || isRunOn(*I->getFunction())) &&
2442 "Cannot replace an instruction outside the current SCC!");
2443
2444 // Do not replace uses in returns if the value is a must-tail call we will
2445 // not delete.
2446 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2447 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2448 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2449 return;
2450 // If we rewrite a return and the new value is not an argument, strip the
2451 // `returned` attribute as it is wrong now.
2452 if (!isa<Argument>(NewV))
2453 for (auto &Arg : RI->getFunction()->args())
2454 Arg.removeAttr(Attribute::Returned);
2455 }
2456
2457 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2458 << " instead of " << *OldV << "\n");
2459 U->set(NewV);
2460
2461 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2462 CGModifiedFunctions.insert(I->getFunction());
2463 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2465 DeadInsts.push_back(I);
2466 }
2467 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2468 auto *CB = cast<CallBase>(U->getUser());
2469 if (CB->isArgOperand(U)) {
2470 unsigned Idx = CB->getArgOperandNo(U);
2471 CB->removeParamAttr(Idx, Attribute::NoUndef);
2472 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2473 if (Callee && Callee->arg_size() > Idx)
2474 Callee->removeParamAttr(Idx, Attribute::NoUndef);
2475 }
2476 }
2477 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2478 Instruction *UserI = cast<Instruction>(U->getUser());
2479 if (isa<UndefValue>(NewV)) {
2480 ToBeChangedToUnreachableInsts.insert(UserI);
2481 } else {
2482 TerminatorsToFold.push_back(UserI);
2483 }
2484 }
2485 };
2486
2487 for (auto &It : ToBeChangedUses) {
2488 Use *U = It.first;
2489 Value *NewV = It.second;
2490 ReplaceUse(U, NewV);
2491 }
2492
2494 for (auto &It : ToBeChangedValues) {
2495 Value *OldV = It.first;
2496 auto [NewV, Done] = It.second;
2497 Uses.clear();
2498 for (auto &U : OldV->uses())
2499 if (Done || !U.getUser()->isDroppable())
2500 Uses.push_back(&U);
2501 for (Use *U : Uses) {
2502 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2503 if (!isRunOn(*I->getFunction()))
2504 continue;
2505 ReplaceUse(U, NewV);
2506 }
2507 }
2508
2509 for (const auto &V : InvokeWithDeadSuccessor)
2510 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2511 assert(isRunOn(*II->getFunction()) &&
2512 "Cannot replace an invoke outside the current SCC!");
2513 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2514 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2515 bool Invoke2CallAllowed =
2516 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2517 assert((UnwindBBIsDead || NormalBBIsDead) &&
2518 "Invoke does not have dead successors!");
2519 BasicBlock *BB = II->getParent();
2520 BasicBlock *NormalDestBB = II->getNormalDest();
2521 if (UnwindBBIsDead) {
2522 Instruction *NormalNextIP = &NormalDestBB->front();
2523 if (Invoke2CallAllowed) {
2524 changeToCall(II);
2525 NormalNextIP = BB->getTerminator();
2526 }
2527 if (NormalBBIsDead)
2528 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2529 } else {
2530 assert(NormalBBIsDead && "Broken invariant!");
2531 if (!NormalDestBB->getUniquePredecessor())
2532 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2533 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2534 }
2535 }
2536 for (Instruction *I : TerminatorsToFold) {
2537 assert(isRunOn(*I->getFunction()) &&
2538 "Cannot replace a terminator outside the current SCC!");
2539 CGModifiedFunctions.insert(I->getFunction());
2540 ConstantFoldTerminator(I->getParent());
2541 }
2542 for (const auto &V : ToBeChangedToUnreachableInsts)
2543 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2544 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2545 << "\n");
2546 assert(isRunOn(*I->getFunction()) &&
2547 "Cannot replace an instruction outside the current SCC!");
2548 CGModifiedFunctions.insert(I->getFunction());
2549 changeToUnreachable(I);
2550 }
2551
2552 for (const auto &V : ToBeDeletedInsts) {
2553 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2554 if (auto *CB = dyn_cast<CallBase>(I)) {
2555 assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
2556 "Cannot delete an instruction outside the current SCC!");
2557 if (!isa<IntrinsicInst>(CB))
2558 Configuration.CGUpdater.removeCallSite(*CB);
2559 }
2560 I->dropDroppableUses();
2561 CGModifiedFunctions.insert(I->getFunction());
2562 if (!I->getType()->isVoidTy())
2563 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2564 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2565 DeadInsts.push_back(I);
2566 else
2567 I->eraseFromParent();
2568 }
2569 }
2570
2571 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2572
2573 LLVM_DEBUG({
2574 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2575 for (auto &I : DeadInsts)
2576 if (I)
2577 dbgs() << " - " << *I << "\n";
2578 });
2579
2580 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2581
2582 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2583 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2584 ToBeDeletedBBs.reserve(NumDeadBlocks);
2585 for (BasicBlock *BB : ToBeDeletedBlocks) {
2586 assert(isRunOn(*BB->getParent()) &&
2587 "Cannot delete a block outside the current SCC!");
2588 CGModifiedFunctions.insert(BB->getParent());
2589 // Do not delete BBs added during manifests of AAs.
2590 if (ManifestAddedBlocks.contains(BB))
2591 continue;
2592 ToBeDeletedBBs.push_back(BB);
2593 }
2594 // We do not actually delete the blocks but squash them into a single
2595 // unreachable instruction; untangling branches that jump here is something
2596 // we need to do in a more generic way.
2597 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2598 }
2599
2600 identifyDeadInternalFunctions();
2601
2602 // Rewrite the functions as requested during manifest.
2603 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2604
2605 for (Function *Fn : CGModifiedFunctions)
2606 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2607 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2608
2609 for (Function *Fn : ToBeDeletedFunctions) {
2610 if (!Functions.count(Fn))
2611 continue;
2612 Configuration.CGUpdater.removeFunction(*Fn);
2613 }
2614
2615 if (!ToBeChangedUses.empty())
2616 ManifestChange = ChangeStatus::CHANGED;
2617
2618 if (!ToBeChangedToUnreachableInsts.empty())
2619 ManifestChange = ChangeStatus::CHANGED;
2620
2621 if (!ToBeDeletedFunctions.empty())
2622 ManifestChange = ChangeStatus::CHANGED;
2623
2624 if (!ToBeDeletedBlocks.empty())
2625 ManifestChange = ChangeStatus::CHANGED;
2626
2627 if (!ToBeDeletedInsts.empty())
2628 ManifestChange = ChangeStatus::CHANGED;
2629
2630 if (!InvokeWithDeadSuccessor.empty())
2631 ManifestChange = ChangeStatus::CHANGED;
2632
2633 if (!DeadInsts.empty())
2634 ManifestChange = ChangeStatus::CHANGED;
2635
2636 NumFnDeleted += ToBeDeletedFunctions.size();
2637
2638 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2639 << " functions after manifest.\n");
2640
2641#ifdef EXPENSIVE_CHECKS
2642 for (Function *F : Functions) {
2643 if (ToBeDeletedFunctions.count(F))
2644 continue;
2645 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2646 }
2647#endif
2648
2649 return ManifestChange;
2650}
2651
2652ChangeStatus Attributor::run() {
2653 TimeTraceScope TimeScope("Attributor::run");
2654 AttributorCallGraph ACallGraph(*this);
2655
2656 if (PrintCallGraph)
2657 ACallGraph.populateAll();
2658
2659 Phase = AttributorPhase::UPDATE;
2660 runTillFixpoint();
2661
2662 // dump graphs on demand
2663 if (DumpDepGraph)
2664 DG.dumpGraph();
2665
2666 if (ViewDepGraph)
2667 DG.viewGraph();
2668
2670 DG.print();
2671
2672 Phase = AttributorPhase::MANIFEST;
2673 ChangeStatus ManifestChange = manifestAttributes();
2674
2675 Phase = AttributorPhase::CLEANUP;
2676 ChangeStatus CleanupChange = cleanupIR();
2677
2678 if (PrintCallGraph)
2679 ACallGraph.print();
2680
2681 return ManifestChange | CleanupChange;
2682}
2683
2684ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2685 TimeTraceScope TimeScope("updateAA", [&]() {
2686 return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2687 });
2688 assert(Phase == AttributorPhase::UPDATE &&
2689 "We can update AA only in the update stage!");
2690
2691 // Use a new dependence vector for this update.
2692 DependenceVector DV;
2693 DependenceStack.push_back(&DV);
2694
2695 auto &AAState = AA.getState();
2696 ChangeStatus CS = ChangeStatus::UNCHANGED;
2697 bool UsedAssumedInformation = false;
2698 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2699 /* CheckBBLivenessOnly */ true))
2700 CS = AA.update(*this);
2701
2702 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2703 // If the AA did not rely on outside information but changed, we run it
2704 // again to see if it found a fixpoint. Most AAs do but we don't require
2705 // them to. Hence, it might take the AA multiple iterations to get to a
2706 // fixpoint even if it does not rely on outside information, which is fine.
2707 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2708 if (CS == ChangeStatus::CHANGED)
2709 RerunCS = AA.update(*this);
2710
2711 // If the attribute did not change during the run or rerun, and it still did
2712 // not query any non-fix information, the state will not change and we can
2713 // indicate that right at this point.
2714 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2715 AAState.indicateOptimisticFixpoint();
2716 }
2717
2718 if (!AAState.isAtFixpoint())
2719 rememberDependences();
2720
2721 // Verify the stack was used properly, that is we pop the dependence vector we
2722 // put there earlier.
2723 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2724 (void)PoppedDV;
2725 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2726
2727 return CS;
2728}
2729
2730void Attributor::createShallowWrapper(Function &F) {
2731 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2732
2733 Module &M = *F.getParent();
2734 LLVMContext &Ctx = M.getContext();
2735 FunctionType *FnTy = F.getFunctionType();
2736
2737 Function *Wrapper =
2738 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2739 F.setName(""); // make the wrapped function anonymous
2740 M.getFunctionList().insert(F.getIterator(), Wrapper);
2741 // Flag whether the function is using new-debug-info or not.
2742 Wrapper->IsNewDbgInfoFormat = M.IsNewDbgInfoFormat;
2743
2744 F.setLinkage(GlobalValue::InternalLinkage);
2745
2746 F.replaceAllUsesWith(Wrapper);
2747 assert(F.use_empty() && "Uses remained after wrapper was created!");
2748
2749 // Move the COMDAT section to the wrapper.
2750 // TODO: Check if we need to keep it for F as well.
2751 Wrapper->setComdat(F.getComdat());
2752 F.setComdat(nullptr);
2753
2754 // Copy all metadata and attributes but keep them on F as well.
2756 F.getAllMetadata(MDs);
2757 for (auto MDIt : MDs)
2758 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2759 Wrapper->setAttributes(F.getAttributes());
2760
2761 // Create the call in the wrapper.
2762 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2763
2765 Argument *FArgIt = F.arg_begin();
2766 for (Argument &Arg : Wrapper->args()) {
2767 Args.push_back(&Arg);
2768 Arg.setName((FArgIt++)->getName());
2769 }
2770
2771 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2772 CI->setTailCall(true);
2773 CI->addFnAttr(Attribute::NoInline);
2774 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2775
2776 NumFnShallowWrappersCreated++;
2777}
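// For illustration (schematic IR, names invented), given
//
//   define linkonce i32 @foo(i32 %x) { ... }
//
// the shallow wrapper transformation roughly produces
//
//   define internal i32 @0(i32 %x) { ... }   ; original body, now anonymous
//   define linkonce i32 @foo(i32 %x) {
//   entry:
//     %res = tail call i32 @0(i32 %x)        ; call is marked noinline
//     ret i32 %res
//   }
//
// External callers keep targeting @foo, while the internalized original can be
// analyzed with all of its call sites known.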
2778
2779bool Attributor::isInternalizable(Function &F) {
2780 if (F.isDeclaration() || F.hasLocalLinkage() ||
2781 GlobalValue::isInterposableLinkage(F.getLinkage()))
2782 return false;
2783 return true;
2784}
2785
2786Function *Attributor::internalizeFunction(Function &F, bool Force) {
2787 if (!AllowDeepWrapper && !Force)
2788 return nullptr;
2789 if (!isInternalizable(F))
2790 return nullptr;
2791
2792 SmallPtrSet<Function *, 2> FnSet = {&F};
2793 DenseMap<Function *, Function *> InternalizedFns;
2794 internalizeFunctions(FnSet, InternalizedFns);
2795
2796 return InternalizedFns[&F];
2797}
2798
2799bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2800 DenseMap<Function *, Function *> &FnMap) {
2801 for (Function *F : FnSet)
2802 if (!Attributor::isInternalizable(*F))
2803 return false;
2804
2805 FnMap.clear();
2806 // Generate the internalized version of each function.
2807 for (Function *F : FnSet) {
2808 Module &M = *F->getParent();
2809 FunctionType *FnTy = F->getFunctionType();
2810
2811 // Create a copy of the current function
2812 Function *Copied =
2813 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2814 F->getName() + ".internalized");
2815 ValueToValueMapTy VMap;
2816 auto *NewFArgIt = Copied->arg_begin();
2817 for (auto &Arg : F->args()) {
2818 auto ArgName = Arg.getName();
2819 NewFArgIt->setName(ArgName);
2820 VMap[&Arg] = &(*NewFArgIt++);
2821 }
2823 // Flag whether the function is using new-debug-info or not.
2824 Copied->IsNewDbgInfoFormat = F->IsNewDbgInfoFormat;
2825
2826 // Copy the body of the original function to the new one
2827 CloneFunctionInto(Copied, F, VMap,
2829
2830 // Set the linkage and visibility late as CloneFunctionInto has some
2831 // implicit requirements.
2834
2835 // Copy metadata
2837 F->getAllMetadata(MDs);
2838 for (auto MDIt : MDs)
2839 if (!Copied->hasMetadata())
2840 Copied->addMetadata(MDIt.first, *MDIt.second);
2841
2842 M.getFunctionList().insert(F->getIterator(), Copied);
2843 Copied->setDSOLocal(true);
2844 FnMap[F] = Copied;
2845 }
2846
2847 // Replace all uses of the old function with the new internalized function
2848 // unless the caller is a function that was just internalized.
2849 for (Function *F : FnSet) {
2850 auto &InternalizedFn = FnMap[F];
2851 auto IsNotInternalized = [&](Use &U) -> bool {
2852 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2853 return !FnMap.lookup(CB->getCaller());
2854 return false;
2855 };
2856 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2857 }
2858
2859 return true;
2860}
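// For illustration (schematic IR, names invented): internalizing @bar yields a
// local, dso_local copy next to the untouched original,
//
//   define i32 @bar(i32 %x) { ... }                       ; original
//   define internal i32 @bar.internalized(i32 %x) { ... } ; dso_local copy
//
// and call sites whose caller is not part of the internalized set are
// redirected to the copy. The copy therefore has only known callers and can be
// analyzed under a closed-world assumption, while the original remains for
// external references.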
2861
2862bool Attributor::isValidFunctionSignatureRewrite(
2863 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2864
2865 if (!Configuration.RewriteSignatures)
2866 return false;
2867
2868 Function *Fn = Arg.getParent();
2869 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2870 // Forbid call sites that cast the function return type. If we needed to
2871 // rewrite such functions we would have to re-create a cast for the new
2872 // call site (if the old one had uses).
2873 if (!ACS.getCalledFunction() ||
2874 ACS.getInstruction()->getType() !=
2875 ACS.getCalledFunction()->getReturnType())
2876 return false;
2877 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2878 Fn->getType())
2879 return false;
2880 if (ACS.getNumArgOperands() != Fn->arg_size())
2881 return false;
2882 // Forbid must-tail calls for now.
2883 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2884 };
2885
2886 // Avoid var-arg functions for now.
2887 if (Fn->isVarArg()) {
2888 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2889 return false;
2890 }
2891
2892 // Avoid functions with complicated argument passing semantics.
2893 AttributeList FnAttributeList = Fn->getAttributes();
2894 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2895 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2896 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2897 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2898 LLVM_DEBUG(
2899 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2900 return false;
2901 }
2902
2903 // Avoid callbacks for now.
2904 bool UsedAssumedInformation = false;
2905 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2906 UsedAssumedInformation,
2907 /* CheckPotentiallyDead */ true)) {
2908 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2909 return false;
2910 }
2911
2912 auto InstPred = [](Instruction &I) {
2913 if (auto *CI = dyn_cast<CallInst>(&I))
2914 return !CI->isMustTailCall();
2915 return true;
2916 };
2917
2918 // Forbid must-tail calls for now.
2919 // TODO:
2920 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2921 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2922 nullptr, {Instruction::Call},
2923 UsedAssumedInformation)) {
2924 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2925 return false;
2926 }
2927
2928 return true;
2929}
2930
2931bool Attributor::registerFunctionSignatureRewrite(
2932 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2933 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2934 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2935 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2936 << Arg.getParent()->getName() << " with "
2937 << ReplacementTypes.size() << " replacements\n");
2938 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2939 "Cannot register an invalid rewrite");
2940
2941 Function *Fn = Arg.getParent();
2942 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2943 ArgumentReplacementMap[Fn];
2944 if (ARIs.empty())
2945 ARIs.resize(Fn->arg_size());
2946
2947 // If we already have a replacement with at most as many new arguments,
2948 // ignore this request.
2949 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2950 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2951 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2952 return false;
2953 }
2954
2955 // If we have a replacement already but we like the new one better, delete
2956 // the old.
2957 ARI.reset();
2958
2959 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2960 << Arg.getParent()->getName() << " with "
2961 << ReplacementTypes.size() << " replacements\n");
2962
2963 // Remember the replacement.
2964 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2965 std::move(CalleeRepairCB),
2966 std::move(ACSRepairCB)));
2967
2968 return true;
2969}
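// Usage sketch (callback bodies elided; the exact types involved are
// assumptions made for illustration): an AA that wants to replace a pointer
// argument Arg by two i32 values could register
//
//   Type *I32 = Type::getInt32Ty(Arg.getContext());
//   A.registerFunctionSignatureRewrite(
//       Arg, {I32, I32},
//       /* CalleeRepairCB */
//       [](const Attributor::ArgumentReplacementInfo &ARI, Function &NewFn,
//          Function::arg_iterator NewArgIt) {
//         // Recreate the original value from the two new arguments and
//         // replace the old argument's uses inside NewFn.
//       },
//       /* ACSRepairCB */
//       [](const Attributor::ArgumentReplacementInfo &ARI, AbstractCallSite ACS,
//          SmallVectorImpl<Value *> &NewArgOperands) {
//         // Append the two replacement operands for this call site.
//       });
//
// The callbacks are invoked later from rewriteFunctionSignatures, once the new
// function and the new call sites exist.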
2970
2971bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2972 bool Result = true;
2973#ifndef NDEBUG
2974 if (SeedAllowList.size() != 0)
2976 Function *Fn = AA.getAnchorScope();
2977 if (FunctionSeedAllowList.size() != 0 && Fn)
2979#endif
2980 return Result;
2981}
2982
2983ChangeStatus Attributor::rewriteFunctionSignatures(
2984 SmallSetVector<Function *, 8> &ModifiedFns) {
2985 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2986
2987 for (auto &It : ArgumentReplacementMap) {
2988 Function *OldFn = It.getFirst();
2989
2990 // Deleted functions do not require rewrites.
2991 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2992 continue;
2993
2994 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2995 It.getSecond();
2996 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2997
2998 SmallVector<Type *, 16> NewArgumentTypes;
2999 SmallVector<AttributeSet, 16> NewArgumentAttributes;
3000
3001 // Collect replacement argument types and copy over existing attributes.
3002 AttributeList OldFnAttributeList = OldFn->getAttributes();
3003 for (Argument &Arg : OldFn->args()) {
3004 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3005 ARIs[Arg.getArgNo()]) {
3006 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
3007 ARI->ReplacementTypes.end());
3008 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
3009 AttributeSet());
3010 } else {
3011 NewArgumentTypes.push_back(Arg.getType());
3012 NewArgumentAttributes.push_back(
3013 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
3014 }
3015 }
3016
3017 uint64_t LargestVectorWidth = 0;
3018 for (auto *I : NewArgumentTypes)
3019 if (auto *VT = dyn_cast<llvm::VectorType>(I))
3020 LargestVectorWidth =
3021 std::max(LargestVectorWidth,
3022 VT->getPrimitiveSizeInBits().getKnownMinValue());
3023
3024 FunctionType *OldFnTy = OldFn->getFunctionType();
3025 Type *RetTy = OldFnTy->getReturnType();
3026
3027 // Construct the new function type using the new arguments types.
3028 FunctionType *NewFnTy =
3029 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
3030
3031 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
3032 << "' from " << *OldFn->getFunctionType() << " to "
3033 << *NewFnTy << "\n");
3034
3035 // Create the new function body and insert it into the module.
3036 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
3037 OldFn->getAddressSpace(), "");
3038 Functions.insert(NewFn);
3039 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
3040 NewFn->takeName(OldFn);
3041 NewFn->copyAttributesFrom(OldFn);
3042 // Flag whether the function is using new-debug-info or not.
3043 NewFn->IsNewDbgInfoFormat = OldFn->IsNewDbgInfoFormat;
3044
3045 // Patch the pointer to LLVM function in debug info descriptor.
3046 NewFn->setSubprogram(OldFn->getSubprogram());
3047 OldFn->setSubprogram(nullptr);
3048
3049 // Recompute the parameter attributes list based on the new arguments for
3050 // the function.
3051 LLVMContext &Ctx = OldFn->getContext();
3052 NewFn->setAttributes(AttributeList::get(
3053 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
3054 NewArgumentAttributes));
3055 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
3056
3057 // Remove argmem from the memory effects if we have no more pointer
3058 // arguments, or they are readnone.
3059 MemoryEffects ME = NewFn->getMemoryEffects();
3060 int ArgNo = -1;
3061 if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
3062 ++ArgNo;
3063 return !T->isPtrOrPtrVectorTy() ||
3064 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
3065 })) {
3067 }
3068
3069 // Since we have now created the new function, splice the body of the old
3070 // function right into the new function, leaving the old rotting hulk of the
3071 // function empty.
3072 NewFn->splice(NewFn->begin(), OldFn);
3073
3074 // Fixup block addresses to reference new function.
3075 SmallVector<BlockAddress *, 8u> BlockAddresses;
3076 for (User *U : OldFn->users())
3077 if (auto *BA = dyn_cast<BlockAddress>(U))
3078 BlockAddresses.push_back(BA);
3079 for (auto *BA : BlockAddresses)
3080 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
3081
3082 // Set of all "call-like" instructions that invoke the old function mapped
3083 // to their new replacements.
3085
3086 // Callback to create a new "call-like" instruction for a given one.
3087 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
3088 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
3089 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
3090
3091 // Collect the new argument operands for the replacement call site.
3092 SmallVector<Value *, 16> NewArgOperands;
3093 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
3094 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
3095 unsigned NewFirstArgNum = NewArgOperands.size();
3096 (void)NewFirstArgNum; // only used inside assert.
3097 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3098 ARIs[OldArgNum]) {
3099 if (ARI->ACSRepairCB)
3100 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
3101 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3102 NewArgOperands.size() &&
3103 "ACS repair callback did not provide as many operand as new "
3104 "types were registered!");
3105 // TODO: Expose the attribute set to the ACS repair callback
3106 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3107 AttributeSet());
3108 } else {
3109 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3110 NewArgOperandAttributes.push_back(
3111 OldCallAttributeList.getParamAttrs(OldArgNum));
3112 }
3113 }
3114
3115 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3116 "Mismatch # argument operands vs. # argument operand attributes!");
3117 assert(NewArgOperands.size() == NewFn->arg_size() &&
3118 "Mismatch # argument operands vs. # function arguments!");
3119
3120 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3121 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3122
3123 // Create a new call or invoke instruction to replace the old one.
3124 CallBase *NewCB;
3125 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3126 NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
3127 II->getUnwindDest(), NewArgOperands,
3128 OperandBundleDefs, "", OldCB->getIterator());
3129 } else {
3130 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3131 "", OldCB->getIterator());
3132 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3133 NewCB = NewCI;
3134 }
3135
3136 // Copy over various properties and the new attributes.
3137 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3138 NewCB->setCallingConv(OldCB->getCallingConv());
3139 NewCB->takeName(OldCB);
3140 NewCB->setAttributes(AttributeList::get(
3141 Ctx, OldCallAttributeList.getFnAttrs(),
3142 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3143
3145 LargestVectorWidth);
3146
3147 CallSitePairs.push_back({OldCB, NewCB});
3148 return true;
3149 };
3150
3151 // Use the CallSiteReplacementCreator to create replacement call sites.
3152 bool UsedAssumedInformation = false;
3153 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3154 true, nullptr, UsedAssumedInformation,
3155 /* CheckPotentiallyDead */ true);
3156 (void)Success;
3157 assert(Success && "Assumed call site replacement to succeed!");
3158
3159 // Rewire the arguments.
3160 Argument *OldFnArgIt = OldFn->arg_begin();
3161 Argument *NewFnArgIt = NewFn->arg_begin();
3162 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3163 ++OldArgNum, ++OldFnArgIt) {
3164 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3165 ARIs[OldArgNum]) {
3166 if (ARI->CalleeRepairCB)
3167 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
3168 if (ARI->ReplacementTypes.empty())
3169 OldFnArgIt->replaceAllUsesWith(
3170 PoisonValue::get(OldFnArgIt->getType()));
3171 NewFnArgIt += ARI->ReplacementTypes.size();
3172 } else {
3173 NewFnArgIt->takeName(&*OldFnArgIt);
3174 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3175 ++NewFnArgIt;
3176 }
3177 }
3178
3179 // Eliminate the instructions *after* we visited all of them.
3180 for (auto &CallSitePair : CallSitePairs) {
3181 CallBase &OldCB = *CallSitePair.first;
3182 CallBase &NewCB = *CallSitePair.second;
3183 assert(OldCB.getType() == NewCB.getType() &&
3184 "Cannot handle call sites with different types!");
3185 ModifiedFns.insert(OldCB.getFunction());
3186 Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
3187 OldCB.replaceAllUsesWith(&NewCB);
3188 OldCB.eraseFromParent();
3189 }
3190
3191 // Replace the function in the call graph (if any).
3192 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3193
3194 // If the old function was modified and needed to be reanalyzed, the new one
3195 // does now.
3196 if (ModifiedFns.remove(OldFn))
3197 ModifiedFns.insert(NewFn);
3198
3199 Changed = ChangeStatus::CHANGED;
3200 }
3201
3202 return Changed;
3203}
3204
3205void InformationCache::initializeInformationCache(const Function &CF,
3206 FunctionInfo &FI) {
3207 // As we do not modify the function here we can remove the const
3208 // without breaking implicit assumptions. At the end of the day, we could
3209 // initialize the cache eagerly which would look the same to the users.
3210 Function &F = const_cast<Function &>(CF);
3211
3212 // Walk all instructions to find interesting instructions that might be
3213 // queried by abstract attributes during their initialization or update.
3214 // This has to happen before we create attributes.
3215
3217
3218 // Add \p V to the assume uses map which tracks the number of uses outside
3219 // of "visited" assumes. If no outside uses are left, the value is added to
3220 // the assume-only-use vector.
3221 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3223 if (auto *I = dyn_cast<Instruction>(&V))
3224 Worklist.push_back(I);
3225 while (!Worklist.empty()) {
3226 const Instruction *I = Worklist.pop_back_val();
3227 std::optional<short> &NumUses = AssumeUsesMap[I];
3228 if (!NumUses)
3229 NumUses = I->getNumUses();
3230 NumUses = *NumUses - /* this assume */ 1;
3231 if (*NumUses != 0)
3232 continue;
3233 AssumeOnlyValues.insert(I);
3234 for (const Value *Op : I->operands())
3235 if (auto *OpI = dyn_cast<Instruction>(Op))
3236 Worklist.push_back(OpI);
3237 }
3238 };
3239
3240 for (Instruction &I : instructions(&F)) {
3241 bool IsInterestingOpcode = false;
3242
3243 // To allow easy access to all instructions in a function with a given
3244 // opcode we store them in the InfoCache. As not all opcodes are interesting
3245 // to concrete attributes we only cache the ones that are as identified in
3246 // the following switch.
3247 // Note: There are no concrete attributes now so this is initially empty.
3248 switch (I.getOpcode()) {
3249 default:
3250 assert(!isa<CallBase>(&I) &&
3251 "New call base instruction type needs to be known in the "
3252 "Attributor.");
3253 break;
3254 case Instruction::Call:
3255 // Calls are interesting on their own, additionally:
3256 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3257 // For `must-tail` calls we remember the caller and callee.
3258 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3259 AssumeOnlyValues.insert(Assume);
3260 fillMapFromAssume(*Assume, KnowledgeMap);
3261 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3262 } else if (cast<CallInst>(I).isMustTailCall()) {
3263 FI.ContainsMustTailCall = true;
3264 if (auto *Callee = dyn_cast_if_present<Function>(
3265 cast<CallInst>(I).getCalledOperand()))
3266 getFunctionInfo(*Callee).CalledViaMustTail = true;
3267 }
3268 [[fallthrough]];
3269 case Instruction::CallBr:
3270 case Instruction::Invoke:
3271 case Instruction::CleanupRet:
3272 case Instruction::CatchSwitch:
3273 case Instruction::AtomicRMW:
3274 case Instruction::AtomicCmpXchg:
3275 case Instruction::Br:
3276 case Instruction::Resume:
3277 case Instruction::Ret:
3278 case Instruction::Load:
3279 // The alignment of a pointer is interesting for loads.
3280 case Instruction::Store:
3281 // The alignment of a pointer is interesting for stores.
3282 case Instruction::Alloca:
3283 case Instruction::AddrSpaceCast:
3284 IsInterestingOpcode = true;
3285 }
3286 if (IsInterestingOpcode) {
3287 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3288 if (!Insts)
3289 Insts = new (Allocator) InstructionVectorTy();
3290 Insts->push_back(&I);
3291 }
3292 if (I.mayReadOrWriteMemory())
3293 FI.RWInsts.push_back(&I);
3294 }
3295
3296 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3298 InlineableFunctions.insert(&F);
3299}
3300
3301InformationCache::FunctionInfo::~FunctionInfo() {
3302 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3303 // manually destroy them.
3304 for (auto &It : OpcodeInstMap)
3305 It.getSecond()->~InstructionVectorTy();
3306}
3307
3310 assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
3311 return IndirectlyCallableFunctions;
3312}
3313
3314void Attributor::recordDependence(const AbstractAttribute &FromAA,
3315 const AbstractAttribute &ToAA,
3316 DepClassTy DepClass) {
3317 if (DepClass == DepClassTy::NONE)
3318 return;
3319 // If we are outside of an update, thus before the actual fixpoint iteration
3320 // started (= when we create AAs), we do not track dependences because we will
3321 // put all AAs into the initial worklist anyway.
3322 if (DependenceStack.empty())
3323 return;
3324 if (FromAA.getState().isAtFixpoint())
3325 return;
3326 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3327}
3328
3329void Attributor::rememberDependences() {
3330 assert(!DependenceStack.empty() && "No dependences to remember!");
3331
3332 for (DepInfo &DI : *DependenceStack.back()) {
3333 assert((DI.DepClass == DepClassTy::REQUIRED ||
3334 DI.DepClass == DepClassTy::OPTIONAL) &&
3335 "Expected required or optional dependence (1 bit)!");
3336 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3337 DepAAs.insert(AbstractAttribute::DepTy(
3338 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3339 }
3340}
3341
3342template <Attribute::AttrKind AK, typename AAType>
3343void Attributor::checkAndQueryIRAttr(const IRPosition &IRP,
3344 AttributeSet Attrs) {
3345 bool IsKnown;
3346 if (!Attrs.hasAttribute(AK))
3347 if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3348 if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3349 IsKnown))
3350 getOrCreateAAFor<AAType>(IRP);
3351}
3352
3353void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3354 if (!VisitedFunctions.insert(&F).second)
3355 return;
3356 if (F.isDeclaration())
3357 return;
3358
3359 // In non-module runs we need to look at the call sites of a function to
3360 // determine if it is part of a must-tail call edge. This will influence what
3361 // attributes we can derive.
3362 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3363 if (!isModulePass() && !FI.CalledViaMustTail) {
3364 for (const Use &U : F.uses())
3365 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3366 if (CB->isCallee(&U) && CB->isMustTailCall())
3367 FI.CalledViaMustTail = true;
3368 }
3369
3371 bool IsIPOAmendable = isFunctionIPOAmendable(F);
3372 auto Attrs = F.getAttributes();
3373 auto FnAttrs = Attrs.getFnAttrs();
3374
3375 // Check for dead BasicBlocks in every function.
3376 // We need dead instruction detection because we do not want to deal with
3377 // broken IR in which SSA rules do not apply.
3378 getOrCreateAAFor<AAIsDead>(FPos);
3379
3380 // Every function might contain instructions that cause "undefined
3381 // behavior".
3382 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3383
3384 // Every function might be applicable for Heap-To-Stack conversion.
3385 if (EnableHeapToStack)
3386 getOrCreateAAFor<AAHeapToStack>(FPos);
3387
3388 // Every function might be "must-progress".
3389 checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3390
3391 // Every function might be "no-free".
3392 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3393
3394 // Every function might be "will-return".
3395 checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3396
3397 // Every function might be marked "nosync"
3398 checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3399
3400 // Everything that is visible from the outside (= function, argument, and
3401 // return positions) cannot be changed if the function is not IPO amendable.
3402 // We can, however, analyze the code inside.
3403 if (IsIPOAmendable) {
3404
3405 // Every function can be nounwind.
3406 checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3407
3408 // Every function might be "no-return".
3409 checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3410
3411 // Every function might be "no-recurse".
3412 checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3413
3414 // Every function can be "non-convergent".
3415 if (Attrs.hasFnAttr(Attribute::Convergent))
3416 getOrCreateAAFor<AANonConvergent>(FPos);
3417
3418 // Every function might be "readnone/readonly/writeonly/...".
3419 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3420
3421 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3422 getOrCreateAAFor<AAMemoryLocation>(FPos);
3423
3424 // Every function can track active assumptions.
3425 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3426
3427 // If we're not using a dynamic mode for float, there's nothing worthwhile
3428 // to infer. This misses the edge case denormal-fp-math="dynamic" and
3429 // denormal-fp-math-f32=something, but that likely has no real world use.
3430 DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
3431 if (Mode.Input == DenormalMode::Dynamic ||
3432 Mode.Output == DenormalMode::Dynamic)
3433 getOrCreateAAFor<AADenormalFPMath>(FPos);
3434
3435    // Return attributes are only appropriate if the return type is non-void.
3436 Type *ReturnType = F.getReturnType();
3437 if (!ReturnType->isVoidTy()) {
3438      IRPosition RetPos = IRPosition::returned(F);
3439      AttributeSet RetAttrs = Attrs.getRetAttrs();
3440
3441 // Every returned value might be dead.
3442 getOrCreateAAFor<AAIsDead>(RetPos);
3443
3444 // Every function might be simplified.
3445 bool UsedAssumedInformation = false;
3446 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3448
3449 // Every returned value might be marked noundef.
3450 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3451
3452 if (ReturnType->isPointerTy()) {
3453
3454 // Every function with pointer return type might be marked align.
3455 getOrCreateAAFor<AAAlign>(RetPos);
3456
3457 // Every function with pointer return type might be marked nonnull.
3458 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3459
3460 // Every function with pointer return type might be marked noalias.
3461 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3462
3463 // Every function with pointer return type might be marked
3464 // dereferenceable.
3465 getOrCreateAAFor<AADereferenceable>(RetPos);
3466 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3467 getOrCreateAAFor<AANoFPClass>(RetPos);
3468 }
3469 }
3470 }
3471
3472 for (Argument &Arg : F.args()) {
3473 IRPosition ArgPos = IRPosition::argument(Arg);
3474 auto ArgNo = Arg.getArgNo();
3475 AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3476
3477 if (!IsIPOAmendable) {
3478 if (Arg.getType()->isPointerTy())
3479 // Every argument with pointer type might be marked nofree.
3480 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3481 continue;
3482 }
3483
3484 // Every argument might be simplified. We have to go through the
3485 // Attributor interface though as outside AAs can register custom
3486 // simplification callbacks.
3487 bool UsedAssumedInformation = false;
3488 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3490
3491 // Every argument might be dead.
3492 getOrCreateAAFor<AAIsDead>(ArgPos);
3493
3494 // Every argument might be marked noundef.
3495 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3496
3497 if (Arg.getType()->isPointerTy()) {
3498 // Every argument with pointer type might be marked nonnull.
3499 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3500
3501 // Every argument with pointer type might be marked noalias.
3502 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3503
3504 // Every argument with pointer type might be marked dereferenceable.
3505 getOrCreateAAFor<AADereferenceable>(ArgPos);
3506
3507 // Every argument with pointer type might be marked align.
3508 getOrCreateAAFor<AAAlign>(ArgPos);
3509
3510 // Every argument with pointer type might be marked nocapture.
3511 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(ArgPos, ArgAttrs);
3512
3513 // Every argument with pointer type might be marked
3514 // "readnone/readonly/writeonly/..."
3515 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3516
3517 // Every argument with pointer type might be marked nofree.
3518 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3519
3520 // Every argument with pointer type might be privatizable (or
3521 // promotable)
3522 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3523 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3524 getOrCreateAAFor<AANoFPClass>(ArgPos);
3525 }
3526 }
3527
3528 auto CallSitePred = [&](Instruction &I) -> bool {
3529 auto &CB = cast<CallBase>(I);
3530 IRPosition CBInstPos = IRPosition::inst(CB);
3531    IRPosition CBFnPos = IRPosition::callsite_function(CB);
3532
3533    // Call sites might be dead if they have no side effects and no live
3534    // users. The return value might be dead if there are no live users.
3535 getOrCreateAAFor<AAIsDead>(CBInstPos);
3536
3537 Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3538 // TODO: Even if the callee is not known now we might be able to simplify
3539 // the call/callee.
3540 if (!Callee) {
3541 getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
3542 return true;
3543 }
3544
3545 // Every call site can track active assumptions.
3546 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3547
3548 // Skip declarations except if annotations on their call sites were
3549 // explicitly requested.
3550 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3551 !Callee->hasMetadata(LLVMContext::MD_callback))
3552 return true;
3553
3554 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3555      IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3556      bool UsedAssumedInformation = false;
3557 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3559
3560 if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3561 getOrCreateAAFor<AANoFPClass>(CBInstPos);
3562 }
3563
3564 const AttributeList &CBAttrs = CBFnPos.getAttrList();
3565 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3566
3567      IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3568      AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3569
3570 // Every call site argument might be dead.
3571 getOrCreateAAFor<AAIsDead>(CBArgPos);
3572
3573 // Call site argument might be simplified. We have to go through the
3574 // Attributor interface though as outside AAs can register custom
3575 // simplification callbacks.
3576 bool UsedAssumedInformation = false;
3577 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3578                           AA::Intraprocedural);
3579
3580 // Every call site argument might be marked "noundef".
3581 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3582
3583 Type *ArgTy = CB.getArgOperand(I)->getType();
3584
3585 if (!ArgTy->isPointerTy()) {
3586        if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3587          getOrCreateAAFor<AANoFPClass>(CBArgPos);
3588
3589 continue;
3590 }
3591
3592 // Call site argument attribute "non-null".
3593 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3594
3595 // Call site argument attribute "nocapture".
3596 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(CBArgPos,
3597 CBArgAttrs);
3598
3599 // Call site argument attribute "no-alias".
3600 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3601
3602 // Call site argument attribute "dereferenceable".
3603 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3604
3605 // Call site argument attribute "align".
3606 getOrCreateAAFor<AAAlign>(CBArgPos);
3607
3608 // Call site argument attribute
3609 // "readnone/readonly/writeonly/..."
3610 if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3611 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3612
3613 // Call site argument attribute "nofree".
3614 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3615 }
3616 return true;
3617 };
3618
3619 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3620 [[maybe_unused]] bool Success;
3621 bool UsedAssumedInformation = false;
3622  Success = checkForAllInstructionsImpl(
3623      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3624 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3625 (unsigned)Instruction::Call},
3626 UsedAssumedInformation);
3627 assert(Success && "Expected the check call to be successful!");
3628
3629 auto LoadStorePred = [&](Instruction &I) -> bool {
3630 if (auto *LI = dyn_cast<LoadInst>(&I)) {
3631 getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3632 if (SimplifyAllLoads)
3633        getAssumedSimplified(IRPosition::value(I), nullptr,
3634                             UsedAssumedInformation, AA::Intraprocedural);
3635 getOrCreateAAFor<AAAddressSpace>(
3636 IRPosition::value(*LI->getPointerOperand()));
3637 } else {
3638 auto &SI = cast<StoreInst>(I);
3639 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3640 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3641 UsedAssumedInformation, AA::Intraprocedural);
3642 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3643 getOrCreateAAFor<AAAddressSpace>(
3644 IRPosition::value(*SI.getPointerOperand()));
3645 }
3646 return true;
3647 };
3648  Success = checkForAllInstructionsImpl(
3649      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3650 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3651 UsedAssumedInformation);
3652 assert(Success && "Expected the check call to be successful!");
3653
3654 // AllocaInstPredicate
3655 auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
3656 getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
3657 return true;
3658 };
3659
3660  Success = checkForAllInstructionsImpl(
3661      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
3662 {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
3663 assert(Success && "Expected the check call to be successful!");
3664}
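// Illustrative driver (sketch; it mirrors runAttributorOnFunctions() defined
// further below, so the types and calls come from this file, while the
// surrounding module M and FunctionAnalysisManager FAM are assumed to exist):
//
//   SetVector<Function *> Functions;
//   for (Function &Fn : M)
//     Functions.insert(&Fn);
//   AnalysisGetter AG(FAM);
//   BumpPtrAllocator Allocator;
//   InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
//   CallGraphUpdater CGUpdater;
//   AttributorConfig AC(CGUpdater);
//   AC.IsModulePass = true;
//   Attributor A(Functions, InfoCache, AC);
//   for (Function *Fn : Functions)
//     A.identifyDefaultAbstractAttributes(*Fn);
//   ChangeStatus Changed = A.run();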
3665
3666bool Attributor::isClosedWorldModule() const {
3667  if (CloseWorldAssumption.getNumOccurrences())
3668 return CloseWorldAssumption;
3669 return isModulePass() && Configuration.IsClosedWorldModule;
3670}
3671
3672/// Helpers to ease debugging through output streams and print calls.
3673///
3674///{
3675raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3676  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3677}
3678
3679raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3680  switch (AP) {
3681  case IRPosition::IRP_INVALID:
3682    return OS << "inv";
3683  case IRPosition::IRP_FLOAT:
3684    return OS << "flt";
3685  case IRPosition::IRP_RETURNED:
3686    return OS << "fn_ret";
3687  case IRPosition::IRP_CALL_SITE_RETURNED:
3688    return OS << "cs_ret";
3689  case IRPosition::IRP_FUNCTION:
3690    return OS << "fn";
3691  case IRPosition::IRP_CALL_SITE:
3692    return OS << "cs";
3693  case IRPosition::IRP_ARGUMENT:
3694    return OS << "arg";
3695  case IRPosition::IRP_CALL_SITE_ARGUMENT:
3696    return OS << "cs_arg";
3697  }
3698 llvm_unreachable("Unknown attribute position!");
3699}
3700
3701raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3702  const Value &AV = Pos.getAssociatedValue();
3703 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3704 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3705
3706 if (Pos.hasCallBaseContext())
3707 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3708 return OS << "}";
3709}
3710
3711raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3712  OS << "range-state(" << S.getBitWidth() << ")<";
3713 S.getKnown().print(OS);
3714 OS << " / ";
3715 S.getAssumed().print(OS);
3716 OS << ">";
3717
3718 return OS << static_cast<const AbstractState &>(S);
3719}
3720
3721raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3722  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3723}
3724
3725raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3726  AA.print(OS);
3727 return OS;
3728}
3729
3730raw_ostream &llvm::operator<<(raw_ostream &OS,
3731                              const PotentialConstantIntValuesState &S) {
3732  OS << "set-state(< {";
3733 if (!S.isValidState())
3734 OS << "full-set";
3735 else {
3736 for (const auto &It : S.getAssumedSet())
3737 OS << It << ", ";
3738 if (S.undefIsContained())
3739 OS << "undef ";
3740 }
3741 OS << "} >)";
3742
3743 return OS;
3744}
3745
3746raw_ostream &llvm::operator<<(raw_ostream &OS,
3747                              const PotentialLLVMValuesState &S) {
3748 OS << "set-state(< {";
3749 if (!S.isValidState())
3750 OS << "full-set";
3751 else {
3752 for (const auto &It : S.getAssumedSet()) {
3753 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3754 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3755 else
3756 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3757 }
3758 if (S.undefIsContained())
3759 OS << "undef ";
3760 }
3761 OS << "} >)";
3762
3763 return OS;
3764}
3765
3766void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
3767  OS << "[";
3768 OS << getName();
3769 OS << "] for CtxI ";
3770
3771 if (auto *I = getCtxI()) {
3772 OS << "'";
3773 I->print(OS);
3774 OS << "'";
3775 } else
3776 OS << "<<null inst>>";
3777
3778 OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3779 << '\n';
3780}
3781
3782void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3783  print(OS);
3784
3785 for (const auto &DepAA : Deps) {
3786 auto *AA = DepAA.getPointer();
3787 OS << " updates ";
3788 AA->print(OS);
3789 }
3790
3791 OS << '\n';
3792}
3793
3794raw_ostream &llvm::operator<<(raw_ostream &OS,
3795                              const AAPointerInfo::Access &Acc) {
3796 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3797 if (Acc.getLocalInst() != Acc.getRemoteInst())
3798 OS << " via " << *Acc.getLocalInst();
3799 if (Acc.getContent()) {
3800 if (*Acc.getContent())
3801 OS << " [" << **Acc.getContent() << "]";
3802 else
3803 OS << " [ <unknown> ]";
3804 }
3805 return OS;
3806}
3807///}
3808
3809/// ----------------------------------------------------------------------------
3810/// Pass (Manager) Boilerplate
3811/// ----------------------------------------------------------------------------
3812
3813static bool runAttributorOnFunctions(InformationCache &InfoCache,
3814                                     SetVector<Function *> &Functions,
3815 AnalysisGetter &AG,
3816 CallGraphUpdater &CGUpdater,
3817 bool DeleteFns, bool IsModulePass) {
3818 if (Functions.empty())
3819 return false;
3820
3821 LLVM_DEBUG({
3822 dbgs() << "[Attributor] Run on module with " << Functions.size()
3823 << " functions:\n";
3824 for (Function *Fn : Functions)
3825 dbgs() << " - " << Fn->getName() << "\n";
3826 });
3827
3828 // Create an Attributor and initially empty information cache that is filled
3829 // while we identify default attribute opportunities.
3830 AttributorConfig AC(CGUpdater);
3831 AC.IsModulePass = IsModulePass;
3832 AC.DeleteFns = DeleteFns;
3833
3834 /// Tracking callback for specialization of indirect calls.
3835  DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
3836      IndirectCalleeTrackingMap;
3837 if (MaxSpecializationPerCB.getNumOccurrences()) {
3838 AC.IndirectCalleeSpecializationCallback =
3839 [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
3840 Function &Callee) {
3841 if (MaxSpecializationPerCB == 0)
3842 return false;
3843 auto &Set = IndirectCalleeTrackingMap[&CB];
3844 if (!Set)
3845 Set = std::make_unique<SmallPtrSet<Function *, 8>>();
3846 if (Set->size() >= MaxSpecializationPerCB)
3847 return Set->contains(&Callee);
3848 Set->insert(&Callee);
3849 return true;
3850 };
3851 }
3852
3853 Attributor A(Functions, InfoCache, AC);
3854
3855 // Create shallow wrappers for all functions that are not IPO amendable
3856  if (AllowShallowWrappers)
3857    for (Function *F : Functions)
3858 if (!A.isFunctionIPOAmendable(*F))
3859        Attributor::createShallowWrapper(*F);
3860
3861 // Internalize non-exact functions
3862 // TODO: for now we eagerly internalize functions without calculating the
3863 // cost, we need a cost interface to determine whether internalizing
3864 // a function is "beneficial"
3865 if (AllowDeepWrapper) {
3866 unsigned FunSize = Functions.size();
3867 for (unsigned u = 0; u < FunSize; u++) {
3868 Function *F = Functions[u];
3869 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3870 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3871        Function *NewF = Attributor::internalizeFunction(*F);
3872        assert(NewF && "Could not internalize function.");
3873 Functions.insert(NewF);
3874
3875 // Update call graph
3876 CGUpdater.replaceFunctionWith(*F, *NewF);
3877 for (const Use &U : NewF->uses())
3878 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3879 auto *CallerF = CB->getCaller();
3880 CGUpdater.reanalyzeFunction(*CallerF);
3881 }
3882 }
3883 }
3884 }
3885
3886 for (Function *F : Functions) {
3887 if (F->hasExactDefinition())
3888 NumFnWithExactDefinition++;
3889 else
3890 NumFnWithoutExactDefinition++;
3891
3892    // We look at internal functions only on-demand, but if any use is not a
3893    // direct call or is outside the current set of analyzed functions, we have
3894    // to do it eagerly.
3895 if (F->hasLocalLinkage()) {
3896 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3897 const auto *CB = dyn_cast<CallBase>(U.getUser());
3898 return CB && CB->isCallee(&U) &&
3899 Functions.count(const_cast<Function *>(CB->getCaller()));
3900 }))
3901 continue;
3902 }
3903
3904 // Populate the Attributor with abstract attribute opportunities in the
3905 // function and the information cache with IR information.
3906 A.identifyDefaultAbstractAttributes(*F);
3907 }
3908
3909 ChangeStatus Changed = A.run();
3910
3911 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3912 << " functions, result: " << Changed << ".\n");
3913 return Changed == ChangeStatus::CHANGED;
3914}
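// Illustrative invocations (sketch): the cl::opt flags referenced above can be
// passed to opt; the pass names "attributor" and "attributor-cgscc" are
// assumed to be the ones registered for the new pass manager.
//
//   opt -passes=attributor -attributor-max-iterations=16 \
//       -attributor-max-specializations-per-call-base=2 in.ll -S
//   opt -passes=attributor-cgscc -attributor-allow-shallow-wrappers in.ll -S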
3915
3916static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
3917                                          SetVector<Function *> &Functions,
3918 AnalysisGetter &AG,
3919 CallGraphUpdater &CGUpdater,
3920                                          FunctionAnalysisManager &FAM,
3921                                          bool IsModulePass) {
3922 if (Functions.empty())
3923 return false;
3924
3925 LLVM_DEBUG({
3926 dbgs() << "[AttributorLight] Run on module with " << Functions.size()
3927 << " functions:\n";
3928 for (Function *Fn : Functions)
3929 dbgs() << " - " << Fn->getName() << "\n";
3930 });
3931
3932 // Create an Attributor and initially empty information cache that is filled
3933 // while we identify default attribute opportunities.
3934 AttributorConfig AC(CGUpdater);
3935 AC.IsModulePass = IsModulePass;
3936 AC.DeleteFns = false;
3937 DenseSet<const char *> Allowed(
3943 AC.Allowed = &Allowed;
3944 AC.UseLiveness = false;
3945
3946 Attributor A(Functions, InfoCache, AC);
3947
3948 for (Function *F : Functions) {
3949 if (F->hasExactDefinition())
3950 NumFnWithExactDefinition++;
3951 else
3952 NumFnWithoutExactDefinition++;
3953
3954    // We look at internal functions only on-demand, but if any use is not a
3955    // direct call or is outside the current set of analyzed functions, we have
3956    // to do it eagerly.
3957 if (F->hasLocalLinkage()) {
3958 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3959 const auto *CB = dyn_cast<CallBase>(U.getUser());
3960 return CB && CB->isCallee(&U) &&
3961 Functions.count(const_cast<Function *>(CB->getCaller()));
3962 }))
3963 continue;
3964 }
3965
3966 // Populate the Attributor with abstract attribute opportunities in the
3967 // function and the information cache with IR information.
3968 A.identifyDefaultAbstractAttributes(*F);
3969 }
3970
3971 ChangeStatus Changed = A.run();
3972
3973 if (Changed == ChangeStatus::CHANGED) {
3974 // Invalidate analyses for modified functions so that we don't have to
3975 // invalidate all analyses for all functions in this SCC.
3976 PreservedAnalyses FuncPA;
3977 // We haven't changed the CFG for modified functions.
3978 FuncPA.preserveSet<CFGAnalyses>();
3979 for (Function *Changed : A.getModifiedFunctions()) {
3980 FAM.invalidate(*Changed, FuncPA);
3981 // Also invalidate any direct callers of changed functions since analyses
3982 // may care about attributes of direct callees. For example, MemorySSA
3983 // cares about whether or not a call's callee modifies memory and queries
3984 // that through function attributes.
3985 for (auto *U : Changed->users()) {
3986 if (auto *Call = dyn_cast<CallBase>(U)) {
3987 if (Call->getCalledFunction() == Changed)
3988 FAM.invalidate(*Call->getFunction(), FuncPA);
3989 }
3990 }
3991 }
3992 }
3993 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3994 << " functions, result: " << Changed << ".\n");
3995 return Changed == ChangeStatus::CHANGED;
3996}
3997
3998void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3999
4000void AADepGraph::dumpGraph() {
4001  static std::atomic<int> CallTimes;
4002 std::string Prefix;
4003
4004 if (!DepGraphDotFileNamePrefix.empty())
4005    Prefix = DepGraphDotFileNamePrefix;
4006  else
4007 Prefix = "dep_graph";
4008 std::string Filename =
4009 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
4010
4011 outs() << "Dependency graph dump to " << Filename << ".\n";
4012
4013 std::error_code EC;
4014
4015 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
4016 if (!EC)
4017 llvm::WriteGraph(File, this);
4018
4019 CallTimes++;
4020}
4021
4022void AADepGraph::print() {
4023  for (auto DepAA : SyntheticRoot.Deps)
4024 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
4025}
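// Illustrative debugging usage (sketch): the graph dump and print routines
// above are driven by cl::opt flags defined earlier in this file, e.g.
//
//   opt -passes=attributor -attributor-dump-dep-graph \
//       -attributor-depgraph-dot-filename-prefix=attr_deps in.ll -disable-output
//
// which emits attr_deps_0.dot, attr_deps_1.dot, ... via AADepGraph::dumpGraph();
// -attributor-view-dep-graph and -attributor-print-dep are the interactive and
// textual counterparts.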
4026
4027PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
4028  FunctionAnalysisManager &FAM =
4029      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4030  AnalysisGetter AG(FAM);
4031
4032 SetVector<Function *> Functions;
4033 for (Function &F : M)
4034 Functions.insert(&F);
4035
4036 CallGraphUpdater CGUpdater;
4037  BumpPtrAllocator Allocator;
4038  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4039 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4040 /* DeleteFns */ true, /* IsModulePass */ true)) {
4041 // FIXME: Think about passes we will preserve and add them here.
4042 return PreservedAnalyses::none();
4043 }
4044 return PreservedAnalyses::all();
4045}
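// Illustrative pipeline setup (sketch): AttributorPass is a new-pass-manager
// module pass and can be scheduled directly; the ModulePassManager MPM and
// ModuleAnalysisManager MAM below are generic NPM boilerplate assumed to be
// set up elsewhere, not code from this file.
//
//   ModulePassManager MPM;
//   MPM.addPass(AttributorPass());
//   MPM.run(M, MAM);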
4046
4047PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
4048                                           CGSCCAnalysisManager &AM,
4049                                           LazyCallGraph &CG,
4050 CGSCCUpdateResult &UR) {
4051  FunctionAnalysisManager &FAM =
4052      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4053 AnalysisGetter AG(FAM);
4054
4055 SetVector<Function *> Functions;
4056 for (LazyCallGraph::Node &N : C)
4057 Functions.insert(&N.getFunction());
4058
4059 if (Functions.empty())
4060 return PreservedAnalyses::all();
4061
4062 Module &M = *Functions.back()->getParent();
4063 CallGraphUpdater CGUpdater;
4064 CGUpdater.initialize(CG, C, AM, UR);
4065  BumpPtrAllocator Allocator;
4066  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4067 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4068 /* DeleteFns */ false,
4069 /* IsModulePass */ false)) {
4070 // FIXME: Think about passes we will preserve and add them here.
4071    PreservedAnalyses PA;
4072    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4073    return PA;
4074 }
4075 return PreservedAnalyses::all();
4076}
4077
4078PreservedAnalyses AttributorLightPass::run(Module &M,
4079                                           ModuleAnalysisManager &AM) {
4080  FunctionAnalysisManager &FAM =
4081      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4082  AnalysisGetter AG(FAM, /* CachedOnly */ true);
4083
4084 SetVector<Function *> Functions;
4085 for (Function &F : M)
4086 Functions.insert(&F);
4087
4088 CallGraphUpdater CGUpdater;
4089  BumpPtrAllocator Allocator;
4090  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4091 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4092 /* IsModulePass */ true)) {
4093    PreservedAnalyses PA;
4094    // We have not added or removed functions.
4095    PA.preserve<FunctionAnalysisManagerModuleProxy>();
4096    // We already invalidated all relevant function analyses above.
4097    PA.preserveSet<AllAnalysesOn<Function>>();
4098    return PA;
4099 }
4100 return PreservedAnalyses::all();
4101}
4102
4103PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
4104                                                CGSCCAnalysisManager &AM,
4105                                                LazyCallGraph &CG,
4106 CGSCCUpdateResult &UR) {
4107  FunctionAnalysisManager &FAM =
4108      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4109 AnalysisGetter AG(FAM);
4110
4111 SetVector<Function *> Functions;
4112 for (LazyCallGraph::Node &N : C)
4113 Functions.insert(&N.getFunction());
4114
4115 if (Functions.empty())
4116 return PreservedAnalyses::all();
4117
4118 Module &M = *Functions.back()->getParent();
4119 CallGraphUpdater CGUpdater;
4120 CGUpdater.initialize(CG, C, AM, UR);
4121  BumpPtrAllocator Allocator;
4122  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4123 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4124 /* IsModulePass */ false)) {
4125    PreservedAnalyses PA;
4126    // We have not added or removed functions.
4127    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4128    // We already invalidated all relevant function analyses above.
4129    PA.preserveSet<AllAnalysesOn<Function>>();
4130    return PA;
4131 }
4132 return PreservedAnalyses::all();
4133}
4134namespace llvm {
4135
4136template <> struct GraphTraits<AADepGraphNode *> {
4140
4141 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
4142 static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
4143
4147
4148 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
4149
4150 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
4151};
4152
4153template <>
4154struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
4155  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
4156
4159
4160 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
4161
4162 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
4163};
4164
4165template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
4166  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
4167
4168 static std::string getNodeLabel(const AADepGraphNode *Node,
4169 const AADepGraph *DG) {
4170 std::string AAString;
4171 raw_string_ostream O(AAString);
4172 Node->print(O);
4173 return AAString;
4174 }
4175};
4176
4177} // end namespace llvm
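// Note (sketch): the GraphTraits and DOTGraphTraits specializations above are
// what llvm::ViewGraph() and llvm::WriteGraph(), called from
// AADepGraph::viewGraph() and AADepGraph::dumpGraph() earlier in this file,
// use to walk the dependency graph and label its nodes, e.g.
//
//   llvm::WriteGraph(llvm::outs(), DG);   // DG is an AADepGraph*, assumed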
#define Success
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
Rewrite undef for PHI
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
#define VERBOSE_DEBUG_TYPE
Definition: Attributor.cpp:67
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction *ToI, const Function &ToFn, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet, std::function< bool(const Function &F)> GoBackwardsCB)
Definition: Attributor.cpp:661
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma seperated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > *PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
Definition: Attributor.cpp:365
static bool runAttributorLightOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, FunctionAnalysisManager &FAM, bool IsModulePass)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< unsigned > MaxSpecializationPerCB("attributor-max-specializations-per-call-base", cl::Hidden, cl::desc("Maximal number of callees specialized for " "a call base"), cl::init(UINT32_MAX))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeSet AttrSet, bool ForceReplace, AttrBuilder &AB)
Return true if the information provided by Attr was added to the attribute set AttrSet.
Definition: Attributor.cpp:956
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
Definition: Attributor.cpp:946
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
Definition: Attributor.cpp:609
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma seperated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static cl::opt< bool > CloseWorldAssumption("attributor-assume-closed-world", cl::Hidden, cl::desc("Should a closed world be assumed, or not. Default if not set."))
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
Definition: DebugCounter.h:182
#define LLVM_DEBUG(X)
Definition: Debug.h:101
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition: Debug.h:64
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
Rewrite Partial Register Uses
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Basic Register Allocator
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isSimple(Instruction *I)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
Class for arbitrary precision integers.
Definition: APInt.h:76
AbstractCallSite.
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:47
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:348
void invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Invalidate cached analyses for an IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:500
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
const Function * getParent() const
Definition: Argument.h:40
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition: Argument.h:46
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:783
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
MemoryEffects getMemoryEffects() const
Definition: Attributes.cpp:920
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
Definition: Attributes.cpp:841
Attribute getAttribute(Attribute::AttrKind Kind) const
Return the attribute object.
Definition: Attributes.cpp:849
bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
Definition: Attributes.cpp:308
bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
Definition: Attributes.cpp:300
bool isIntAttribute() const
Return true if the attribute is an integer attribute.
Definition: Attributes.cpp:304
uint64_t getValueAsInt() const
Return the attribute's value as an integer.
Definition: Attributes.cpp:328
StringRef getKindAsString() const
Return the attribute's kind as a string.
Definition: Attributes.cpp:342
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:93
Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
Definition: Attributes.cpp:320
MemoryEffects getMemoryEffects() const
Returns memory effects.
Definition: Attributes.cpp:435
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:349
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
@ None
No attributes have been set.
Definition: Attributes.h:87
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Instruction & front() const
Definition: BasicBlock.h:452
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:198
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
Definition: BasicBlock.cpp:447
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:205
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:220
static BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
Definition: Constants.cpp:1846
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Represents analyses that only rely on functions' control flow.
Definition: Analysis.h:70
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1455
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1765
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1812
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1761
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
Definition: InstrTypes.h:1696
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
Definition: InstrTypes.h:1784
unsigned arg_size() const
Definition: InstrTypes.h:1646
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1780
Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
void removeFunction(Function &Fn)
Remove Fn from the call graph.
void removeCallSite(CallBase &CS)
Remove the call site CS from the call graph.
void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
void reanalyzeFunction(Function &Fn)
After an CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
bool replaceCallSite(CallBase &OldCS, CallBase &NewCS)
Replace OldCS with the new call site NewCS.
void initialize(CallGraph &CG, CallGraphSCC &SCC)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void setTailCall(bool IsTc=true)
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:1016
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
Definition: Constants.cpp:2072
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2098
void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static bool shouldExecute(unsigned CounterName)
Definition: DebugCounter.h:72
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:202
bool empty() const
Definition: DenseMap.h:98
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:145
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Definition: DerivedTypes.h:103
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
Definition: Metadata.cpp:1824
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition: Function.h:162
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition: Function.h:734
const BasicBlock & getEntryBlock() const
Definition: Function.h:782
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:200
iterator_range< arg_iterator > args()
Definition: Function.h:837
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition: Metadata.cpp:1828
MemoryEffects getMemoryEffects() const
Definition: Function.cpp:801
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
Definition: Function.cpp:681
bool IsNewDbgInfoFormat
Is this function using intrinsics to record the position of debugging information,...
Definition: Function.h:106
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:338
iterator begin()
Definition: Function.h:798
arg_iterator arg_begin()
Definition: Function.h:813
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition: Function.h:341
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:342
size_t arg_size() const
Definition: Function.h:846
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:205
void setMemoryEffects(MemoryEffects ME)
Definition: Function.cpp:804
Argument * getArg(unsigned i) const
Definition: Function.h:831
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition: Function.h:213
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:669
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition: Function.cpp:785
bool hasMetadata() const
Return true if this value has any metadata attached to it.
Definition: Value.h:589
void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
Definition: Metadata.cpp:1519
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition: Globals.cpp:274
LinkageTypes getLinkage() const
Definition: GlobalValue.h:545
bool hasLocalLinkage() const
Definition: GlobalValue.h:527
void setLinkage(LinkageTypes LT)
Definition: GlobalValue.h:536
unsigned getAddressSpace() const
Definition: GlobalValue.h:205
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:655
void setDSOLocal(bool Local)
Definition: GlobalValue.h:303
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
@ DefaultVisibility
The GV is visible.
Definition: GlobalValue.h:67
void setVisibility(VisibilityTypes V)
Definition: GlobalValue.h:254
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
Definition: GlobalValue.h:424
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition: GlobalValue.h:60
@ InternalLinkage
Rename collisions when linking (static functions).
Definition: GlobalValue.h:59
bool isSuccess() const
Definition: InlineCost.h:188
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR un...
Definition: PassManager.h:658
const BasicBlock * getParent() const
Definition: Instruction.h:151
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:84
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Invoke instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
Definition: Instructions.h:184
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
bool doesAccessArgPointees() const
Whether this function may access argument memory.
Definition: ModRef.h:206
static MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition: Module.h:605
Diagnostic information for missed-optimization remarks.
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
PointerTy getPointer() const
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1827
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:109
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:112
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:115
void preserveSet()
Mark an analysis set as preserved.
Definition: Analysis.h:144
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:129
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal, BasicBlock::iterator InsertBefore)
A vector that has set insertion semantics.
Definition: SetVector.h:57
ArrayRef< value_type > getArrayRef() const
Definition: SetVector.h:84
bool remove(const value_type &X)
Remove an item from the set vector.
Definition: SetVector.h:188
size_type size() const
Determine the number of elements in the SetVector.
Definition: SetVector.h:98
const value_type & front() const
Return the first element of the SetVector.
Definition: SetVector.h:143
const value_type & back() const
Return the last element of the SetVector.
Definition: SetVector.h:149
typename vector_type::const_iterator iterator
Definition: SetVector.h:69
iterator end()
Get an iterator to the end of the SetVector.
Definition: SetVector.h:113
void clear()
Completely clear the SetVector.
Definition: SetVector.h:273
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
Definition: SetVector.h:264
bool empty() const
Determine if the SetVector is empty or not.
Definition: SetVector.h:93
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition: SetVector.h:103
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition: SetVector.h:162
size_type size() const
Definition: SmallPtrSet.h:94
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
A SetVector that performs no allocations if smaller than a certain size.
Definition: SetVector.h:370
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
A visitor class for IR positions.
Definition: Attributor.h:1110
SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1808
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition: Value.cpp:693
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
Value handle that is nullable, but tries to track the Value.
Definition: ValueHandle.h:204
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:97
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
Definition: ilist_node.h:109
iterator insert(iterator where, pointer New)
Definition: ilist.h:165
A raw_ostream that writes to a file descriptor.
Definition: raw_ostream.h:470
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:660
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
Definition: Attributor.cpp:654
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
Definition: Attributor.cpp:649
std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
Definition: Attributor.cpp:340
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is a valid at the position of VAC, that is a constant,...
Definition: Attributor.cpp:291
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
Definition: Attributor.cpp:836
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
Definition: Attributor.cpp:232
bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
Definition: Attributor.cpp:600
bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
Definition: Attributor.cpp:890
bool isGPU(const Module &M)
Return true iff M target a GPU (and we can use GPU AS reasoning).
Definition: Attributor.cpp:201
Constant * getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA, Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
Definition: Attributor.cpp:243
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition: Attributor.h:179
@ Intraprocedural
Definition: Attributor.h:180
@ Interprocedural
Definition: Attributor.h:181
bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
Definition: Attributor.cpp:281
bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
Definition: Attributor.cpp:817
bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
Definition: Attributor.cpp:206
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
Definition: Attributor.cpp:590
Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
Definition: Attributor.cpp:317
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
void updateMinLegalVectorWidthAttr(Function &Fn, uint64_t Width)
Update min-legal-vector-width if it is in Attribute and less than Width.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
@ CommaSeparated
Definition: CommandLine.h:164
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:470
DiagnosticInfoOptimizationBase::Argument NV
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r '.
Definition: FileSystem.h:768
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition: Local.cpp:533
unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
Definition: Attributor.cpp:110
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition: Local.cpp:129
APInt operator&(APInt a, const APInt &b)
Definition: APInt.h:2053
void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace the contents of every block in BBs with a single unreachable instruction.
@ Done
Definition: Threading.h:61
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
Definition: Verifier.cpp:6966
CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition: Local.cpp:2881
raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition: GraphWriter.h:359
InlineResult isInlineViable(Function &Callee)
Minimal filter to detect invalid constructs for inlining.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1738
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:399
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2014
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool AreStatisticsEnabled()
Check if statistics are enabled.
Definition: Statistic.cpp:139
Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition: Local.cpp:2815
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
Definition: GraphWriter.h:427
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2060
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition: iterator.h:363
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1888
ChangeStatus
Definition: Attributor.h:483
void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the information contained in the operand bundles of the llvm....
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
DepClassTy
Definition: Attributor.h:493
@ OPTIONAL
The target may be valid if the source is not.
@ NONE
Do not track a dependence between source and target.
@ REQUIRED
The target cannot be valid if the source is not.
APInt operator|(APInt a, const APInt &b)
Definition: APInt.h:2073
#define N
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5530
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition: Attributor.h:510
aaiterator begin()
Definition: Attributor.h:524
aaiterator end()
Definition: Attributor.h:525
The data structure for the dependency graph.
Definition: Attributor.h:544
iterator begin()
Definition: Attributor.h:559
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition: Attributor.h:556
void print()
Print dependency graph.
iterator end()
Definition: Attributor.h:560
void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition: Attributor.h:557
An abstract interface to track if a value leaves its defining function instance.
Definition: Attributor.h:4308
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
Definition: Attributor.h:4322
An abstract Attribute for computing reachability between functions.
Definition: Attributor.h:5683
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5718
An abstract interface to determine reachability of point A to B.
Definition: Attributor.h:3814
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3842
An abstract interface for liveness abstract attribute.
Definition: Attributor.h:3974
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interf...
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
Definition: Attributor.h:4018
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
Definition: Attributor.h:4039
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
Definition: Attributor.h:4628
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4694
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
Definition: Attributor.h:4703
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4879
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3625
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:4422
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:5441
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3938
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3707
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3969
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3583
static bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static bool isNoSyncIntrinsic(const Instruction *I)
Helper function specific for intrinsics which are potentially volatile.
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3500
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3676
An access description.
Definition: Attributor.h:5941
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
Definition: Attributor.h:6047
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
Definition: Attributor.h:6066
bool isWriteOrAssumption() const
Return true if this is a write access.
Definition: Attributor.h:6017
bool isRead() const
Return true if this is a read access.
Definition: Attributor.h:6011
Value * getWrittenValue() const
Return the value written, if any.
Definition: Attributor.h:6058
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attri...
Definition: Attributor.h:6038
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
Definition: Attributor.h:6041
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
Definition: Attributor.h:6044
AccessKind getKind() const
Return the access kind.
Definition: Attributor.h:6008
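An illustrative callback that classifies a single AAPointerInfo::Access using only the members listed above; the context in which such a callback would be invoked (e.g. a walk over interfering accesses) is assumed.
// Return true if the access leaves the tracked memory content intact
// (from the perspective of a hypothetical caller).
auto ClassifyAccess = [](const AAPointerInfo::Access &Acc) {
  if (Acc.isRead())
    return true; // Reads do not change the content.
  if (Acc.isWriteOrAssumption()) {
    if (Acc.isWrittenValueYetUndetermined())
      return true; // Stay optimistic until the written value is known.
    if (Acc.isWrittenValueUnknown())
      return false; // Unknown content invalidates what we tracked.
    Value *WrittenV = Acc.getWrittenValue();
    return WrittenV != nullptr; // Inspect the concrete value here.
  }
  return true;
};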
An abstract interface for struct information.
Definition: Attributor.h:5755
static Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
Definition: Attributor.h:6195
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:6225
static const char ID
Unique ID (due to the unique address)
Definition: Attributor.h:3771
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition: Attributor.h:236
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition: Attributor.h:245
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:249
Base struct for all "concrete attribute" deductions.
Definition: Attributor.h:3282
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
virtual ChangeStatus manifest(Attributor &A)
Hook for the Attributor to trigger the manifestation of the information represented by the abstract a...
Definition: Attributor.h:3397
virtual void printWithDeps(raw_ostream &OS) const
void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
Definition: Attributor.h:3366
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getName() const =0
This function should return the name of the AbstractAttribute.
virtual ~AbstractAttribute()=default
Virtual destructor.
virtual const std::string getAsStr(Attributor *A) const =0
This function should return the "summarized" assumed state as string.
virtual bool isQueryAA() const
A query AA is always scheduled as long as we do updates because it does lazy computation that cannot ...
Definition: Attributor.h:3354
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
virtual void trackStatistics() const =0
Hook to enable custom statistic tracking, called after manifest that resulted in a change if statisti...
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
Definition: Attributor.h:3361
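A skeletal abstract attribute implementing the hooks listed above. This is only a sketch: the class name is made up, and the registration boilerplate a real AA needs (the out-of-line definition of ID, createForPosition, statistics macros, etc.) is omitted.
struct AAExampleSketch : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAExampleSketch(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  // Transfer function: refine the internal state or give up.
  ChangeStatus updateImpl(Attributor &A) override {
    // Real implementations query other AAs here and record dependences.
    return indicatePessimisticFixpoint();
  }

  // Write the deduced information back into the IR once a fixpoint is found.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  const std::string getName() const override { return "AAExampleSketch"; }
  const std::string getAsStr(Attributor *A) const override {
    return isValidState() ? "example<valid>" : "example<invalid>";
  }
  const char *getIdAddr() const override { return &ID; }
  void trackStatistics() const override {}

  static const char ID; // Definition omitted in this sketch.
};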
An interface to query the internal state of an abstract attribute.
Definition: Attributor.h:2602
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
virtual ChangeStatus indicateOptimisticFixpoint()=0
Indicate that the abstract state should converge to the optimistic state.
Wrapper for FunctionAnalysisManager.
Definition: Attributor.h:1121
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Definition: Attributor.h:5547
Configuration for the Attributor.
Definition: Attributor.h:1413
bool UseLiveness
Flag to determine if we should skip all liveness checks early on.
Definition: Attributor.h:1437
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
Definition: Attributor.h:1460
DenseSet< const char * > * Allowed
If not null, a set limiting the attribute opportunities.
Definition: Attributor.h:1457
bool RewriteSignatures
Flag to determine if we rewrite function signatures.
Definition: Attributor.h:1430
bool DeleteFns
Flag to determine if we can delete functions or keep dead ones around.
Definition: Attributor.h:1427
bool IsClosedWorldModule
Flag to indicate if the entire world is contained in this module, that is, no outside functions exist...
Definition: Attributor.h:1441
CallGraphUpdater & CGUpdater
Helper to update an underlying call graph and to delete functions.
Definition: Attributor.h:1454
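A sketch of configuring and driving the framework with the fields above, loosely modeled on how this file's pass entry points do it; Functions and InfoCache are assumed to be prepared by the caller.
static ChangeStatus runExampleAttributor(SetVector<Function *> &Functions,
                                         InformationCache &InfoCache) {
  CallGraphUpdater CGUpdater;        // No call graph to keep up to date here.
  AttributorConfig AC(CGUpdater);
  AC.DeleteFns = false;              // Keep dead functions around.
  AC.RewriteSignatures = true;       // Allow function signature rewrites.
  AC.MaxFixpointIterations = 32;     // Mirrors attributor-max-iterations.

  Attributor A(Functions, InfoCache, AC);
  for (Function *F : Functions)
    A.identifyDefaultAbstractAttributes(*F);
  return A.run();
}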
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
Definition: Attributor.h:2210
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
Definition: Attributor.h:2233
std::function< void(const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
Definition: Attributor.h:2219
The fixpoint analysis framework that orchestrates the attribute deduction.
Definition: Attributor.h:1507
bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
bool checkForAllCallees(function_ref< bool(ArrayRef< const Function * > Callees)> Pred, const AbstractAttribute &QueryingAA, const CallBase &CB)
Check Pred on all potential Callees of CB.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
Definition: Attributor.h:1715
bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
static bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
ChangeStatus removeAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AttrKinds)
Remove all AttrKinds attached to IRP.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
Definition: Attributor.h:1732
bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute *QueryingAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
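A hypothetical fragment, as it might appear inside an updateImpl of a function-scope AA, combining checkForAllInstructions with AA::isNoSyncInst; the enclosing class, A, and its state are assumed.
bool UsedAssumedInformation = false;
auto CheckSyncInst = [&](Instruction &I) {
  // Give up on any call-like instruction that may synchronize.
  return AA::isNoSyncInst(A, I, *this);
};
if (!A.checkForAllInstructions(CheckSyncInst, getAnchorScope(), this,
                               {(unsigned)Instruction::Call,
                                (unsigned)Instruction::Invoke},
                               UsedAssumedInformation))
  return indicatePessimisticFixpoint();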
void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated ...
static void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nu...
Definition: Attributor.h:1973
static Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool isFunctionIPOAmendable(const Function &F)
Determine whether the function F is IPO amendable.
Definition: Attributor.h:1811
bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA, AA::ValueScope S=AA::ValueScope::Intraprocedural, bool RecurseForSelectAndPHI=true)
Check Pred on all values potentially returned by the function associated with QueryingAA.
bool isClosedWorldModule() const
Return true if the module contains the whole world, thus, no outside functions exist.
std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt,...
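A tiny hypothetical use of getAssumedConstant from inside an AA's update, where A, V, and *this come from the enclosing (assumed) attribute.
bool UsedAssumedInformation = false;
std::optional<Constant *> C =
    A.getAssumedConstant(IRPosition::value(V), *this, UsedAssumedInformation);
if (!C.has_value())
  return ChangeStatus::UNCHANGED;       // Not decided yet, stay optimistic.
if (!*C)
  return indicatePessimisticFixpoint(); // Simplification failed.
// Otherwise *C is the constant V is assumed to simplify to.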
Attributor(SetVector< Function * > &Functions, InformationCache &InfoCache, AttributorConfig Configuration)
Constructor.
void getAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, SmallVectorImpl< Attribute > &Attrs, bool IgnoreSubsumingPositions=false)
Return the attributes of any kind in AKs existing in the IR at a position that will affect this one.
InformationCache & getInfoCache()
Return the internal information cache.
Definition: Attributor.h:1712
std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
ChangeStatus manifestAttrs(const IRPosition &IRP, ArrayRef< Attribute > DeducedAttrs, bool ForceReplace=false)
Attach DeducedAttrs to IRP, if ForceReplace is set we do this even if the same attribute kind was alr...
bool hasAttr(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, bool IgnoreSubsumingPositions=false, Attribute::AttrKind ImpliedAttributeKind=Attribute::None)
Return true if any kind in AKs exists in the IR at a position that will affect this one.
void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for...
bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation, bool RecurseForSelectAndPHI=true)
Try to simplify IRP in the scope S.
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
Definition: Attributor.h:2061
ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
static bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK, SmallVectorImpl< Attribute > &Attrs)
Return the attributes of kind AK existing in the IR as operand bundles of an llvm....
bool isKnown(base_t BitsEncoding=BestState) const
Return true if the bits set in BitsEncoding are "known bits".
Definition: Attributor.h:2750
Support structure for SCC passes to communicate updates to the call graph back to the CGSCC pass manager...
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ Dynamic
Denormals have unknown treatment.
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition: DenseMapInfo.h:50
static NodeRef DepGetVal(const DepTy &DT)
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
static ChildIteratorType child_begin(NodeRef N)
AADepGraphNode::DepSetTy::iterator ChildEdgeIteratorType
static NodeRef getEntryNode(AADepGraph *DG)
static nodes_iterator nodes_begin(AADepGraph *DG)
static nodes_iterator nodes_end(AADepGraph *DG)
Helper to describe and deal with positions in the LLVM-IR.
Definition: Attributor.h:580
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition: Attributor.h:711
void setAttrList(const AttributeList &AttrList) const
Update the attributes associated with this function or call site scope.
Definition: Attributor.h:847
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition: Attributor.h:812
bool hasCallBaseContext() const
Check if the position has any call base context.
Definition: Attributor.h:929
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.
Definition: Attributor.h:648
static const IRPosition returned(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the returned value of F.
Definition: Attributor.h:630
Argument * getAssociatedArgument() const
Return the associated argument, if any.
Definition: Attributor.cpp:996
static const IRPosition value(const Value &V, const CallBaseContext *CBContext=nullptr)
Create a position describing the value of V.
Definition: Attributor.h:604
AttributeList getAttrList() const
Return the attributes associated with this function or call site scope.
Definition: Attributor.h:840
static const IRPosition inst(const Instruction &I, const CallBaseContext *CBContext=nullptr)
Create a position describing the instruction I.
Definition: Attributor.h:616
static const IRPosition callsite_argument(const CallBase &CB, unsigned ArgNo)
Create a position describing the argument of CB at position ArgNo.
Definition: Attributor.h:653
static const IRPosition TombstoneKey
Definition: Attributor.h:935
Kind
The positions we distinguish in the IR.
Definition: Attributor.h:586
@ IRP_ARGUMENT
An attribute for a function argument.
Definition: Attributor.h:594
@ IRP_RETURNED
An attribute for the function return value.
Definition: Attributor.h:590
@ IRP_CALL_SITE
An attribute for a call site (function scope).
Definition: Attributor.h:593
@ IRP_CALL_SITE_RETURNED
An attribute for a call site return value.
Definition: Attributor.h:591
@ IRP_FUNCTION
An attribute for a function (scope).
Definition: Attributor.h:592
@ IRP_FLOAT
A position that is not associated with a spot suitable for attributes.
Definition: Attributor.h:588
@ IRP_CALL_SITE_ARGUMENT
An attribute for a call site argument.
Definition: Attributor.h:595
@ IRP_INVALID
An invalid position.
Definition: Attributor.h:587
Instruction * getCtxI() const
Return the context instruction, if any.
Definition: Attributor.h:764
static const IRPosition argument(const Argument &Arg, const CallBaseContext *CBContext=nullptr)
Create a position describing the argument Arg.
Definition: Attributor.h:637
static const IRPosition EmptyKey
Special DenseMap key values.
Definition: Attributor.h:934
static const IRPosition function(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the function scope of F.
Definition: Attributor.h:623
const CallBaseContext * getCallBaseContext() const
Get the call base context from the position.
Definition: Attributor.h:926
Value & getAssociatedValue() const
Return the value this abstract attribute is associated with.
Definition: Attributor.h:778
Value & getAnchorValue() const
Return the value this abstract attribute is anchored with.
Definition: Attributor.h:697
Value * getAttrListAnchor() const
Return the value attributes are attached to.
Definition: Attributor.h:833
int getCallSiteArgNo() const
Return the call site argument number of the associated value if it is an argument or call site argume...
Definition: Attributor.h:807
Kind getPositionKind() const
Return the associated position kind.
Definition: Attributor.h:876
static const IRPosition callsite_function(const CallBase &CB)
Create a position describing the function scope of CB.
Definition: Attributor.h:643
Function * getAnchorScope() const
Return the Function surrounding the anchor value.
Definition: Attributor.h:752
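Illustrative positions built with the factory functions above; F and CB are a Function and a CallBase assumed to be provided by the caller (with at least one argument each).
static void buildExamplePositions(Function &F, CallBase &CB) {
  const IRPosition FnPos    = IRPosition::function(F);
  const IRPosition RetPos   = IRPosition::returned(F);
  const IRPosition ArgPos   = IRPosition::argument(*F.getArg(0));
  const IRPosition CSPos    = IRPosition::callsite_function(CB);
  const IRPosition CSRetPos = IRPosition::callsite_returned(CB);
  const IRPosition CSArgPos = IRPosition::callsite_argument(CB, /*ArgNo=*/0);
  assert(FnPos.getAssociatedFunction() == &F && "anchored at F");
  (void)RetPos; (void)ArgPos; (void)CSPos; (void)CSRetPos; (void)CSArgPos;
}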
Data structure to hold cached (LLVM-IR) information.
Definition: Attributor.h:1197
bool stackIsAccessibleByOtherThreads()
Return true if the stack (llvm::AllocaInst) can be accessed by other threads.
Definition: Attributor.h:1326
MustBeExecutedContextExplorer * getMustBeExecutedContextExplorer()
Return MustBeExecutedContextExplorer.
Definition: Attributor.h:1275
const ArrayRef< Function * > getIndirectlyCallableFunctions(Attributor &A) const
Return all functions that might be called indirectly, only valid for closed world modules (see isClos...
TargetLibraryInfo * getTargetLibraryInfoForFunction(const Function &F)
Return TargetLibraryInfo for function F.
Definition: Attributor.h:1280
OpcodeInstMapTy & getOpcodeInstMapForFunction(const Function &F)
Return the map that relates "interesting" opcodes with all instructions with that opcode in F.
Definition: Attributor.h:1265
const RetainedKnowledgeMap & getKnowledgeMap() const
Return the map containing all the knowledge we have from llvm.assumes.
Definition: Attributor.h:1310
SmallVector< Instruction *, 8 > InstructionVectorTy
A vector type to hold instructions.
Definition: Attributor.h:1258
InstructionVectorTy & getReadOrWriteInstsForFunction(const Function &F)
Return the instructions in F that may read or write memory.
Definition: Attributor.h:1270
AP::Result * getAnalysisResultForFunction(const Function &F, bool CachedOnly=false)
Return the analysis result from a pass AP for function F.
Definition: Attributor.h:1301
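Hypothetical lookups against the InformationCache using only the members listed above; InfoCache and F are assumed to be in scope.
const TargetLibraryInfo *TLI = InfoCache.getTargetLibraryInfoForFunction(F);
auto &RWInsts  = InfoCache.getReadOrWriteInstsForFunction(F);
auto &InstMap  = InfoCache.getOpcodeInstMapForFunction(F);
// Analysis results are fetched lazily through the cached analysis getter.
auto *AAResultsForF = InfoCache.getAnalysisResultForFunction<AAManager>(F);
(void)TLI; (void)RWInsts; (void)InstMap; (void)AAResultsForF;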
State for an integer range.
Definition: Attributor.h:2928
ConstantRange getKnown() const
Return the known state encoding.
Definition: Attributor.h:2984
ConstantRange getAssumed() const
Return the assumed state encoding.
Definition: Attributor.h:2987
uint32_t getBitWidth() const
Return associated values' bit width.
Definition: Attributor.h:2961
A "must be executed context" for a given program point PP is the set of instructions,...
Definition: MustExecute.h:386
iterator & end()
Return a universal end iterator.
Definition: MustExecute.h:434
bool findInContextOf(const Instruction *I, const Instruction *PP)
Helper to look for I in the context of PP.
Definition: MustExecute.h:470
iterator & begin(const Instruction *PP)
Return an iterator to explore the context around PP.
Definition: MustExecute.h:420
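A sketch of a must-be-executed query; A, I, and PP are assumed to be an Attributor and two Instruction pointers in the caller's scope.
MustBeExecutedContextExplorer *Explorer =
    A.getInfoCache().getMustBeExecutedContextExplorer();
// If the explorer is available, check whether I is always executed when PP is.
bool AlwaysExecuted = Explorer && Explorer->findInContextOf(I, PP);
(void)AlwaysExecuted;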
A class for a set state.
Definition: Attributor.h:4958
bool undefIsContained() const
Returns whether this state contains an undef value or not.
Definition: Attributor.h:4994
bool isValidState() const override
See AbstractState::isValidState(...)
Definition: Attributor.h:4967
const SetTy & getAssumedSet() const
Return this set.
Definition: Attributor.h:4988
The TimeTraceScope is a helper class to call the begin and end functions of the time trace profiler.
Definition: TimeProfiler.h:134