LLVM 23.0.0git
Attributor.cpp
Go to the documentation of this file.
1//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements an interprocedural pass that deduces and/or propagates
10// attributes. This is done in an abstract interpretation style fixpoint
11// iteration. See the Attributor.h file comment and the class descriptions in
12// that file for more information.
13//
14//===----------------------------------------------------------------------===//
15
17
18#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/Statistic.h"
29#include "llvm/IR/Attributes.h"
30#include "llvm/IR/Constant.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/GlobalValue.h"
36#include "llvm/IR/Instruction.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/Debug.h"
47#include "llvm/Support/ModRef.h"
52#include <cstdint>
53#include <memory>
54
55#ifdef EXPENSIVE_CHECKS
56#include "llvm/IR/Verifier.h"
57#endif
58
59#include <cassert>
60#include <optional>
61#include <string>
62
63using namespace llvm;
64
65#define DEBUG_TYPE "attributor"
66#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
67
68DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
69 "Determine what attributes are manifested in the IR");
70
71STATISTIC(NumFnDeleted, "Number of function deleted");
72STATISTIC(NumFnWithExactDefinition,
73 "Number of functions with exact definitions");
74STATISTIC(NumFnWithoutExactDefinition,
75 "Number of functions without exact definitions");
76STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
77STATISTIC(NumAttributesTimedOut,
78 "Number of abstract attributes timed out before fixpoint");
79STATISTIC(NumAttributesValidFixpoint,
80 "Number of abstract attributes in a valid fixpoint state");
81STATISTIC(NumAttributesManifested,
82 "Number of abstract attributes manifested in IR");
83
84// TODO: Determine a good default value.
85//
86// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
87// (when run with the first 5 abstract attributes). The results also indicate
88// that we never reach 32 iterations but always find a fixpoint sooner.
89//
90// This will become more evolved once we perform two interleaved fixpoint
91// iterations: bottom-up and top-down.
93 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
94 cl::desc("Maximal number of fixpoint iterations."),
95 cl::init(32));
96
98 MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
100 cl::desc("Maximal number of callees specialized for "
101 "a call base"),
102 cl::init(UINT32_MAX));
103
105 "attributor-max-initialization-chain-length", cl::Hidden,
106 cl::desc(
107 "Maximal number of chained initializations (to avoid stack overflows)"),
110
112 "attributor-annotate-decl-cs", cl::Hidden,
113 cl::desc("Annotate call sites of function declarations."), cl::init(false));
114
115static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
116 cl::init(true), cl::Hidden);
117
118static cl::opt<bool>
119 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
120 cl::desc("Allow the Attributor to create shallow "
121 "wrappers for non-exact definitions."),
122 cl::init(false));
123
124static cl::opt<bool>
125 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
126 cl::desc("Allow the Attributor to use IP information "
127 "derived from non-exact functions via cloning"),
128 cl::init(false));
129
130// These options can only used for debug builds.
131#ifndef NDEBUG
133 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
134 cl::desc("Comma separated list of attribute names that are "
135 "allowed to be seeded."),
137
139 "attributor-function-seed-allow-list", cl::Hidden,
140 cl::desc("Comma separated list of function names that are "
141 "allowed to be seeded."),
143#endif
144
145static cl::opt<bool>
146 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
147 cl::desc("Dump the dependency graph to dot files."),
148 cl::init(false));
149
151 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
152 cl::desc("The prefix used for the CallGraph dot file names."));
153
154static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
155 cl::desc("View the dependency graph."),
156 cl::init(false));
157
158static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
159 cl::desc("Print attribute dependencies"),
160 cl::init(false));
161
163 "attributor-enable-call-site-specific-deduction", cl::Hidden,
164 cl::desc("Allow the Attributor to do call site specific analysis"),
165 cl::init(false));
166
167static cl::opt<bool>
168 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
169 cl::desc("Print Attributor's internal call graph"),
170 cl::init(false));
171
172static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
174 cl::desc("Try to simplify all loads."),
175 cl::init(true));
176
178 "attributor-assume-closed-world", cl::Hidden,
179 cl::desc("Should a closed world be assumed, or not. Default if not set."));
180
181/// Logic operators for the change status enum class.
182///
183///{
188 L = L | R;
189 return L;
190}
195 L = L & R;
196 return L;
197}
198///}
199
namespace {
/// NVPTX/AMDGPU address space values (shared between both targets)
/// NOTE(review): numbering matches the NVPTX/AMDGPU backends' conventions;
/// both targets agree on these values, which is why one enum serves both.
enum class NVPTXAMDGPUAddressSpace : unsigned {
  Generic = 0,  // Flat/generic pointer.
  Global = 1,   // Device-global memory.
  Shared = 3,   // Per-workgroup shared memory.
  Constant = 4, // Read-only constant memory.
  Local = 5,    // Per-thread private memory.
};

/// SPIRV address space values (StorageClass)
/// NOTE(review): same logical spaces as above but with a different numbering;
/// the AA::isGPU*AddressSpace helpers below select between the two enums.
enum class SPIRVAddressSpace : unsigned {
  Local = 0, // Function (private/local)
  Global = 1, // CrossWorkgroup (global)
  Constant = 2, // UniformConstant (constant)
  Shared = 3, // Workgroup (shared)
  Generic = 4, // Generic
};
} // namespace
219
220bool AA::isGPU(const Module &M) {
221 Triple T(M.getTargetTriple());
222 return T.isGPU();
223}
224
225bool AA::isGPUGenericAddressSpace(const Module &M, unsigned AS) {
226 assert(AA::isGPU(M) && "Only callable on GPU targets");
227 Triple T(M.getTargetTriple());
228
229 if (T.isSPIRV())
230 return AS == static_cast<unsigned>(SPIRVAddressSpace::Generic);
231
232 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Generic);
233}
234
235bool AA::isGPUGlobalAddressSpace(const Module &M, unsigned AS) {
236 assert(AA::isGPU(M) && "Only callable on GPU targets");
237 Triple T(M.getTargetTriple());
238
239 if (T.isSPIRV())
240 return AS == static_cast<unsigned>(SPIRVAddressSpace::Global);
241
242 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Global);
243}
244
245bool AA::isGPUSharedAddressSpace(const Module &M, unsigned AS) {
246 assert(AA::isGPU(M) && "Only callable on GPU targets");
247 Triple T(M.getTargetTriple());
248
249 if (T.isSPIRV())
250 return AS == static_cast<unsigned>(SPIRVAddressSpace::Shared);
251
252 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Shared);
253}
254
255bool AA::isGPUConstantAddressSpace(const Module &M, unsigned AS) {
256 assert(AA::isGPU(M) && "Only callable on GPU targets");
257 Triple T(M.getTargetTriple());
258
259 if (T.isSPIRV())
260 return AS == static_cast<unsigned>(SPIRVAddressSpace::Constant);
261
262 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Constant);
263}
264
265bool AA::isGPULocalAddressSpace(const Module &M, unsigned AS) {
266 assert(AA::isGPU(M) && "Only callable on GPU targets");
267 Triple T(M.getTargetTriple());
268
269 if (T.isSPIRV())
270 return AS == static_cast<unsigned>(SPIRVAddressSpace::Local);
271
272 return AS == static_cast<unsigned>(NVPTXAMDGPUAddressSpace::Local);
273}
274
276 const AbstractAttribute &QueryingAA) {
277 // We are looking for volatile instructions or non-relaxed atomics.
278 if (const auto *CB = dyn_cast<CallBase>(&I)) {
279 if (CB->hasFnAttr(Attribute::NoSync))
280 return true;
281
282 // Non-convergent and readnone imply nosync.
283 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
284 return true;
285
286 bool IsKnownNoSync;
288 A, &QueryingAA, IRPosition::callsite_function(*CB),
289 DepClassTy::OPTIONAL, IsKnownNoSync);
290 }
291
292 if (!I.mayReadOrWriteMemory())
293 return true;
294
296}
297
299 const Value &V, bool ForAnalysisOnly) {
300 // TODO: See the AAInstanceInfo class comment.
301 if (!ForAnalysisOnly)
302 return false;
303 auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
305 return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
306}
307
308Constant *
310 Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
311 const DataLayout &DL, AA::RangeTy *RangePtr) {
312 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
313 return Init;
314 auto *GV = dyn_cast<GlobalVariable>(&Obj);
315 if (!GV)
316 return nullptr;
317
318 bool UsedAssumedInformation = false;
319 Constant *Initializer = nullptr;
320 if (A.hasGlobalVariableSimplificationCallback(*GV)) {
321 auto AssumedGV = A.getAssumedInitializerFromCallBack(
322 *GV, &QueryingAA, UsedAssumedInformation);
323 Initializer = *AssumedGV;
324 if (!Initializer)
325 return nullptr;
326 } else {
327 if (!GV->hasLocalLinkage()) {
328 // Externally visible global that's either non-constant,
329 // or a constant with an uncertain initializer.
330 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
331 return nullptr;
332 }
333
334 // Globals with local linkage are always initialized.
335 assert(!GV->hasLocalLinkage() || GV->hasInitializer());
336
337 if (!Initializer)
338 Initializer = GV->getInitializer();
339 }
340
341 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
342 int64_t StorageSize = DL.getTypeStoreSize(&Ty);
343 if (StorageSize != RangePtr->Size)
344 return nullptr;
345 APInt Offset = APInt(64, RangePtr->Offset);
346 return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
347 }
348
349 return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
350}
351
352bool AA::isValidInScope(const Value &V, const Function *Scope) {
353 if (isa<Constant>(V))
354 return true;
355 if (auto *I = dyn_cast<Instruction>(&V))
356 return I->getFunction() == Scope;
357 if (auto *A = dyn_cast<Argument>(&V))
358 return A->getParent() == Scope;
359 return false;
360}
361
363 InformationCache &InfoCache) {
364 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
365 return true;
366 const Function *Scope = nullptr;
367 const Instruction *CtxI = VAC.getCtxI();
368 if (CtxI)
369 Scope = CtxI->getFunction();
370 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
371 return A->getParent() == Scope;
372 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
373 if (I->getFunction() == Scope) {
374 if (const DominatorTree *DT =
376 *Scope))
377 return DT->dominates(I, CtxI);
378 // Local dominance check mostly for the old PM passes.
379 if (CtxI && I->getParent() == CtxI->getParent())
380 return llvm::any_of(
381 make_range(I->getIterator(), I->getParent()->end()),
382 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
383 }
384 }
385 return false;
386}
387
389 if (V.getType() == &Ty)
390 return &V;
391 if (isa<PoisonValue>(V))
392 return PoisonValue::get(&Ty);
393 if (isa<UndefValue>(V))
394 return UndefValue::get(&Ty);
395 if (auto *C = dyn_cast<Constant>(&V)) {
396 if (C->isNullValue() && !Ty.isPtrOrPtrVectorTy())
397 return Constant::getNullValue(&Ty);
398 if (C->getType()->isPointerTy() && Ty.isPointerTy())
399 return ConstantExpr::getPointerCast(C, &Ty);
400 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
401 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
402 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
403 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
404 return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
405 }
406 }
407 return nullptr;
408}
409
410std::optional<Value *>
411AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
412 const std::optional<Value *> &B,
413 Type *Ty) {
414 if (A == B)
415 return A;
416 if (!B)
417 return A;
418 if (*B == nullptr)
419 return nullptr;
420 if (!A)
421 return Ty ? getWithType(**B, *Ty) : nullptr;
422 if (*A == nullptr)
423 return nullptr;
424 if (!Ty)
425 Ty = (*A)->getType();
427 return getWithType(**B, *Ty);
428 if (isa<UndefValue>(*B))
429 return A;
430 if (*A && *B && *A == getWithType(**B, *Ty))
431 return A;
432 return nullptr;
433}
434
435template <bool IsLoad, typename Ty>
437 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
438 SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
439 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
440 bool OnlyExact) {
441 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
442 << " (only exact: " << OnlyExact << ")\n";);
443
444 Value &Ptr = *I.getPointerOperand();
445 // Containers to remember the pointer infos and new copies while we are not
446 // sure that we can find all of them. If we abort we want to avoid spurious
447 // dependences and potential copies in the provided container.
451
452 const auto *TLI =
453 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
454
455 auto Pred = [&](Value &Obj) {
456 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
457 if (isa<UndefValue>(&Obj))
458 return true;
459 if (isa<ConstantPointerNull>(&Obj)) {
460 // A null pointer access can be undefined but any offset from null may
461 // be OK. We do not try to optimize the latter.
462 if (!NullPointerIsDefined(I.getFunction(),
463 Ptr.getType()->getPointerAddressSpace()) &&
464 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
465 AA::Interprocedural) == &Obj)
466 return true;
468 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
469 return false;
470 }
471 // TODO: Use assumed noalias return.
472 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
473 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
474 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
475 << "\n";);
476 return false;
477 }
478 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
479 if (!GV->hasLocalLinkage() &&
480 !(GV->isConstant() && GV->hasInitializer())) {
481 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
482 "linkage, not supported yet: "
483 << Obj << "\n";);
484 return false;
485 }
486
487 bool NullOnly = true;
488 bool NullRequired = false;
489 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
490 bool IsExact) {
491 if (!V || *V == nullptr)
492 NullOnly = false;
493 else if (isa<UndefValue>(*V))
494 /* No op */;
495 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
496 NullRequired = !IsExact;
497 else
498 NullOnly = false;
499 };
500
501 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
502 Value &V) {
503 Value *AdjV = AA::getWithType(V, *I.getType());
504 if (!AdjV) {
505 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
506 "cannot be converted to read type: "
507 << *Acc.getRemoteInst() << " : " << *I.getType()
508 << "\n";);
509 }
510 return AdjV;
511 };
512
513 auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
514 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
515 return true;
516 if (IsLoad) {
518 return true;
519 if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
520 return false;
521 if (!Acc.isWrittenValueUnknown())
522 if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
523 if (NewCopies.count(V)) {
524 NewCopyOrigins.insert(Acc.getRemoteInst());
525 return true;
526 }
527 if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
528 if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
529 if (NewCopies.count(V)) {
530 NewCopyOrigins.insert(Acc.getRemoteInst());
531 return true;
532 }
533 }
534 return false;
535 };
536
537 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
538 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
539 return true;
540 if (IsLoad && Acc.isWrittenValueYetUndetermined())
541 return true;
542 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
543 if (OnlyExact && !IsExact && !NullOnly &&
545 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
546 << ", abort!\n");
547 return false;
548 }
549 if (NullRequired && !NullOnly) {
550 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
551 "one, however found non-null one: "
552 << *Acc.getRemoteInst() << ", abort!\n");
553 return false;
554 }
555 if (IsLoad) {
556 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
557 if (!Acc.isWrittenValueUnknown()) {
558 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
559 if (!V)
560 return false;
561 NewCopies.insert(V);
562 if (PotentialValueOrigins)
563 NewCopyOrigins.insert(Acc.getRemoteInst());
564 return true;
565 }
566 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
567 if (!SI) {
568 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
569 "instruction not supported yet: "
570 << *Acc.getRemoteInst() << "\n";);
571 return false;
572 }
573 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
574 if (!V)
575 return false;
576 NewCopies.insert(V);
577 if (PotentialValueOrigins)
578 NewCopyOrigins.insert(SI);
579 } else {
580 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
581 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
582 if (!LI && OnlyExact) {
583 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
584 "instruction not supported yet: "
585 << *Acc.getRemoteInst() << "\n";);
586 return false;
587 }
588 NewCopies.insert(Acc.getRemoteInst());
589 }
590 return true;
591 };
592
593 // If the value has been written to we don't need the initial value of the
594 // object.
595 bool HasBeenWrittenTo = false;
596
598 auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
600 if (!PI || !PI->forallInterferingAccesses(
601 A, QueryingAA, I,
602 /* FindInterferingWrites */ IsLoad,
603 /* FindInterferingReads */ !IsLoad, CheckAccess,
604 HasBeenWrittenTo, Range, SkipCB)) {
606 dbgs()
607 << "Failed to verify all interfering accesses for underlying object: "
608 << Obj << "\n");
609 return false;
610 }
611
612 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
613 const DataLayout &DL = A.getDataLayout();
614 Value *InitialValue = AA::getInitialValueForObj(
615 A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
616 if (!InitialValue) {
617 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
618 "underlying object, abort!\n");
619 return false;
620 }
621 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
622 if (NullRequired && !NullOnly) {
623 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
624 "null or undef, abort!\n");
625 return false;
626 }
627
628 NewCopies.insert(InitialValue);
629 if (PotentialValueOrigins)
630 NewCopyOrigins.insert(nullptr);
631 }
632
633 PIs.push_back(PI);
634
635 return true;
636 };
637
638 const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
639 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
640 if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
642 dbgs() << "Underlying objects stored into could not be determined\n";);
643 return false;
644 }
645
646 // Only if we were successful collection all potential copies we record
647 // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
648 // given PotentialCopies container.
649 for (const auto *PI : PIs) {
650 if (!PI->getState().isAtFixpoint())
651 UsedAssumedInformation = true;
652 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
653 }
654 PotentialCopies.insert_range(NewCopies);
655 if (PotentialValueOrigins)
656 PotentialValueOrigins->insert_range(NewCopyOrigins);
657
658 return true;
659}
660
662 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
663 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
664 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
665 bool OnlyExact) {
666 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
667 A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
668 UsedAssumedInformation, OnlyExact);
669}
670
673 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
674 bool OnlyExact) {
675 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
676 A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
677 OnlyExact);
678}
679
681 const AbstractAttribute &QueryingAA,
682 bool RequireReadNone, bool &IsKnown) {
683 if (RequireReadNone) {
685 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
686 /* IgnoreSubsumingPositions */ true))
687 return true;
689 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
690 /* IgnoreSubsumingPositions */ true))
691 return true;
692
695 const auto *MemLocAA =
696 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
697 if (MemLocAA && MemLocAA->isAssumedReadNone()) {
698 IsKnown = MemLocAA->isKnownReadNone();
699 if (!IsKnown)
700 A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
701 return true;
702 }
703 }
704
705 const auto *MemBehaviorAA =
706 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
707 if (MemBehaviorAA &&
708 (MemBehaviorAA->isAssumedReadNone() ||
709 (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
710 IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
711 : MemBehaviorAA->isKnownReadOnly();
712 if (!IsKnown)
713 A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
714 return true;
715 }
716
717 return false;
718}
719
721 const AbstractAttribute &QueryingAA, bool &IsKnown) {
722 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
723 /* RequireReadNone */ false, IsKnown);
724}
726 const AbstractAttribute &QueryingAA, bool &IsKnown) {
727 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
728 /* RequireReadNone */ true, IsKnown);
729}
730
731static bool
733 const Instruction *ToI, const Function &ToFn,
734 const AbstractAttribute &QueryingAA,
735 const AA::InstExclusionSetTy *ExclusionSet,
736 std::function<bool(const Function &F)> GoBackwardsCB) {
738 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
739 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
740 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
741 << "]\n";
742 if (ExclusionSet)
743 for (auto *ES : *ExclusionSet)
744 dbgs() << *ES << "\n";
745 });
746
747 // We know kernels (generally) cannot be called from within the module. Thus,
748 // for reachability we would need to step back from a kernel which would allow
749 // us to reach anything anyway. Even if a kernel is invoked from another
750 // kernel, values like allocas and shared memory are not accessible. We
751 // implicitly check for this situation to avoid costly lookups.
752 if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
753 !GoBackwardsCB(*FromI.getFunction()) && A.getInfoCache().isKernel(ToFn) &&
754 A.getInfoCache().isKernel(*FromI.getFunction())) {
755 LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
756 "module; success\n";);
757 return false;
758 }
759
760 // If we can go arbitrarily backwards we will eventually reach an entry point
761 // that can reach ToI. Only if a set of blocks through which we cannot go is
762 // provided, or once we track internal functions not accessible from the
763 // outside, it makes sense to perform backwards analysis in the absence of a
764 // GoBackwardsCB.
765 if (!GoBackwardsCB && !ExclusionSet) {
766 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
767 << " is not checked backwards and does not have an "
768 "exclusion set, abort\n");
769 return true;
770 }
771
774 Worklist.push_back(&FromI);
775
776 while (!Worklist.empty()) {
777 const Instruction *CurFromI = Worklist.pop_back_val();
778 if (!Visited.insert(CurFromI).second)
779 continue;
780
781 const Function *FromFn = CurFromI->getFunction();
782 if (FromFn == &ToFn) {
783 if (!ToI)
784 return true;
785 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
786 << " intraprocedurally\n");
787 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
788 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
789 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
790 A, *CurFromI, *ToI, ExclusionSet);
791 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
792 << (Result ? "can potentially " : "cannot ") << "reach "
793 << *ToI << " [Intra]\n");
794 if (Result)
795 return true;
796 }
797
798 bool Result = true;
799 if (!ToFn.isDeclaration() && ToI) {
800 const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
801 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
802 const Instruction &EntryI = ToFn.getEntryBlock().front();
803 Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
804 A, EntryI, *ToI, ExclusionSet);
805 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
806 << " " << (Result ? "can potentially " : "cannot ")
807 << "reach @" << *ToI << " [ToFn]\n");
808 }
809
810 if (Result) {
811 // The entry of the ToFn can reach the instruction ToI. If the current
812 // instruction is already known to reach the ToFn.
813 const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
814 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
815 Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
816 A, *CurFromI, ToFn, ExclusionSet);
817 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
818 << " " << (Result ? "can potentially " : "cannot ")
819 << "reach @" << ToFn.getName() << " [FromFn]\n");
820 if (Result)
821 return true;
822 }
823
824 // TODO: Check assumed nounwind.
825 const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
826 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
827 auto ReturnInstCB = [&](Instruction &Ret) {
828 bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
829 A, *CurFromI, Ret, ExclusionSet);
830 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
831 << (Result ? "can potentially " : "cannot ") << "reach "
832 << Ret << " [Intra]\n");
833 return !Result;
834 };
835
836 // Check if we can reach returns.
837 bool UsedAssumedInformation = false;
838 if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
839 {Instruction::Ret}, UsedAssumedInformation)) {
840 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
841 continue;
842 }
843
844 if (!GoBackwardsCB) {
845 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
846 << " is not checked backwards, abort\n");
847 return true;
848 }
849
850 // If we do not go backwards from the FromFn we are done here and so far we
851 // could not find a way to reach ToFn/ToI.
852 if (!GoBackwardsCB(*FromFn))
853 continue;
854
855 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
856 << FromFn->getName() << "\n");
857
858 auto CheckCallSite = [&](AbstractCallSite ACS) {
859 CallBase *CB = ACS.getInstruction();
860 if (!CB)
861 return false;
862
863 if (isa<InvokeInst>(CB))
864 return false;
865
866 Instruction *Inst = CB->getNextNode();
867 Worklist.push_back(Inst);
868 return true;
869 };
870
871 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
872 /* RequireAllCallSites */ true,
873 &QueryingAA, UsedAssumedInformation);
874 if (Result) {
875 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
876 << " in @" << FromFn->getName()
877 << " failed, give up\n");
878 return true;
879 }
880
881 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
882 << " in @" << FromFn->getName()
883 << " worklist size is: " << Worklist.size() << "\n");
884 }
885 return false;
886}
887
889 Attributor &A, const Instruction &FromI, const Instruction &ToI,
890 const AbstractAttribute &QueryingAA,
891 const AA::InstExclusionSetTy *ExclusionSet,
892 std::function<bool(const Function &F)> GoBackwardsCB) {
893 const Function *ToFn = ToI.getFunction();
894 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
895 ExclusionSet, GoBackwardsCB);
896}
897
899 Attributor &A, const Instruction &FromI, const Function &ToFn,
900 const AbstractAttribute &QueryingAA,
901 const AA::InstExclusionSetTy *ExclusionSet,
902 std::function<bool(const Function &F)> GoBackwardsCB) {
903 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
904 ExclusionSet, GoBackwardsCB);
905}
906
908 const AbstractAttribute &QueryingAA) {
909 if (isa<UndefValue>(Obj))
910 return true;
911 if (isa<AllocaInst>(Obj)) {
912 InformationCache &InfoCache = A.getInfoCache();
913 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
915 dbgs() << "[AA] Object '" << Obj
916 << "' is thread local; stack objects are thread local.\n");
917 return true;
918 }
919 bool IsKnownNoCapture;
920 bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>(
921 A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
922 IsKnownNoCapture);
923 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
924 << (IsAssumedNoCapture ? "" : "not") << " thread local; "
925 << (IsAssumedNoCapture ? "non-" : "")
926 << "captured stack object.\n");
927 return IsAssumedNoCapture;
928 }
929 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
930 if (GV->isConstant()) {
931 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
932 << "' is thread local; constant global\n");
933 return true;
934 }
935 if (GV->isThreadLocal()) {
936 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
937 << "' is thread local; thread local global\n");
938 return true;
939 }
940 }
941
942 if (A.getInfoCache().IsTargetGPU()) {
943 if (AA::isGPULocalAddressSpace(A.getInfoCache().getModule(),
944 Obj.getType()->getPointerAddressSpace())) {
945 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
946 << "' is thread local; GPU local memory\n");
947 return true;
948 }
950 A.getInfoCache().getModule(),
951 Obj.getType()->getPointerAddressSpace())) {
952 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
953 << "' is thread local; GPU constant memory\n");
954 return true;
955 }
956 }
957
958 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
959 return false;
960}
961
963 const AbstractAttribute &QueryingAA) {
964 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
965 return false;
966
968
969 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
970 if (!Loc || !Loc->Ptr) {
972 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
973 return false;
974 }
975 Ptrs.insert(Loc->Ptr);
976 return true;
977 };
978
979 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
980 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
981 return true;
983 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
984 return true;
985 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
986 return true;
987
988 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
989}
990
// (review) Pointer-set overload: returns true if any pointer in \p Ptrs may
// reference an object that is not assumed thread-local, i.e. an access that a
// barrier could synchronize. A null pointer or an unanalyzable underlying
// object is treated conservatively as "requires barrier".
// NOTE(review): the opening signature lines (991-992) are missing from this
// listing.
993 const AbstractAttribute &QueryingAA,
994 const Instruction *CtxI) {
995 for (const Value *Ptr : Ptrs) {
996 if (!Ptr) {
997 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
998 return true;
999 }
1000
// Accept only underlying objects that are assumed thread-local; any other
// object makes the access barrier-relevant.
1001 auto Pred = [&](Value &Obj) {
1002 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
1003 return true;
1004 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
1005 << "'; -> requires barrier\n");
1006 return false;
1007 };
1008
1009 const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
1010 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
// No AA available, or some underlying object failed the predicate: be
// conservative.
1011 if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
1012 return true;
1013 }
1014 return false;
1015}
1016
1017/// Return true if \p New is equal or worse than \p Old.
1018static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
1019 if (!Old.isIntAttribute())
1020 return true;
1021
1022 return Old.getValueAsInt() >= New.getValueAsInt();
1023}
1024
1025/// Return true if the information provided by \p Attr was added to the
1026/// attribute set \p AttrSet. This is only the case if it was not already
1027/// present in \p AttrSet.
1028static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
1029 AttributeSet AttrSet, bool ForceReplace,
1030 AttrBuilder &AB) {
1031
1032 if (Attr.isEnumAttribute()) {
1033 Attribute::AttrKind Kind = Attr.getKindAsEnum();
1034 if (AttrSet.hasAttribute(Kind))
1035 return false;
1036 AB.addAttribute(Kind);
1037 return true;
1038 }
1039 if (Attr.isStringAttribute()) {
1040 StringRef Kind = Attr.getKindAsString();
1041 if (AttrSet.hasAttribute(Kind)) {
1042 if (!ForceReplace)
1043 return false;
1044 }
1045 AB.addAttribute(Kind, Attr.getValueAsString());
1046 return true;
1047 }
1048 if (Attr.isIntAttribute()) {
1049 Attribute::AttrKind Kind = Attr.getKindAsEnum();
1050 if (!ForceReplace && Kind == Attribute::Memory) {
1051 MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
1052 if (ME == AttrSet.getMemoryEffects())
1053 return false;
1054 AB.addMemoryAttr(ME);
1055 return true;
1056 }
1057 if (AttrSet.hasAttribute(Kind)) {
1058 if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
1059 return false;
1060 }
1061 AB.addAttribute(Attr);
1062 return true;
1063 }
1064 if (Attr.isConstantRangeAttribute()) {
1065 Attribute::AttrKind Kind = Attr.getKindAsEnum();
1066 if (!ForceReplace && AttrSet.hasAttribute(Kind))
1067 return false;
1068 AB.addAttribute(Attr);
1069 return true;
1070 }
1071
1072 llvm_unreachable("Expected enum or string attribute!");
1073}
1074
// (review) Body of IRPosition::getAssociatedArgument: maps a call-site
// argument position to the unique callback-callee argument it feeds, falling
// back to the direct callee argument. Returns nullptr when no argument can be
// associated.
// NOTE(review): the opening lines (1075-1076) — the function signature and,
// presumably, the IRP_ARGUMENT fast path check — are missing from this
// listing.
1077 return cast<Argument>(&getAnchorValue());
1078
1079 // Not an Argument and no argument number means this is not a call site
1080 // argument, thus we cannot find a callback argument to return.
1081 int ArgNo = getCallSiteArgNo();
1082 if (ArgNo < 0)
1083 return nullptr;
1084
1085 // Use abstract call sites to make the connection between the call site
1086 // values and the ones in callbacks. If a callback was found that makes use
1087 // of the underlying call site operand, we want the corresponding callback
1088 // callee argument and not the direct callee argument.
1089 std::optional<Argument *> CBCandidateArg;
1090 SmallVector<const Use *, 4> CallbackUses;
1091 const auto &CB = cast<CallBase>(getAnchorValue());
1092 AbstractCallSite::getCallbackUses(CB, CallbackUses);
1093 for (const Use *U : CallbackUses) {
1094 AbstractCallSite ACS(U);
1095 assert(ACS && ACS.isCallbackCall());
1096 if (!ACS.getCalledFunction())
1097 continue;
1098
1099 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
1100
1101 // Test if the underlying call site operand is argument number u of the
1102 // callback callee.
1103 if (ACS.getCallArgOperandNo(u) != ArgNo)
1104 continue;
1105
1106 assert(ACS.getCalledFunction()->arg_size() > u &&
1107 "ACS mapped into var-args arguments!");
// A second candidate means the mapping is ambiguous; give up on callbacks
// (nullptr inside the engaged optional marks "ambiguous").
1108 if (CBCandidateArg) {
1109 CBCandidateArg = nullptr;
1110 break;
1111 }
1112 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
1113 }
1114 }
1115
1116 // If we found a unique callback candidate argument, return it.
1117 if (CBCandidateArg && *CBCandidateArg)
1118 return *CBCandidateArg;
1119
1120 // If no callbacks were found, or none used the underlying call site operand
1121 // exclusively, use the direct callee argument if available.
1122 auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
1123 if (Callee && Callee->arg_size() > unsigned(ArgNo))
1124 return Callee->getArg(ArgNo);
1125
1126 return nullptr;
1127}
1128
// (review) Body of AbstractAttribute::update: skips attributes already at a
// fixpoint, otherwise delegates to updateImpl and reports whether the state
// changed.
// NOTE(review): the opening lines (1129-1130) — the signature and the
// declaration/initialization of 'HasChanged' — are missing from this listing.
1131 if (getState().isAtFixpoint())
1132 return HasChanged;
1133
1134 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
1135
// The actual deduction step is implemented by each concrete attribute.
1136 HasChanged = updateImpl(A);
1137
1138 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1139 << "\n");
1140
1141 return HasChanged;
1142}
1143
// (review) Attributor constructor: wires up the allocator, function set,
// info cache and configuration; in closed-world modules it additionally
// records every address-taken function as indirectly callable.
// NOTE(review): the first signature line (1144) is missing from this listing.
1145 InformationCache &InfoCache,
1146 AttributorConfig Configuration)
1147 : Allocator(InfoCache.Allocator), Functions(Functions),
1148 InfoCache(InfoCache), Configuration(Configuration) {
// Only closed-world modules need the indirectly-callable bookkeeping below.
1149 if (!isClosedWorldModule())
1150 return;
1151 for (Function *Fn : Functions)
1152 if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
1153 /*IgnoreCallbackUses=*/false,
1154 /*IgnoreAssumeLikeCalls=*/true,
1155 /*IgnoreLLVMUsed=*/true,
1156 /*IgnoreARCAttachedCall=*/false,
1157 /*IgnoreCastedDirectCall=*/true))
1158 InfoCache.IndirectlyCallableFunctions.push_back(Fn);
1159}
1160
// (review) Body of Attributor::getAttrsFromAssumes: derives attributes of
// kind 'AK' for the position's associated value from llvm.assume knowledge
// reachable from the position's context instruction. Returns true if any
// attribute was appended to 'Attrs'.
// NOTE(review): the opening lines (1161-1164) — signature, an assertion, and
// the must-be-executed-context 'Explorer' lookup — are missing from this
// listing.
1165 "Did expect a valid position!");
1168 if (!Explorer)
1169 return false;
1170
1171 Value &AssociatedValue = IRP.getAssociatedValue();
1172
1173 const Assume2KnowledgeMap &A2K =
1174 getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1175
1176 // Check if we found any potential assume use, if not we don't need to create
1177 // explorer iterators.
1178 if (A2K.empty())
1179 return false;
1180
1181 LLVMContext &Ctx = AssociatedValue.getContext();
// Remember the size so we can report whether anything was appended.
1182 unsigned AttrsSize = Attrs.size();
1183 auto EIt = Explorer->begin(IRP.getCtxI()),
1184 EEnd = Explorer->end(IRP.getCtxI());
// Only knowledge whose assume is in the context of the position is usable.
1185 for (const auto &It : A2K)
1186 if (Explorer->findInContextOf(It.first, EIt, EEnd))
1187 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1188 return AttrsSize != Attrs.size();
1189}
1190
// (review) Attributor::updateAttrMap: shared worker for hasAttr/getAttrs/
// removeAttrs/manifestAttrs. Runs callback \p CB over each attribute
// descriptor against the cached attribute list of the position's anchor and,
// if any callback reports a change, applies the accumulated removals (AM) and
// additions (AB) back into the cache.
// NOTE(review): several lines (1192, 1198, 1200-1202, 1219, 1225) are missing
// from this listing, including the return type, early-return bodies, and the
// specific position-kind case labels of the switch below.
1191template <typename DescTy>
1193Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
1194 function_ref<bool(const DescTy &, AttributeSet,
1195 AttributeMask &, AttrBuilder &)>
1196 CB) {
1197 if (AttrDescs.empty())
1199 switch (IRP.getPositionKind()) {
1203 default:
1204 break;
1205 };
1206
// Look up (or seed) the cached attribute list for this anchor; the cache is
// the authoritative copy while the Attributor runs.
1207 AttributeList AL = IRP.getAttrList();
1208 Value *AttrListAnchor = IRP.getAttrListAnchor();
1209 auto [Iter, Inserted] = AttrsMap.insert({AttrListAnchor, AL});
1210 if (!Inserted)
1211 AL = Iter->second;
1212
1213 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1214 auto AttrIdx = IRP.getAttrIdx();
1215 AttributeSet AS = AL.getAttributes(AttrIdx);
1216 AttributeMask AM;
1217 AttrBuilder AB(Ctx);
1218
1220 for (const DescTy &AttrDesc : AttrDescs)
1221 if (CB(AttrDesc, AS, AM, AB))
1222 HasChanged = ChangeStatus::CHANGED;
1223
1224 if (HasChanged == ChangeStatus::UNCHANGED)
1226
// Removals are applied before additions so a replaced attribute ends up with
// the new value.
1227 AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
1228 AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
1229
1230 Iter->second = AL;
1231 return HasChanged;
1232}
1233
// (review) Attributor::hasAttr: returns true if any of \p AttrKinds is
// present at \p IRP (or a subsuming position), also consulting llvm.assume
// knowledge. When the attribute was only found at a subsuming position or via
// assumes ('Implied'), it is manifested at the queried position so later
// queries see it directly.
// NOTE(review): the opening signature lines (1234) and lines 1261, 1271 (a
// local 'Attrs' vector and the manifestAttrs call) are missing from this
// listing.
1236 bool IgnoreSubsumingPositions,
1237 Attribute::AttrKind ImpliedAttributeKind) {
1238 bool Implied = false;
1239 bool HasAttr = false;
// Probe-only callback: records presence, never mutates the attribute list.
1240 auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1241 AttributeMask &, AttrBuilder &) {
1242 if (AttrSet.hasAttribute(Kind)) {
1243 Implied |= Kind != ImpliedAttributeKind;
1244 HasAttr = true;
1245 }
1246 return false;
1247 };
1248 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1249 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
1250 if (HasAttr)
1251 break;
1252 // The first position returned by the SubsumingPositionIterator is
1253 // always the position itself. If we ignore subsuming positions we
1254 // are done after the first iteration.
1255 if (IgnoreSubsumingPositions)
1256 break;
1257 Implied = true;
1258 }
// Fall back to attributes derivable from llvm.assume knowledge.
1259 if (!HasAttr) {
1260 Implied = true;
1262 for (Attribute::AttrKind AK : AttrKinds)
1263 if (getAttrsFromAssumes(IRP, AK, Attrs)) {
1264 HasAttr = true;
1265 break;
1266 }
1267 }
1268
1269 // Check if we should manifest the implied attribute kind at the IRP.
1270 if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
1272 ImpliedAttributeKind)});
1273 return HasAttr;
1274}
1275
// (review) Attributor::getAttrs: collects into \p Attrs every attribute of
// the requested kinds found at \p IRP (and, unless suppressed, its subsuming
// positions), plus attributes derivable from llvm.assume knowledge.
// NOTE(review): the opening signature lines (1276-1278) are missing from this
// listing.
1279 bool IgnoreSubsumingPositions) {
// Collect-only callback: copies matching attributes, never mutates the list.
1280 auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
1281 AttributeSet AttrSet, AttributeMask &,
1282 AttrBuilder &) {
1283 if (AttrSet.hasAttribute(Kind))
1284 Attrs.push_back(AttrSet.getAttribute(Kind));
1285 return false;
1286 };
1287 for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1288 updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
1289 // The first position returned by the SubsumingPositionIterator is
1290 // always the position itself. If we ignore subsuming positions we
1291 // are done after the first iteration.
1292 if (IgnoreSubsumingPositions)
1293 break;
1294 }
1295 for (Attribute::AttrKind AK : AttrKinds)
1296 getAttrsFromAssumes(IRP, AK, Attrs);
1297}
1298
// (review) Attributor::removeAttrs (enum-kind overload): marks each present
// kind for removal in the mask; updateAttrMap applies the removals to the
// cached attribute list.
// NOTE(review): the opening signature lines (1299-1300) are missing from this
// listing.
1301 auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1302 AttributeMask &AM, AttrBuilder &) {
1303 if (!AttrSet.hasAttribute(Kind))
1304 return false;
1305 AM.addAttribute(Kind);
1306 return true;
1307 };
1308 return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
1309}
1310
// (review) Attributor::removeAttrs (string-attribute overload): same scheme
// as the enum overload, keyed by attribute name.
// NOTE(review): the first signature line (1311) is missing from this listing.
1312 ArrayRef<StringRef> Attrs) {
1313 auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
1314 AttributeMask &AM, AttrBuilder &) -> bool {
1315 if (!AttrSet.hasAttribute(Attr))
1316 return false;
1317 AM.addAttribute(Attr);
1318 return true;
1319 };
1320
1321 return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
1322}
1323
// (review) Attributor::manifestAttrs: adds each attribute via
// addIfNotExistent (which honors \p ForceReplace and value comparisons) into
// the cached attribute list for the position.
// NOTE(review): the first signature line (1324) is missing from this listing.
1325 ArrayRef<Attribute> Attrs,
1326 bool ForceReplace) {
1327 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1328 auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
1329 AttributeMask &, AttrBuilder &AB) {
1330 return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
1331 };
1332 return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
1333}
1334
// (review) Static IRPosition key definitions followed by the
// SubsumingPositionIterator constructor: seeds the worklist with \p IRP and,
// depending on the position kind, appends the positions that subsume it
// (enclosing function, callee function/returned/argument positions, etc.).
// Operand bundles other than on llvm.assume block callee redirection.
// NOTE(review): many case labels and lines (1337, 1339, 1351-1353, 1355-1356,
// 1359, 1367, 1388) are missing from this listing; the switch cases below are
// therefore shown without their labels.
1336const IRPosition
1338
1340 IRPositions.emplace_back(IRP);
1341
1342 // Helper to determine if operand bundles on a call site are benign or
1343 // potentially problematic. We handle only llvm.assume for now.
1344 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1345 return (isa<IntrinsicInst>(CB) &&
1346 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1347 };
1348
1349 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1350 switch (IRP.getPositionKind()) {
1354 return;
1357 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1358 return;
1360 assert(CB && "Expected call site!");
1361 // TODO: We need to look at the operand bundles similar to the redirection
1362 // in CallBase.
1363 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1364 if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
1365 IRPositions.emplace_back(IRPosition::function(*Callee));
1366 return;
1368 assert(CB && "Expected call site!");
1369 // TODO: We need to look at the operand bundles similar to the redirection
1370 // in CallBase.
1371 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1372 if (auto *Callee =
1373 dyn_cast_if_present<Function>(CB->getCalledOperand())) {
1374 IRPositions.emplace_back(IRPosition::returned(*Callee));
1375 IRPositions.emplace_back(IRPosition::function(*Callee));
// Arguments marked 'returned' forward to the call-site return, so their
// positions subsume the call-site-returned position as well.
1376 for (const Argument &Arg : Callee->args())
1377 if (Arg.hasReturnedAttr()) {
1378 IRPositions.emplace_back(
1379 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1380 IRPositions.emplace_back(
1381 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1382 IRPositions.emplace_back(IRPosition::argument(Arg));
1383 }
1384 }
1385 }
1386 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1387 return;
1389 assert(CB && "Expected call site!");
1390 // TODO: We need to look at the operand bundles similar to the redirection
1391 // in CallBase.
1392 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1393 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
1394 if (Callee) {
1395 if (Argument *Arg = IRP.getAssociatedArgument())
1396 IRPositions.emplace_back(IRPosition::argument(*Arg));
1397 IRPositions.emplace_back(IRPosition::function(*Callee));
1398 }
1399 }
1400 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1401 return;
1402 }
1403 }
1404}
1405
// (review) IRPosition::verify: expensive-checks-only sanity assertions that
// each position kind stores the expected value/use encoding and that the
// CallBaseContext is only present where allowed. Compiles to a no-op unless
// EXPENSIVE_CHECKS is defined.
// NOTE(review): lines 1416, 1425, and 1453 are missing from this listing —
// they presumably hold an assertion in IRP_FLOAT and the case labels for
// call-site-returned and call-site-argument positions.
1406void IRPosition::verify() {
1407#ifdef EXPENSIVE_CHECKS
1408 switch (getPositionKind()) {
1409 case IRP_INVALID:
1410 assert((CBContext == nullptr) &&
1411 "Invalid position must not have CallBaseContext!");
1412 assert(!Enc.getOpaqueValue() &&
1413 "Expected a nullptr for an invalid position!");
1414 return;
1415 case IRP_FLOAT:
1417 "Expected specialized kind for argument values!");
1418 return;
1419 case IRP_RETURNED:
1420 assert(isa<Function>(getAsValuePtr()) &&
1421 "Expected function for a 'returned' position!");
1422 assert(getAsValuePtr() == &getAssociatedValue() &&
1423 "Associated value mismatch!");
1424 return;
1426 assert((CBContext == nullptr) &&
1427 "'call site returned' position must not have CallBaseContext!");
1428 assert((isa<CallBase>(getAsValuePtr())) &&
1429 "Expected call base for 'call site returned' position!");
1430 assert(getAsValuePtr() == &getAssociatedValue() &&
1431 "Associated value mismatch!");
1432 return;
1433 case IRP_CALL_SITE:
1434 assert((CBContext == nullptr) &&
1435 "'call site function' position must not have CallBaseContext!");
1436 assert((isa<CallBase>(getAsValuePtr())) &&
1437 "Expected call base for 'call site function' position!");
1438 assert(getAsValuePtr() == &getAssociatedValue() &&
1439 "Associated value mismatch!");
1440 return;
1441 case IRP_FUNCTION:
1442 assert(isa<Function>(getAsValuePtr()) &&
1443 "Expected function for a 'function' position!");
1444 assert(getAsValuePtr() == &getAssociatedValue() &&
1445 "Associated value mismatch!");
1446 return;
1447 case IRP_ARGUMENT:
1448 assert(isa<Argument>(getAsValuePtr()) &&
1449 "Expected argument for a 'argument' position!");
1450 assert(getAsValuePtr() == &getAssociatedValue() &&
1451 "Associated value mismatch!");
1452 return;
1454 assert((CBContext == nullptr) &&
1455 "'call site argument' position must not have CallBaseContext!");
1456 Use *U = getAsUsePtr();
1457 (void)U; // Silence unused variable warning.
1458 assert(U && "Expected use for a 'call site argument' position!");
1459 assert(isa<CallBase>(U->getUser()) &&
1460 "Expected call base user for a 'call site argument' position!");
1461 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1462 "Expected call base argument operand for a 'call site argument' "
1463 "position");
1464 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1465 unsigned(getCallSiteArgNo()) &&
1466 "Argument number mismatch!");
1467 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1468 return;
1469 }
1470 }
1471#endif
1472}
1473
// (review) Attributor::getAssumedConstant: tries to simplify the position's
// value to a Constant. Returns std::nullopt for "no value / dead",
// a Constant on success, or nullptr when simplification to a constant failed.
// NOTE(review): lines 1491 and 1493 — the local 'Values' vector declaration
// and part of the getAssumedSimplifiedValues call — are missing from this
// listing.
1474std::optional<Constant *>
1476 const AbstractAttribute &AA,
1477 bool &UsedAssumedInformation) {
1478 // First check all callbacks provided by outside AAs. If any of them returns
1479 // a non-null value that is different from the associated value, or
1480 // std::nullopt, we assume it's simplified.
1481 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1482 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1483 if (!SimplifiedV)
1484 return std::nullopt;
1485 if (isa_and_nonnull<Constant>(*SimplifiedV))
1486 return cast<Constant>(*SimplifiedV);
// A registered callback is authoritative; a non-constant result means
// "not a constant" rather than "try other means".
1487 return nullptr;
1488 }
1489 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1490 return C;
1492 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1494 UsedAssumedInformation)) {
// No potential values at all means the position is assumed dead.
1495 if (Values.empty())
1496 return std::nullopt;
1497 if (auto *C = dyn_cast_or_null<Constant>(
1498 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1499 return C;
1500 }
1501 return nullptr;
1502}
1503
// (review) Attributor::getAssumedSimplified: single-value variant of the
// simplification query. Returns the associated value when nothing better is
// known, a simplified Value on success, or (per the missing lines) an
// empty/none result for dead positions.
// NOTE(review): lines 1513 and 1521-1522 — the local 'Values' declaration and
// the condition guarding the final nullptr return — are missing from this
// listing.
1505 const IRPosition &IRP, const AbstractAttribute *AA,
1506 bool &UsedAssumedInformation, AA::ValueScope S) {
1507 // First check all callbacks provided by outside AAs. If any of them returns
1508 // a non-null value that is different from the associated value, or
1509 // std::nullopt, we assume it's simplified.
1510 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1511 return CB(IRP, AA, UsedAssumedInformation);
1512
// Failure to enumerate potential values: fall back to the original value.
1514 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1515 return &IRP.getAssociatedValue();
1516 if (Values.empty())
1517 return std::nullopt;
1518 if (AA)
1519 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1520 return V;
1523 return nullptr;
1524 return &IRP.getAssociatedValue();
1525}
1526
// (review) Attributor::getAssumedSimplifiedValues: accumulates the potential
// simplified values of \p InitialIRP into \p Values, optionally recursing
// through select/phi results via a worklist. Returns false if enumeration
// failed (value set must be treated as unknown).
// NOTE(review): lines 1529, 1531-1532, 1549-1550, and 1559 are missing from
// this listing — the signature tail, the 'Seen'/'Worklist' declarations, a
// scope check on callback results, and the AAPotentialValues lookup.
1528 const IRPosition &InitialIRP, const AbstractAttribute *AA,
1530 bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1533 Worklist.push_back(InitialIRP);
1534 while (!Worklist.empty()) {
1535 const IRPosition &IRP = Worklist.pop_back_val();
1536
1537 // First check all callbacks provided by outside AAs. If any of them returns
1538 // a non-null value that is different from the associated value, or
1539 // std::nullopt, we assume it's simplified.
// Remember where this iteration's values start for the recursion pass below.
1540 int NV = Values.size();
1541 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1542 for (const auto &CB : SimplificationCBs) {
1543 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1544 if (!CBResult.has_value())
1545 continue;
1546 Value *V = *CBResult;
1547 if (!V)
1548 return false;
1551 Values.push_back(AA::ValueAndContext{*V, nullptr});
1552 else
1553 return false;
1554 }
1555 if (SimplificationCBs.empty()) {
1556 // If no high-level/outside simplification occurred, use
1557 // AAPotentialValues.
1558 const auto *PotentialValuesAA =
1560 if (PotentialValuesAA &&
1561 PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1562 UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1563 } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
// Without an AA result, the value simplifies (trivially) to itself.
1564 Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1565 } else {
1566 // TODO: We could visit all returns and add the operands.
1567 return false;
1568 }
1569 }
1570
1571 if (!RecurseForSelectAndPHI)
1572 break;
1573
// Replace newly added select/phi values with their own potential values by
// re-queueing them; 'Seen' prevents infinite recursion through cycles.
1574 for (int I = NV, E = Values.size(); I < E; ++I) {
1575 Value *V = Values[I].getValue();
1576 if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1577 continue;
1578 if (!Seen.insert(V).second)
1579 continue;
1580 // Move the last element to this slot.
1581 Values[I] = Values[E - 1];
1582 // Eliminate the last slot, adjust the indices.
1583 Values.pop_back();
1584 --E;
1585 --I;
1586 // Add a new value (select or phi) to the worklist.
1587 Worklist.push_back(IRPosition::value(*V));
1588 }
1589 }
1590 return true;
1591}
1592
// (review) Attributor::translateArgumentToCallSiteContent: given a simplified
// value \p V from a callee context, translates it into the context of call
// site \p CB. Arguments of the callee are mapped to the corresponding
// call-site argument (simplified intraprocedurally); constants and
// none/null results pass through; everything else yields nullptr.
// NOTE(review): the signature line (1593) is missing from this listing.
1594 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1595 bool &UsedAssumedInformation) {
1596 if (!V)
1597 return V;
1598 if (*V == nullptr || isa<Constant>(*V))
1599 return V;
// Map a callee argument to the matching call-site operand, but only when the
// argument is a direct value (no byval-like memory-backed arguments).
1600 if (auto *Arg = dyn_cast<Argument>(*V))
1601 if (CB.getCalledOperand() == Arg->getParent() &&
1602 CB.arg_size() > Arg->getArgNo())
1603 if (!Arg->hasPointeeInMemoryValueAttr())
1604 return getAssumedSimplified(
1605 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1606 UsedAssumedInformation, AA::Intraprocedural);
1607 return nullptr;
1608}
1609
// (review) Attributor destructor body: abstract attributes live in the
// bump-pointer allocator, so only their destructors are invoked explicitly;
// the memory itself is reclaimed with the allocator.
// NOTE(review): the destructor's signature line (1610) is missing from this
// listing.
1611 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1612 // thus we cannot delete them. We can, and want to, destruct them though.
1613 for (auto &It : AAMap) {
1614 AbstractAttribute *AA = It.getSecond();
1615 AA->~AbstractAttribute();
1616 }
1617}
1618
// (review) isAssumedDead (AbstractAttribute overload): liveness query for the
// position of \p AA itself. Positions outside the analyzed function set are
// never reported dead; otherwise defers to the IRPosition overload.
// NOTE(review): the first signature line (1619) is missing from this listing.
1620 const AAIsDead *FnLivenessAA,
1621 bool &UsedAssumedInformation,
1622 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1623 if (!Configuration.UseLiveness)
1624 return false;
1625 const IRPosition &IRP = AA.getIRPosition();
// Only functions under analysis have liveness information we can trust.
1626 if (!Functions.count(IRP.getAnchorScope()))
1627 return false;
1628 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1629 CheckBBLivenessOnly, DepClass);
1630}
1631
// (review) isAssumedDead (Use overload): routes a use-liveness query to the
// most precise position — call-site argument, function return, phi incoming
// edge, removable store — before falling back to the user instruction's
// liveness.
// NOTE(review): the first signature line (1632) and line 1666 (the
// getOrCreateAAFor<AAIsDead> lookup for the store) are missing from this
// listing.
1633 const AbstractAttribute *QueryingAA,
1634 const AAIsDead *FnLivenessAA,
1635 bool &UsedAssumedInformation,
1636 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1637 if (!Configuration.UseLiveness)
1638 return false;
1639 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
// Non-instruction users: ask about the used value itself.
1640 if (!UserI)
1641 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1642 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1643
1644 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1645 // For call site argument uses we can check if the argument is
1646 // unused/dead.
1647 if (CB->isArgOperand(&U)) {
1648 const IRPosition &CSArgPos =
1649 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1650 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1651 UsedAssumedInformation, CheckBBLivenessOnly,
1652 DepClass);
1653 }
1654 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1655 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1656 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1657 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1658 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
// A phi use is dead if the incoming edge's terminator is dead.
1659 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1660 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1661 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1662 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
// A value stored by a removable store is effectively dead at this use.
1663 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1664 const IRPosition IRP = IRPosition::inst(*SI);
1665 const AAIsDead *IsDeadAA =
1667 if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1668 if (QueryingAA)
1669 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1670 if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1671 UsedAssumedInformation = true;
1672 return true;
1673 }
1674 }
1675 }
1676
1677 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1678 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1679}
1680
// (review) isAssumedDead (Instruction overload): checks block/function-level
// liveness via AAIsDead of the enclosing function, then (unless restricted to
// BB liveness) an instruction-level AAIsDead, optionally also treating
// removable stores as dead. Records dependences and whether assumed (not yet
// known) information was used.
// NOTE(review): the first signature line (1681) and line 1719 (the
// getOrCreateAAFor<AAIsDead> lookup for the instruction position) are missing
// from this listing.
1682 const AbstractAttribute *QueryingAA,
1683 const AAIsDead *FnLivenessAA,
1684 bool &UsedAssumedInformation,
1685 bool CheckBBLivenessOnly, DepClassTy DepClass,
1686 bool CheckForDeadStore) {
1687 if (!Configuration.UseLiveness)
1688 return false;
1689 const IRPosition::CallBaseContext *CBCtx =
1690 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1691
// Blocks introduced during manifestation have no liveness info; never dead.
1692 if (ManifestAddedBlocks.contains(I.getParent()))
1693 return false;
1694
// (Re)acquire the function-level liveness AA if the caller's does not match.
1695 const Function &F = *I.getFunction();
1696 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1697 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1698 QueryingAA, DepClassTy::NONE);
1699
1700 // Don't use recursive reasoning.
1701 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1702 return false;
1703
1704 // If we have a context instruction and a liveness AA we use it.
1705 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1706 : FnLivenessAA->isAssumedDead(&I)) {
1707 if (QueryingAA)
1708 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1709 if (!FnLivenessAA->isKnownDead(&I))
1710 UsedAssumedInformation = true;
1711 return true;
1712 }
1713
1714 if (CheckBBLivenessOnly)
1715 return false;
1716
1717 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1718 const AAIsDead *IsDeadAA =
1720
1721 // Don't use recursive reasoning.
1722 if (!IsDeadAA || QueryingAA == IsDeadAA)
1723 return false;
1724
1725 if (IsDeadAA->isAssumedDead()) {
1726 if (QueryingAA)
1727 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1728 if (!IsDeadAA->isKnownDead())
1729 UsedAssumedInformation = true;
1730 return true;
1731 }
1732
// Removable stores count as dead only when the caller opted in.
1733 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1734 if (QueryingAA)
1735 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1736 if (!IsDeadAA->isKnownDead())
1737 UsedAssumedInformation = true;
1738 return true;
1739 }
1740
1741 return false;
1742}
1743
// (review) isAssumedDead (IRPosition overload): first checks liveness of the
// position's context instruction, then queries a position-specific AAIsDead.
// Constants used as floating values are never dead here (per the comment).
// NOTE(review): lines 1744, 1753-1754, 1770, and 1772 are missing from this
// listing — the first signature line, the constant check, and parts of the
// position-kind-dependent AAIsDead lookup.
1745 const AbstractAttribute *QueryingAA,
1746 const AAIsDead *FnLivenessAA,
1747 bool &UsedAssumedInformation,
1748 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1749 if (!Configuration.UseLiveness)
1750 return false;
1751 // Don't check liveness for constants, e.g. functions, used as (floating)
1752 // values since the context instruction and such is here meaningless.
1755 return false;
1756 }
1757
1758 Instruction *CtxI = IRP.getCtxI();
1759 if (CtxI &&
1760 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1761 /* CheckBBLivenessOnly */ true,
1762 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1763 return true;
1764
1765 if (CheckBBLivenessOnly)
1766 return false;
1767
1768 // If we haven't succeeded we query the specific liveness info for the IRP.
1769 const AAIsDead *IsDeadAA;
1771 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1773 QueryingAA, DepClassTy::NONE);
1774 else
1775 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1776
1777 // Don't use recursive reasoning.
1778 if (!IsDeadAA || QueryingAA == IsDeadAA)
1779 return false;
1780
1781 if (IsDeadAA->isAssumedDead()) {
1782 if (QueryingAA)
1783 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1784 if (!IsDeadAA->isKnownDead())
1785 UsedAssumedInformation = true;
1786 return true;
1787 }
1788
1789 return false;
1790}
1791
// (review) isAssumedDead (BasicBlock overload): consults the enclosing
// function's AAIsDead for block liveness, recording the dependence on a
// positive answer.
// NOTE(review): the first signature line (1792) and line 1800 (the
// getOrCreateAAFor<AAIsDead> lookup) are missing from this listing.
1793 const AbstractAttribute *QueryingAA,
1794 const AAIsDead *FnLivenessAA,
1795 DepClassTy DepClass) {
1796 if (!Configuration.UseLiveness)
1797 return false;
1798 const Function &F = *BB.getParent();
// (Re)acquire the function-level liveness AA if the caller's does not match.
1799 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1801 QueryingAA, DepClassTy::NONE);
1802
1803 // Don't use recursive reasoning.
1804 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1805 return false;
1806
1807 if (FnLivenessAA->isAssumedDead(&BB)) {
1808 if (QueryingAA)
1809 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1810 return true;
1811 }
1812
1813 return false;
1814}
1815
// (review) checkForAllCallees: applies \p Pred to the callee(s) of call base
// \p CB — directly for a known callee, otherwise to the optimistic call-edge
// set from AACallEdges. Fails (returns false) if any callee is unknown.
// NOTE(review): the opening signature lines (1816-1817) and line 1823 (the
// IRPosition/DepClassTy arguments of the getAAFor call) are missing from this
// listing.
1818 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1819 if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1820 return Pred(Callee);
1821
1822 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1824 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1825 return false;
1826
1827 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1828 return Pred(Callees.getArrayRef());
1829}
1830
1831bool canMarkAsVisited(const User *Usr) {
1832 return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1833}
1834
// (review) Attributor::checkForAllUses: worklist traversal over all
// (transitive) uses of \p V, invoking \p Pred on each live, non-droppable
// use. Stored values are followed into their potential copies; virtual-use
// callbacks and ConstantData short-circuit the walk. Returns false as soon as
// the predicate (or a helper) rejects.
// NOTE(review): several lines (1835, 1855-1856, 1880, and a few
// verbose-debug/guard lines) are missing from this listing, including the
// first signature line and the 'Visited'/'Worklist' declarations.
1836 function_ref<bool(const Use &, bool &)> Pred,
1837 const AbstractAttribute &QueryingAA, const Value &V,
1838 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1839 bool IgnoreDroppableUses,
1840 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1841
1842 // Check virtual uses first.
1843 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1844 if (!CB(*this, &QueryingAA))
1845 return false;
1846
// ConstantData has unbounded, shared use lists; refuse to enumerate them.
1847 if (isa<ConstantData>(V))
1848 return false;
1849
1850 // Check the trivial case first as it catches void values.
1851 if (V.use_empty())
1852 return true;
1853
1854 const IRPosition &IRP = QueryingAA.getIRPosition();
1857
// Enqueue all uses of \p V; when following a stored value into a copy, the
// equivalence callback may veto individual replacement uses.
1858 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1859 for (const Use &UU : V.uses()) {
1860 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1861 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1862 "rejected by the equivalence call back: "
1863 << *UU << "!\n");
1864 return false;
1865 }
1866
1867 Worklist.push_back(&UU);
1868 }
1869 return true;
1870 };
1871
1872 AddUsers(V, /* OldUse */ nullptr);
1873
1874 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1875 << " initial uses to check\n");
1876
1877 const Function *ScopeFn = IRP.getAnchorScope();
1878 const auto *LivenessAA =
1879 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1881 : nullptr;
1882
1883 while (!Worklist.empty()) {
1884 const Use *U = Worklist.pop_back_val();
// Only certain users (see canMarkAsVisited) are safe to de-duplicate.
1885 if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1886 continue;
1888 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1889 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1890 << "\n";
1891 else
1892 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1893 << "\n";
1894 });
1895 bool UsedAssumedInformation = false;
1896 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1897 CheckBBLivenessOnly, LivenessDepClass)) {
1899 dbgs() << "[Attributor] Dead use, skip!\n");
1900 continue;
1901 }
1902 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1904 dbgs() << "[Attributor] Droppable user, skip!\n");
1905 continue;
1906 }
1907
// A use as the stored value: continue the walk at the locations the store's
// value may be reloaded from (the "potential copies").
1908 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1909 if (&SI->getOperandUse(0) == U) {
1910 if (!Visited.insert(U).second)
1911 continue;
1912 SmallSetVector<Value *, 4> PotentialCopies;
1914 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1915 /* OnlyExact */ true)) {
1917 dbgs()
1918 << "[Attributor] Value is stored, continue with "
1919 << PotentialCopies.size()
1920 << " potential copies instead!\n");
1921 for (Value *PotentialCopy : PotentialCopies)
1922 if (!AddUsers(*PotentialCopy, U))
1923 return false;
1924 continue;
1925 }
1926 }
1927 }
1928
1929 bool Follow = false;
1930 if (!Pred(*U, Follow))
1931 return false;
1932 if (!Follow)
1933 continue;
1934
1935 User &Usr = *U->getUser();
1936 AddUsers(Usr, /* OldUse */ nullptr);
1937 }
1938
1939 return true;
1940}
1941
// (review) checkForAllCallSites (AA wrapper): resolves the queried position's
// associated function and forwards to the function-based overload below.
// NOTE(review): the first signature line (1942) is missing from this listing.
1943 const AbstractAttribute &QueryingAA,
1944 bool RequireAllCallSites,
1945 bool &UsedAssumedInformation) {
1946 // We can try to determine information from
1947 // the call sites. However, this is only possible all call sites are known,
1948 // hence the function has internal linkage.
1949 const IRPosition &IRP = QueryingAA.getIRPosition();
1950 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1951 if (!AssociatedFunction) {
1952 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1953 << "\n");
1954 return false;
1955 }
1956
1957 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1958 &QueryingAA, UsedAssumedInformation);
1959}
1960
// (review) checkForAllCallSites (function overload): walks all uses of \p Fn,
// skipping dead uses, expanding pointer-cast constant expressions, and
// requiring each remaining use to be a proper (abstract) call of \p Fn whose
// matched arguments agree on type; applies \p Pred to every such call site.
// With \p RequireAllCallSites, non-local linkage or any non-callee use fails
// the query.
// NOTE(review): lines 1961, 1979, 1982, 1993, and 1999 are missing from this
// listing — the first signature line, the 'Uses' vector seeded from
// Fn.uses(), and several verbose-debug guards.
1962 const Function &Fn,
1963 bool RequireAllCallSites,
1964 const AbstractAttribute *QueryingAA,
1965 bool &UsedAssumedInformation,
1966 bool CheckPotentiallyDead) {
1967 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1968 LLVM_DEBUG(
1969 dbgs()
1970 << "[Attributor] Function " << Fn.getName()
1971 << " has no internal linkage, hence not all call sites are known\n");
1972 return false;
1973 }
1974 // Check virtual uses first.
1975 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1976 if (!CB(*this, QueryingAA))
1977 return false;
1978
// Index-based loop: the vector may grow while expanding constant expressions.
1980 for (unsigned u = 0; u < Uses.size(); ++u) {
1981 const Use &U = *Uses[u];
1983 if (auto *Fn = dyn_cast<Function>(U))
1984 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1985 << *U.getUser() << "\n";
1986 else
1987 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1988 << "\n";
1989 });
1990 if (!CheckPotentiallyDead &&
1991 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1992 /* CheckBBLivenessOnly */ true)) {
1994 dbgs() << "[Attributor] Dead use, skip!\n");
1995 continue;
1996 }
// Look through pointer casts by scanning the cast expression's users too.
1997 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1998 if (CE->isCast() && CE->getType()->isPointerTy()) {
2000 dbgs() << "[Attributor] Use, is constant cast expression, add "
2001 << CE->getNumUses() << " uses of that expression instead!\n";
2002 });
2003 for (const Use &CEU : CE->uses())
2004 Uses.push_back(&CEU);
2005 continue;
2006 }
2007 }
2008
2009 AbstractCallSite ACS(&U);
2010 if (!ACS) {
2011 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
2012 << " has non call site use " << *U.get() << " in "
2013 << *U.getUser() << "\n");
2014 return false;
2015 }
2016
2017 const Use *EffectiveUse =
2018 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
2019 if (!ACS.isCallee(EffectiveUse)) {
2020 if (!RequireAllCallSites) {
2021 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
2022 << " is not a call of " << Fn.getName()
2023 << ", skip use\n");
2024 continue;
2025 }
2026 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
2027 << " is an invalid use of " << Fn.getName() << "\n");
2028 return false;
2029 }
2030
2031 // Make sure the arguments that can be matched between the call site and the
2032 // callee argee on their type. It is unlikely they do not and it doesn't
2033 // make sense for all attributes to know/care about this.
2034 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
2035 unsigned MinArgsParams =
2036 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
2037 for (unsigned u = 0; u < MinArgsParams; ++u) {
2038 Value *CSArgOp = ACS.getCallArgOperand(u);
2039 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
2040 LLVM_DEBUG(
2041 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
2042 << u << "@" << Fn.getName() << ": "
2043 << *Fn.getArg(u)->getType() << " vs. "
2044 << *ACS.getCallArgOperand(u)->getType() << "\n");
2045 return false;
2046 }
2047 }
2048
2049 if (Pred(ACS))
2050 continue;
2051
2052 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
2053 << *ACS.getInstruction() << "\n");
2054 return false;
2055 }
2056
2057 return true;
2058}
2059
// Whether call-base-context information should be propagated for \p IRP.
// NOTE(review): the return statement (original line 2064) is missing from this
// extracted listing — presumably it returns a command-line-option value;
// verify against upstream.
2060bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
 2061 // TODO: Maintain a cache of Values that are
 2062 // on the pathway from a Argument to a Instruction that would effect the
 2063 // liveness/return state etc.
 2065}
2066
// Attributor::checkForAllReturnedValues: apply \p Pred to all values that may
// be returned by the function associated with \p QueryingAA. Returns false if
// no associated function exists, if the simplified return values cannot be
// gathered, or if \p Pred fails on any of them.
// NOTE(review): the signature line and the Values/getAssumedSimplifiedValues
// lines (original 2067, 2069, 2078-2079) are missing from this listing.
 2068 const AbstractAttribute &QueryingAA,
 2070 bool RecurseForSelectAndPHI) {
 2071
 2072 const IRPosition &IRP = QueryingAA.getIRPosition();
 2073 const Function *AssociatedFunction = IRP.getAssociatedFunction();
 2074 if (!AssociatedFunction)
 2075 return false;
 2076
 2077 bool UsedAssumedInformation = false;
 2080 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
 2081 UsedAssumedInformation, RecurseForSelectAndPHI))
 2082 return false;
 2083
// All collected (simplified) return values must satisfy the predicate.
 2084 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
 2085 return Pred(*VAC.getValue());
 2086 });
 2087}
2088
// Static helper: run \p Pred over all cached instructions whose opcode is in
// \p Opcodes, skipping (assumed) dead instructions when an Attributor \p A is
// provided and CheckPotentiallyDead is off. Returns false on the first
// predicate failure, true otherwise.
// NOTE(review): the `static bool checkForAllInstructionsImpl(` signature lines
// (original 2089-2090) are missing from this extracted listing.
 2091 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
 2092 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
 2093 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
 2094 bool CheckPotentiallyDead = false) {
 2095 for (unsigned Opcode : Opcodes) {
 2096 // Check if we have instructions with this opcode at all first.
 2097 auto *Insts = OpcodeInstMap.lookup(Opcode);
 2098 if (!Insts)
 2099 continue;
 2100
 2101 for (Instruction *I : *Insts) {
 2102 // Skip dead instructions.
 2103 if (A && !CheckPotentiallyDead &&
 2104 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
 2105 UsedAssumedInformation, CheckBBLivenessOnly)) {
 2107 dbgs() << "[Attributor] Instruction " << *I
 2108 << " is potentially dead, skip!\n";);
 2109 continue;
 2110 }
 2111
 2112 if (!Pred(*I))
 2113 return false;
 2114 }
 2115 }
 2116 return true;
 2117}
2118
// Attributor::checkForAllInstructions (Function overload): forward to
// checkForAllInstructionsImpl for \p Fn's cached opcode->instruction map.
// Requires an exact definition; returns false for declarations or null \p Fn.
// NOTE(review): the signature line (original 2119) is missing from this
// extracted listing.
 2120 const Function *Fn,
 2121 const AbstractAttribute *QueryingAA,
 2122 ArrayRef<unsigned> Opcodes,
 2123 bool &UsedAssumedInformation,
 2124 bool CheckBBLivenessOnly,
 2125 bool CheckPotentiallyDead) {
 2126 // Since we need to provide instructions we have to have an exact definition.
 2127 if (!Fn || Fn->isDeclaration())
 2128 return false;
 2129
// Liveness is only consulted when potentially-dead instructions matter and a
// querying AA exists to record the dependence against.
 2130 const IRPosition &QueryIRP = IRPosition::function(*Fn);
 2131 const auto *LivenessAA =
 2132 CheckPotentiallyDead && QueryingAA
 2133 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
 2134 : nullptr;
 2135
 2136 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
 2137 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
 2138 LivenessAA, Opcodes, UsedAssumedInformation,
 2139 CheckBBLivenessOnly, CheckPotentiallyDead))
 2140 return false;
 2141
 2142 return true;
 2143}
2144
// Attributor::checkForAllInstructions (AA overload): convenience wrapper that
// resolves the associated function of \p QueryingAA and forwards to the
// Function overload above.
// NOTE(review): the signature line (original 2145) is missing from this
// extracted listing.
 2146 const AbstractAttribute &QueryingAA,
 2147 ArrayRef<unsigned> Opcodes,
 2148 bool &UsedAssumedInformation,
 2149 bool CheckBBLivenessOnly,
 2150 bool CheckPotentiallyDead) {
 2151 const IRPosition &IRP = QueryingAA.getIRPosition();
 2152 const Function *AssociatedFunction = IRP.getAssociatedFunction();
 2153 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
 2154 UsedAssumedInformation, CheckBBLivenessOnly,
 2155 CheckPotentiallyDead);
 2156}
2157
// Attributor::checkForAllReadWriteInstructions: apply \p Pred to every cached
// read-or-write instruction of the function associated with \p QueryingAA,
// skipping (assumed) dead ones. Returns false if there is no associated
// function or \p Pred fails on any live instruction.
// NOTE(review): the signature line (original 2158) is missing from this
// extracted listing.
 2159 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
 2160 bool &UsedAssumedInformation) {
 2161 TimeTraceScope TS("checkForAllReadWriteInstructions");
 2162
 2163 const Function *AssociatedFunction =
 2164 QueryingAA.getIRPosition().getAssociatedFunction();
 2165 if (!AssociatedFunction)
 2166 return false;
 2167
 2168 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
 2169 const auto *LivenessAA =
 2170 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
 2171
 2172 for (Instruction *I :
 2173 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
 2174 // Skip dead instructions.
 2175 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
 2176 UsedAssumedInformation))
 2177 continue;
 2178
 2179 if (!Pred(*I))
 2180 return false;
 2181 }
 2182
 2183 return true;
 2184}
2185
// Attributor::runTillFixpoint: drive the abstract-interpretation fixpoint
// iteration. Repeatedly updates all abstract attributes (AAs) on a worklist,
// fast-propagates invalid states through required dependences, and requeues
// dependents of changed AAs, until the worklist is empty or the configured
// iteration limit is hit. AAs still not at a fixpoint afterwards are reverted
// to a pessimistic state.
// NOTE(review): this listing is missing several lines, e.g. the ChangedAAs
// vector declaration (original 2199), LLVM_DEBUG openers (2216, 2223, 2228),
// the indicatePessimisticFixpoint() calls (2230, 2310), and the remark
// emission (2290) — gaps in the embedded line numbers.
2186void Attributor::runTillFixpoint() {
 2187 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
 2188 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
 2189 << DG.SyntheticRoot.Deps.size()
 2190 << " abstract attributes.\n");
 2191
 2192 // Now that all abstract attributes are collected and initialized we start
 2193 // the abstract analysis.
 2194
 2195 unsigned IterationCounter = 1;
 2196 unsigned MaxIterations =
 2197 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
 2198
// Seed the worklist with every AA registered on the synthetic dependence-graph
// root.
 2200 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
 2201 Worklist.insert_range(DG.SyntheticRoot);
 2202
 2203 do {
 2204 // Remember the size to determine new attributes.
 2205 size_t NumAAs = DG.SyntheticRoot.Deps.size();
 2206 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
 2207 << ", Worklist size: " << Worklist.size() << "\n");
 2208
 2209 // For invalid AAs we can fix dependent AAs that have a required dependence,
 2210 // thereby folding long dependence chains in a single step without the need
 2211 // to run updates.
// Indexed loop: InvalidAAs may grow while we invalidate transitively.
 2212 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
 2213 AbstractAttribute *InvalidAA = InvalidAAs[u];
 2214
 2215 // Check the dependences to fast track invalidation.
 2217 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
 2218 << " has " << InvalidAA->Deps.size()
 2219 << " required & optional dependences\n");
 2220 for (auto &DepIt : InvalidAA->Deps) {
 2221 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
// Optional dependences are merely recomputed; required ones are forced to a
// (pessimistic) fixpoint immediately.
 2222 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
 2224 dbgs() << " - recompute: " << *DepAA);
 2225 Worklist.insert(DepAA);
 2226 continue;
 2227 }
 2229 << " - invalidate: " << *DepAA);
 2231 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
 2232 if (!DepAA->getState().isValidState())
 2233 InvalidAAs.insert(DepAA);
 2234 else
 2235 ChangedAAs.push_back(DepAA);
 2236 }
 2237 InvalidAA->Deps.clear();
 2238 }
 2239
 2240 // Add all abstract attributes that are potentially dependent on one that
 2241 // changed to the work list.
 2242 for (AbstractAttribute *ChangedAA : ChangedAAs) {
 2243 for (auto &DepIt : ChangedAA->Deps)
 2244 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
 2245 ChangedAA->Deps.clear();
 2246 }
 2247
 2248 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
 2249 << ", Worklist+Dependent size: " << Worklist.size()
 2250 << "\n");
 2251
 2252 // Reset the changed and invalid set.
 2253 ChangedAAs.clear();
 2254 InvalidAAs.clear();
 2255
 2256 // Update all abstract attribute in the work list and record the ones that
 2257 // changed.
 2258 for (AbstractAttribute *AA : Worklist) {
 2259 const auto &AAState = AA->getState();
 2260 if (!AAState.isAtFixpoint())
 2261 if (updateAA(*AA) == ChangeStatus::CHANGED)
 2262 ChangedAAs.push_back(AA);
 2263
 2264 // Use the InvalidAAs vector to propagate invalid states fast transitively
 2265 // without requiring updates.
 2266 if (!AAState.isValidState())
 2267 InvalidAAs.insert(AA);
 2268 }
 2269
 2270 // Add attributes to the changed set if they have been created in the last
 2271 // iteration.
 2272 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
 2273 DG.SyntheticRoot.end());
 2274
 2275 // Reset the work list and repopulate with the changed abstract attributes.
 2276 // Note that dependent ones are added above.
 2277 Worklist.clear();
 2278 Worklist.insert_range(ChangedAAs);
 2279 Worklist.insert_range(QueryAAsAwaitingUpdate);
 2280 QueryAAsAwaitingUpdate.clear();
 2281
 2282 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
 2283
// If we ran out of iterations, emit an optimization remark on some function.
 2284 if (IterationCounter > MaxIterations && !Functions.empty()) {
 2285 auto Remark = [&](OptimizationRemarkMissed ORM) {
 2286 return ORM << "Attributor did not reach a fixpoint after "
 2287 << ore::NV("Iterations", MaxIterations) << " iterations.";
 2288 };
 2289 Function *F = Functions.front();
 2291 }
 2292
 2293 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
 2294 << IterationCounter << "/" << MaxIterations
 2295 << " iterations\n");
 2296
 2297 // Reset abstract arguments not settled in a sound fixpoint by now. This
 2298 // happens when we stopped the fixpoint iteration early. Note that only the
 2299 // ones marked as "changed" *and* the ones transitively depending on them
 2300 // need to be reverted to a pessimistic state. Others might not be in a
 2301 // fixpoint state but we can use the optimistic results for them anyway.
 2302 SmallPtrSet<AbstractAttribute *, 32> Visited;
 2303 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
 2304 AbstractAttribute *ChangedAA = ChangedAAs[u];
 2305 if (!Visited.insert(ChangedAA).second)
 2306 continue;
 2307
 2308 AbstractState &State = ChangedAA->getState();
 2309 if (!State.isAtFixpoint()) {
 2311
 2312 NumAttributesTimedOut++;
 2313 }
 2314
// Propagate the reversion transitively through the dependence edges.
 2315 for (auto &DepIt : ChangedAA->Deps)
 2316 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
 2317 ChangedAA->Deps.clear();
 2318 }
 2319
 2320 LLVM_DEBUG({
 2321 if (!Visited.empty())
 2322 dbgs() << "\n[Attributor] Finalized " << Visited.size()
 2323 << " abstract attributes.\n";
 2324 });
 2325}
2326
// Attributor::registerForUpdate: queue a query AA for an update in the next
// fixpoint iteration (see QueryAAsAwaitingUpdate use in runTillFixpoint).
// Only query AAs are allowed to register.
// NOTE(review): the signature line (original 2327) is missing from this
// extracted listing.
 2328 assert(AA.isQueryAA() &&
 2329 "Non-query AAs should not be required to register for updates!");
 2330 QueryAAsAwaitingUpdate.insert(&AA);
 2331}
2332
// Attributor::manifestAttributes: walk all AAs registered on the dependence
// graph root, promote non-fixpoint states to an optimistic fixpoint, and let
// each valid, live, in-scope AA manifest its result into the IR. Returns
// CHANGED iff any AA changed the IR. Also flushes the deferred attribute-list
// updates collected in AttrsMap.
2333ChangeStatus Attributor::manifestAttributes() {
 2334 TimeTraceScope TimeScope("Attributor::manifestAttributes");
 2335 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
 2336
 2337 unsigned NumManifested = 0;
 2338 unsigned NumAtFixpoint = 0;
 2339 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
 2340 for (auto &DepAA : DG.SyntheticRoot.Deps) {
 2341 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
 2342 AbstractState &State = AA->getState();
 2343
 2344 // If there is not already a fixpoint reached, we can now take the
 2345 // optimistic state. This is correct because we enforced a pessimistic one
 2346 // on abstract attributes that were transitively dependent on a changed one
 2347 // already above.
 2348 if (!State.isAtFixpoint())
 2349 State.indicateOptimisticFixpoint();
 2350
 2351 // We must not manifest Attributes that use Callbase info.
 2352 if (AA->hasCallBaseContext())
 2353 continue;
 2354 // If the state is invalid, we do not try to manifest it.
 2355 if (!State.isValidState())
 2356 continue;
 2357
// Skip AAs anchored outside the set of functions this Attributor runs on.
 2358 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
 2359 continue;
 2360
 2361 // Skip dead code.
 2362 bool UsedAssumedInformation = false;
 2363 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
 2364 /* CheckBBLivenessOnly */ true))
 2365 continue;
 2366 // Check if the manifest debug counter that allows skipping manifestation of
 2367 // AAs
 2368 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
 2369 continue;
 2370 // Manifest the state and record if we changed the IR.
 2371 ChangeStatus LocalChange = AA->manifest(*this);
 2372 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
 2373 AA->trackStatistics();
 2374 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
 2375 << "\n");
 2376
 2377 ManifestChange = ManifestChange | LocalChange;
 2378
 2379 NumAtFixpoint++;
 2380 NumManifested += (LocalChange == ChangeStatus::CHANGED);
 2381 }
 2382
 2383 (void)NumManifested;
 2384 (void)NumAtFixpoint;
 2385 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
 2386 << " arguments while " << NumAtFixpoint
 2387 << " were in a valid fixpoint state\n");
 2388
 2389 NumAttributesManifested += NumManifested;
 2390 NumAttributesValidFixpoint += NumAtFixpoint;
 2391
// Sanity check: manifestation must not create new abstract attributes; if it
// did, dump them and abort.
 2392 (void)NumFinalAAs;
 2393 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
 2394 auto DepIt = DG.SyntheticRoot.Deps.begin();
 2395 for (unsigned u = 0; u < NumFinalAAs; ++u)
 2396 ++DepIt;
 2397 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
 2398 ++u, ++DepIt) {
 2399 errs() << "Unexpected abstract attribute: "
 2400 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
 2401 << cast<AbstractAttribute>(DepIt->getPointer())
 2402 ->getIRPosition()
 2403 .getAssociatedValue()
 2404 << "\n";
 2405 }
 2406 llvm_unreachable("Expected the final number of abstract attributes to "
 2407 "remain unchanged!");
 2408 }
 2409
// Apply the batched attribute-list changes to their function / call-site
// positions.
 2410 for (auto &It : AttrsMap) {
 2411 AttributeList &AL = It.getSecond();
 2412 const IRPosition &IRP =
 2413 isa<Function>(It.getFirst())
 2414 ? IRPosition::function(*cast<Function>(It.getFirst()))
 2415 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
 2416 IRP.setAttrList(AL);
 2417 }
 2418
 2419 return ManifestChange;
 2420}
2421
// Attributor::identifyDeadInternalFunctions: find internal (local-linkage)
// functions only reachable from other dead internal functions and mark them
// in ToBeDeletedFunctions. Uses a fixpoint: functions with a live caller are
// pinned live, then the remainder are declared dead. No-op unless function
// deletion is enabled in the configuration.
// NOTE(review): the checkForAllCallSites call opener and the Callee
// definition inside the lambda (original 2455, 2457) are missing from this
// extracted listing.
2422void Attributor::identifyDeadInternalFunctions() {
 2423 // Early exit if we don't intend to delete functions.
 2424 if (!Configuration.DeleteFns)
 2425 return;
 2426
 2427 // To avoid triggering an assertion in the lazy call graph we will not delete
 2428 // any internal library functions. We should modify the assertion though and
 2429 // allow internals to be deleted.
 2430 const auto *TLI =
 2431 isModulePass()
 2432 ? nullptr
 2433 : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
 2434 LibFunc LF;
 2435
 2436 // Identify dead internal functions and delete them. This happens outside
 2437 // the other fixpoint analysis as we might treat potentially dead functions
 2438 // as live to lower the number of iterations. If they happen to be dead, the
 2439 // below fixpoint loop will identify and eliminate them.
 2440
 2441 SmallVector<Function *, 8> InternalFns;
 2442 for (Function *F : Functions)
 2443 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
 2444 InternalFns.push_back(F);
 2445
 2446 SmallPtrSet<Function *, 8> LiveInternalFns;
 2447 bool FoundLiveInternal = true;
 2448 while (FoundLiveInternal) {
 2449 FoundLiveInternal = false;
 2450 for (Function *&F : InternalFns) {
// Entries already proven live were nulled out below.
 2451 if (!F)
 2452 continue;
 2453
 2454 bool UsedAssumedInformation = false;
// A call site is "dead" if the callee is scheduled for deletion or is an
// internal function of this run not yet proven live.
 2456 [&](AbstractCallSite ACS) {
 2458 return ToBeDeletedFunctions.count(Callee) ||
 2459 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
 2460 !LiveInternalFns.count(Callee));
 2461 },
 2462 *F, true, nullptr, UsedAssumedInformation)) {
 2463 continue;
 2464 }
 2465
 2466 LiveInternalFns.insert(F);
 2467 F = nullptr;
 2468 FoundLiveInternal = true;
 2469 }
 2470 }
 2471
// Whatever was never pinned live is dead.
 2472 for (Function *F : InternalFns)
 2473 if (F)
 2474 ToBeDeletedFunctions.insert(F);
 2475}
2476
// Attributor::cleanupIR: apply all IR changes deferred during manifestation:
// replace uses/values, turn instructions into unreachables, fold terminators,
// handle invokes with dead successors, delete instructions/blocks, and remove
// dead internal functions, updating the call graph as needed. Returns CHANGED
// iff any of the change sets was non-empty.
// NOTE(review): several lines are missing from this extracted listing (gaps
// in the embedded numbering), e.g. the DeadInsts declaration (original 2491),
// the Uses vector (2558), the Invoke2CallAllowed RHS (2581), the
// changeToUnreachable call (2614), and parts of the delete loop (2619, 2626).
2477ChangeStatus Attributor::cleanupIR() {
 2478 TimeTraceScope TimeScope("Attributor::cleanupIR");
 2479 // Delete stuff at the end to avoid invalid references and a nice order.
 2480 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
 2481 << ToBeDeletedFunctions.size() << " functions and "
 2482 << ToBeDeletedBlocks.size() << " blocks and "
 2483 << ToBeDeletedInsts.size() << " instructions and "
 2484 << ToBeChangedValues.size() << " values and "
 2485 << ToBeChangedUses.size() << " uses. To insert "
 2486 << ToBeChangedToUnreachableInsts.size()
 2487 << " unreachables.\n"
 2488 << "Preserve manifest added " << ManifestAddedBlocks.size()
 2489 << " blocks\n");
 2490
 2492 SmallVector<Instruction *, 32> TerminatorsToFold;
 2493
// Shared helper: replace use *U with NewV, honoring must-tail-call and
// `returned`-attribute constraints and queueing follow-up work.
 2494 auto ReplaceUse = [&](Use *U, Value *NewV) {
 2495 Value *OldV = U->get();
 2496
 2497 // If we plan to replace NewV we need to update it at this point.
 2498 do {
 2499 const auto &Entry = ToBeChangedValues.lookup(NewV);
 2500 if (!get<0>(Entry))
 2501 break;
 2502 NewV = get<0>(Entry);
 2503 } while (true);
 2504
 2505 Instruction *I = dyn_cast<Instruction>(U->getUser());
 2506 assert((!I || isRunOn(*I->getFunction())) &&
 2507 "Cannot replace an instruction outside the current SCC!");
 2508
 2509 // Do not replace uses in returns if the value is a must-tail call we will
 2510 // not delete.
 2511 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
 2512 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
 2513 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
 2514 return;
 2515 // If we rewrite a return and the new value is not an argument, strip the
 2516 // `returned` attribute as it is wrong now.
 2517 if (!isa<Argument>(NewV))
 2518 for (auto &Arg : RI->getFunction()->args())
 2519 Arg.removeAttr(Attribute::Returned);
 2520 }
 2521
 2522 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
 2523 << " instead of " << *OldV << "\n");
 2524 U->set(NewV);
 2525
// The old value may have become trivially dead; queue it for RAUW-free
// deletion later.
 2526 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
 2527 CGModifiedFunctions.insert(I->getFunction());
 2528 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
 2530 DeadInsts.push_back(I);
 2531 }
// Replacing an argument by undef invalidates `noundef` on both sides.
 2532 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
 2533 auto *CB = cast<CallBase>(U->getUser());
 2534 if (CB->isArgOperand(U)) {
 2535 unsigned Idx = CB->getArgOperandNo(U);
 2536 CB->removeParamAttr(Idx, Attribute::NoUndef);
 2537 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
 2538 if (Callee && Callee->arg_size() > Idx)
 2539 Callee->removeParamAttr(Idx, Attribute::NoUndef);
 2540 }
 2541 }
// NOTE(review): `CondBrInst` is not an LLVM IR class; upstream uses
// isa<BranchInst> here — likely an extraction/transcription artifact, verify
// against the LLVM repository.
 2542 if (isa<Constant>(NewV) && isa<CondBrInst>(U->getUser())) {
 2543 Instruction *UserI = cast<Instruction>(U->getUser());
 2544 if (isa<UndefValue>(NewV)) {
 2545 ToBeChangedToUnreachableInsts.insert(UserI);
 2546 } else {
 2547 TerminatorsToFold.push_back(UserI);
 2548 }
 2549 }
 2550 };
 2551
 2552 for (auto &It : ToBeChangedUses) {
 2553 Use *U = It.first;
 2554 Value *NewV = It.second;
 2555 ReplaceUse(U, NewV);
 2556 }
 2557
// Value replacements: collect uses first (skipping droppable users unless the
// replacement is final) since ReplaceUse mutates the use list.
 2559 for (auto &It : ToBeChangedValues) {
 2560 Value *OldV = It.first;
 2561 auto [NewV, Done] = It.second;
 2562 Uses.clear();
 2563 for (auto &U : OldV->uses())
 2564 if (Done || !U.getUser()->isDroppable())
 2565 Uses.push_back(&U);
 2566 for (Use *U : Uses) {
 2567 if (auto *I = dyn_cast<Instruction>(U->getUser()))
 2568 if (!isRunOn(*I->getFunction()))
 2569 continue;
 2570 ReplaceUse(U, NewV);
 2571 }
 2572 }
 2573
// Invokes with a dead unwind and/or normal destination: insert unreachables
// (and, where allowed, rely on invoke->call conversion).
 2574 for (const auto &V : InvokeWithDeadSuccessor)
 2575 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
 2576 assert(isRunOn(*II->getFunction()) &&
 2577 "Cannot replace an invoke outside the current SCC!");
 2578 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
 2579 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
 2580 bool Invoke2CallAllowed =
 2582 assert((UnwindBBIsDead || NormalBBIsDead) &&
 2583 "Invoke does not have dead successors!");
 2584 BasicBlock *BB = II->getParent();
 2585 BasicBlock *NormalDestBB = II->getNormalDest();
 2586 if (UnwindBBIsDead) {
 2587 Instruction *NormalNextIP = &NormalDestBB->front();
 2588 if (Invoke2CallAllowed) {
 2590 NormalNextIP = BB->getTerminator();
 2591 }
 2592 if (NormalBBIsDead)
 2593 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
 2594 } else {
 2595 assert(NormalBBIsDead && "Broken invariant!");
 2596 if (!NormalDestBB->getUniquePredecessor())
 2597 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
 2598 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
 2599 }
 2600 }
 2601 for (Instruction *I : TerminatorsToFold) {
 2602 assert(isRunOn(*I->getFunction()) &&
 2603 "Cannot replace a terminator outside the current SCC!");
 2604 CGModifiedFunctions.insert(I->getFunction());
 2605 ConstantFoldTerminator(I->getParent());
 2606 }
 2607 for (const auto &V : ToBeChangedToUnreachableInsts)
 2608 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
 2609 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
 2610 << "\n");
 2611 assert(isRunOn(*I->getFunction()) &&
 2612 "Cannot replace an instruction outside the current SCC!");
 2613 CGModifiedFunctions.insert(I->getFunction());
 2615 }
 2616
 2617 for (const auto &V : ToBeDeletedInsts) {
 2618 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
 2620 isRunOn(*I->getFunction())) &&
 2621 "Cannot delete an instruction outside the current SCC!");
 2622 I->dropDroppableUses();
 2623 CGModifiedFunctions.insert(I->getFunction());
 2624 if (!I->getType()->isVoidTy())
 2625 I->replaceAllUsesWith(UndefValue::get(I->getType()));
 2627 DeadInsts.push_back(I);
 2628 else
 2629 I->eraseFromParent();
 2630 }
 2631 }
 2632
// Drop entries whose weak handles were already nulled by earlier deletions.
 2633 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
 2634
 2635 LLVM_DEBUG({
 2636 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
 2637 for (auto &I : DeadInsts)
 2638 if (I)
 2639 dbgs() << " - " << *I << "\n";
 2640 });
 2641
 2643
 2644 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
 2645 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
 2646 ToBeDeletedBBs.reserve(NumDeadBlocks);
 2647 for (BasicBlock *BB : ToBeDeletedBlocks) {
 2648 assert(isRunOn(*BB->getParent()) &&
 2649 "Cannot delete a block outside the current SCC!");
 2650 CGModifiedFunctions.insert(BB->getParent());
 2651 // Do not delete BBs added during manifests of AAs.
 2652 if (ManifestAddedBlocks.contains(BB))
 2653 continue;
 2654 ToBeDeletedBBs.push_back(BB);
 2655 }
 2656 // Actually we do not delete the blocks but squash them into a single
 2657 // unreachable but untangling branches that jump here is something we need
 2658 // to do in a more generic way.
 2659 detachDeadBlocks(ToBeDeletedBBs, nullptr);
 2660 }
 2661
 2662 identifyDeadInternalFunctions();
 2663
 2664 // Rewrite the functions as requested during manifest.
 2665 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
 2666
 2667 for (Function *Fn : CGModifiedFunctions)
 2668 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
 2669 Configuration.CGUpdater.reanalyzeFunction(*Fn);
 2670
 2671 for (Function *Fn : ToBeDeletedFunctions) {
 2672 if (!Functions.count(Fn))
 2673 continue;
 2674 Configuration.CGUpdater.removeFunction(*Fn);
 2675 }
 2676
// Any non-empty change set implies the IR changed.
 2677 if (!ToBeChangedUses.empty())
 2678 ManifestChange = ChangeStatus::CHANGED;
 2679
 2680 if (!ToBeChangedToUnreachableInsts.empty())
 2681 ManifestChange = ChangeStatus::CHANGED;
 2682
 2683 if (!ToBeDeletedFunctions.empty())
 2684 ManifestChange = ChangeStatus::CHANGED;
 2685
 2686 if (!ToBeDeletedBlocks.empty())
 2687 ManifestChange = ChangeStatus::CHANGED;
 2688
 2689 if (!ToBeDeletedInsts.empty())
 2690 ManifestChange = ChangeStatus::CHANGED;
 2691
 2692 if (!InvokeWithDeadSuccessor.empty())
 2693 ManifestChange = ChangeStatus::CHANGED;
 2694
 2695 if (!DeadInsts.empty())
 2696 ManifestChange = ChangeStatus::CHANGED;
 2697
 2698 NumFnDeleted += ToBeDeletedFunctions.size();
 2699
 2700 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
 2701 << " functions after manifest.\n");
 2702
 2703#ifdef EXPENSIVE_CHECKS
 2704 for (Function *F : Functions) {
 2705 if (ToBeDeletedFunctions.count(F))
 2706 continue;
 2707 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
 2708 }
 2709#endif
 2710
 2711 return ManifestChange;
 2712}
2713
// Attributor::run: top-level driver — run the fixpoint iteration (UPDATE
// phase), then manifest attribute results (MANIFEST phase), then clean up the
// IR (CLEANUP phase). Optionally dumps/views the dependence graph and the
// call graph. Returns the combined change status.
// NOTE(review): the `ChangeStatus Attributor::run() {` signature line
// (original 2714) and a guard line (2731) are missing from this extracted
// listing.
 2715 TimeTraceScope TimeScope("Attributor::run");
 2716 AttributorCallGraph ACallGraph(*this);
 2717
 2718 if (PrintCallGraph)
 2719 ACallGraph.populateAll();
 2720
 2721 Phase = AttributorPhase::UPDATE;
 2722 runTillFixpoint();
 2723
 2724 // dump graphs on demand
 2725 if (DumpDepGraph)
 2726 DG.dumpGraph();
 2727
 2728 if (ViewDepGraph)
 2729 DG.viewGraph();
 2730
 2732 DG.print();
 2733
 2734 Phase = AttributorPhase::MANIFEST;
 2735 ChangeStatus ManifestChange = manifestAttributes();
 2736
 2737 Phase = AttributorPhase::CLEANUP;
 2738 ChangeStatus CleanupChange = cleanupIR();
 2739
 2740 if (PrintCallGraph)
 2741 ACallGraph.print();
 2742
 2743 return ManifestChange | CleanupChange;
 2744}
2745
// Attributor::updateAA: perform one update step of \p AA with a fresh
// dependence vector on the dependence stack. Dead AAs are not updated. A
// changed AA with no recorded dependences is re-run once; if it then settles
// it is promoted to an optimistic fixpoint. Dependences are remembered only
// when the AA is not yet at a fixpoint. Returns the change status of the
// (first) update.
// NOTE(review): the declarations of CS and RerunCS (original 2759, 2770,
// presumably `ChangeStatus ... = ChangeStatus::UNCHANGED;`) are missing from
// this extracted listing.
2746ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
 2747 TimeTraceScope TimeScope("updateAA", [&]() {
 2748 return AA.getName().str() +
 2749 std::to_string(AA.getIRPosition().getPositionKind());
 2750 });
 2751 assert(Phase == AttributorPhase::UPDATE &&
 2752 "We can update AA only in the update stage!");
 2753
 2754 // Use a new dependence vector for this update.
 2755 DependenceVector DV;
 2756 DependenceStack.push_back(&DV);
 2757
 2758 auto &AAState = AA.getState();
 2760 bool UsedAssumedInformation = false;
 2761 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
 2762 /* CheckBBLivenessOnly */ true))
 2763 CS = AA.update(*this);
 2764
 2765 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
 2766 // If the AA did not rely on outside information but changed, we run it
 2767 // again to see if it found a fixpoint. Most AAs do but we don't require
 2768 // them to. Hence, it might take the AA multiple iterations to get to a
 2769 // fixpoint even if it does not rely on outside information, which is fine.
 2771 if (CS == ChangeStatus::CHANGED)
 2772 RerunCS = AA.update(*this);
 2773
 2774 // If the attribute did not change during the run or rerun, and it still did
 2775 // not query any non-fix information, the state will not change and we can
 2776 // indicate that right at this point.
 2777 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
 2778 AAState.indicateOptimisticFixpoint();
 2779 }
 2780
 2781 if (!AAState.isAtFixpoint())
 2782 rememberDependences();
 2783
 2784 // Verify the stack was used properly, that is we pop the dependence vector we
 2785 // put there earlier.
 2786 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
 2787 (void)PoppedDV;
 2788 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
 2789
 2790 return CS;
 2791}
2792
// Create a "shallow wrapper" for \p F: a new function with F's original name,
// linkage, attributes, and metadata that simply tail-calls F, while F itself
// is made anonymous and internal. This lets the Attributor treat F as
// internal (all call sites known) without breaking external callers.
// NOTE(review): the signature line and the MDs/Args declarations (original
// 2793, 2816, 2825) are missing from this extracted listing.
 2794 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
 2795
 2796 Module &M = *F.getParent();
 2797 LLVMContext &Ctx = M.getContext();
 2798 FunctionType *FnTy = F.getFunctionType();
 2799
// Wrapper takes over F's external identity; it is inserted right before F.
 2800 Function *Wrapper =
 2801 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
 2802 F.setName(""); // set the inside function anonymous
 2803 M.getFunctionList().insert(F.getIterator(), Wrapper);
 2804
 2805 F.setLinkage(GlobalValue::InternalLinkage);
 2806
 2807 F.replaceAllUsesWith(Wrapper);
 2808 assert(F.use_empty() && "Uses remained after wrapper was created!");
 2809
 2810 // Move the COMDAT section to the wrapper.
 2811 // TODO: Check if we need to keep it for F as well.
 2812 Wrapper->setComdat(F.getComdat());
 2813 F.setComdat(nullptr);
 2814
 2815 // Copy all metadata and attributes but keep them on F as well.
 2817 F.getAllMetadata(MDs);
 2818 for (auto MDIt : MDs)
 2819 Wrapper->addMetadata(MDIt.first, *MDIt.second);
 2820 Wrapper->setAttributes(F.getAttributes());
 2821
 2822 // Create the call in the wrapper.
 2823 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
 2824
// Forward the wrapper's arguments 1:1, reusing F's argument names.
 2826 Argument *FArgIt = F.arg_begin();
 2827 for (Argument &Arg : Wrapper->args()) {
 2828 Args.push_back(&Arg);
 2829 Arg.setName((FArgIt++)->getName());
 2830 }
 2831
 2832 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
 2833 CI->setTailCall(true);
 2834 CI->addFnAttr(Attribute::NoInline);
 2835 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
 2836
 2837 NumFnShallowWrappersCreated++;
 2838}
2839
// Attributor::isInternalizable: a function can be internalized only if it has
// a body and is not already local.
// NOTE(review): the signature line (original 2840) and the second half of the
// condition (2842, presumably an interposable-linkage check) are missing from
// this extracted listing.
 2841 if (F.isDeclaration() || F.hasLocalLinkage() ||
 2843 return false;
 2844 return true;
 2845}
2846
// Attributor::internalizeFunction: internalize a single function \p F via
// internalizeFunctions; returns the internalized copy, or nullptr if deep
// wrapping is disabled (and not forced) or \p F is not internalizable.
// NOTE(review): the signature line (original 2847) is missing from this
// extracted listing.
 2848 if (!AllowDeepWrapper && !Force)
 2849 return nullptr;
 2850 if (!isInternalizable(F))
 2851 return nullptr;
 2852
 2853 SmallPtrSet<Function *, 2> FnSet = {&F};
 2854 DenseMap<Function *, Function *> InternalizedFns;
 2855 internalizeFunctions(FnSet, InternalizedFns);
 2856
 2857 return InternalizedFns[&F];
 2858}
2859
// Attributor::internalizeFunctions: for every function in \p FnSet create an
// internal ".internalized" copy (cloned body, metadata, DSO-local) and
// redirect all uses to it, except uses from callers that were themselves just
// internalized. Fails (returns false) if any function is not internalizable.
// NOTE(review): the signature lines and several statements (original
// 2860-2861, 2863, 2883, 2887, 2891-2892, 2895) are missing from this
// extracted listing.
 2862 for (Function *F : FnSet)
 2864 return false;
 2865
 2866 FnMap.clear();
 2867 // Generate the internalized version of each function.
 2868 for (Function *F : FnSet) {
 2869 Module &M = *F->getParent();
 2870 FunctionType *FnTy = F->getFunctionType();
 2871
 2872 // Create a copy of the current function
 2873 Function *Copied =
 2874 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
 2875 F->getName() + ".internalized");
 2876 ValueToValueMapTy VMap;
 2877 auto *NewFArgIt = Copied->arg_begin();
// Map old arguments to new ones (and carry over the argument names) for the
// cloner.
 2878 for (auto &Arg : F->args()) {
 2879 auto ArgName = Arg.getName();
 2880 NewFArgIt->setName(ArgName);
 2881 VMap[&Arg] = &(*NewFArgIt++);
 2882 }
 2884
 2885 // Copy the body of the original function to the new one
 2886 CloneFunctionInto(Copied, F, VMap,
 2888
 2889 // Set the linkage and visibility late as CloneFunctionInto has some
 2890 // implicit requirements.
 2893
 2894 // Copy metadata
 2896 F->getAllMetadata(MDs);
 2897 for (auto MDIt : MDs)
 2898 if (!Copied->hasMetadata())
 2899 Copied->addMetadata(MDIt.first, *MDIt.second);
 2900
 2901 M.getFunctionList().insert(F->getIterator(), Copied);
 2902 Copied->setDSOLocal(true);
 2903 FnMap[F] = Copied;
 2904 }
 2905
 2906 // Replace all uses of the old function with the new internalized function
 2907 // unless the caller is a function that was just internalized.
 2908 for (Function *F : FnSet) {
 2909 auto &InternalizedFn = FnMap[F];
 2910 auto IsNotInternalized = [&](Use &U) -> bool {
 2911 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
 2912 return !FnMap.lookup(CB->getCaller());
 2913 return false;
 2914 };
 2915 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
 2916 }
 2917
 2918 return true;
 2919}
2920
// Attributor::isValidFunctionSignatureRewrite: decide whether argument \p Arg
// of its parent function may be rewritten with \p ReplacementTypes. Rejects:
// rewriting disabled by configuration, var-arg functions, complex
// argument-passing attributes (nest/sret/inalloca/preallocated), call sites
// that cast the function or mismatch the arg count, callback calls, and
// must-tail calls (at call sites and inside the function).
// NOTE(review): the signature line (original 2921) and part of the return-type
// comparison (2934) are missing from this extracted listing.
 2922 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
 2923
 2924 if (!Configuration.RewriteSignatures)
 2925 return false;
 2926
 2927 Function *Fn = Arg.getParent();
 2928 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
 2929 // Forbid the call site to cast the function return type. If we need to
 2930 // rewrite these functions we need to re-create a cast for the new call site
 2931 // (if the old had uses).
 2932 if (!ACS.getCalledFunction() ||
 2933 ACS.getInstruction()->getType() !=
 2935 return false;
 2936 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
 2937 Fn->getType())
 2938 return false;
 2939 if (ACS.getNumArgOperands() != Fn->arg_size())
 2940 return false;
 2941 // Forbid must-tail calls for now.
 2942 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
 2943 };
 2944
 2945 // Avoid var-arg functions for now.
 2946 if (Fn->isVarArg()) {
 2947 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
 2948 return false;
 2949 }
 2950
 2951 // Avoid functions with complicated argument passing semantics.
 2952 AttributeList FnAttributeList = Fn->getAttributes();
 2953 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
 2954 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
 2955 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
 2956 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
 2957 LLVM_DEBUG(
 2958 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
 2959 return false;
 2960 }
 2961
 2962 // Avoid callbacks for now.
 2963 bool UsedAssumedInformation = false;
 2964 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
 2965 UsedAssumedInformation,
 2966 /* CheckPotentiallyDead */ true)) {
 2967 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
 2968 return false;
 2969 }
 2970
 2971 auto InstPred = [](Instruction &I) {
 2972 if (auto *CI = dyn_cast<CallInst>(&I))
 2973 return !CI->isMustTailCall();
 2974 return true;
 2975 };
 2976
 2977 // Forbid must-tail calls for now.
 2978 // TODO:
 2979 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
 2980 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
 2981 nullptr, {Instruction::Call},
 2982 UsedAssumedInformation)) {
 2983 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
 2984 return false;
 2985 }
 2986
 2987 return true;
 2988}
 2989
// Attributor::registerFunctionSignatureRewrite — records a pending rewrite of
// one argument of a function into zero or more replacement arguments, applied
// later by rewriteFunctionSignatures(). Returns true if the rewrite was
// registered, false if an existing registration is preferred.
// NOTE(review): the rendering dropped lines 2990 (function signature) and
// 2992-2993 (the CalleeRepairCB/ACSRepairCB callback parameters) — verify
// against upstream Attributor.cpp.
 2991 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
 2994 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
 2995 << Arg.getParent()->getName() << " with "
 2996 << ReplacementTypes.size() << " replacements\n");
 2997 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
 2998 "Cannot register an invalid rewrite");
 2999
 3000 Function *Fn = Arg.getParent();
// NOTE(review): dropped line 3001 presumably declared the ARIs reference
// (SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =) that the
// next line initializes from ArgumentReplacementMap[Fn]; confirm upstream.
 3002 ArgumentReplacementMap[Fn];
// One (possibly null) replacement-info slot per formal argument.
 3003 if (ARIs.empty())
 3004 ARIs.resize(Fn->arg_size());
 3005
 3006 // If we have a replacement already with less than or equal new arguments,
 3007 // ignore this request.
 3008 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
 3009 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
 3010 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
 3011 return false;
 3012 }
 3013
 3014 // If we have a replacement already but we like the new one better, delete
 3015 // the old.
 3016 ARI.reset();
 3017
 3018 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
 3019 << Arg.getParent()->getName() << " with "
 3020 << ReplacementTypes.size() << " replacements\n");
 3021
 3022 // Remember the replacement.
 3023 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
 3024 std::move(CalleeRepairCB),
 3025 std::move(ACSRepairCB)));
 3026
 3027 return true;
 3028}
3029
3030bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
3031 bool Result = true;
3032#ifndef NDEBUG
3033 if (SeedAllowList.size() != 0)
3034 Result = llvm::is_contained(SeedAllowList, AA.getName());
3035 Function *Fn = AA.getAnchorScope();
3036 if (FunctionSeedAllowList.size() != 0 && Fn)
3038#endif
3039 return Result;
3040}
3041
// Attributor::rewriteFunctionSignatures — materializes every rewrite that was
// registered via registerFunctionSignatureRewrite(): for each affected
// function a new function with the replacement argument list is created, the
// body and all call sites are transplanted, and the old function is left
// behind (empty) for later cleanup. Functions touched in the process are
// recorded in \p ModifiedFns so they get reanalyzed.
// NOTE(review): the rendering dropped several declaration lines in this
// function (3044, 3053, 3123, 3133, 3247) — marked inline below; verify
// against upstream Attributor.cpp.
 3042 ChangeStatus Attributor::rewriteFunctionSignatures(
 3043 SmallSetVector<Function *, 8> &ModifiedFns) {
// NOTE(review): dropped line 3044 presumably declared
// `ChangeStatus Changed = ChangeStatus::UNCHANGED;` (see `return Changed;`
// at the end) — confirm upstream.
 3045
 3046 for (auto &It : ArgumentReplacementMap) {
 3047 Function *OldFn = It.getFirst();
 3048
 3049 // Deleted functions do not require rewrites.
 3050 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
 3051 continue;
 3052
// NOTE(review): dropped line 3053 presumably declared the `ARIs` reference
// bound to It.getSecond() — confirm upstream.
 3054 It.getSecond();
 3055 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
 3056
 3057 SmallVector<Type *, 16> NewArgumentTypes;
 3058 SmallVector<AttributeSet, 16> NewArgumentAttributes;
 3059
 3060 // Collect replacement argument types and copy over existing attributes.
 3061 AttributeList OldFnAttributeList = OldFn->getAttributes();
 3062 for (Argument &Arg : OldFn->args()) {
 3063 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
 3064 ARIs[Arg.getArgNo()]) {
// Replaced arguments start with empty attribute sets; the repair callbacks
// are responsible for anything beyond that.
 3065 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
 3066 ARI->ReplacementTypes.end());
 3067 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
 3068 AttributeSet());
 3069 } else {
 3070 NewArgumentTypes.push_back(Arg.getType());
 3071 NewArgumentAttributes.push_back(
 3072 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
 3073 }
 3074 }
 3075
// Track the widest vector argument so the min-legal-vector-width attribute
// can be kept consistent on the new function and its callers.
 3076 uint64_t LargestVectorWidth = 0;
 3077 for (auto *I : NewArgumentTypes)
 3078 if (auto *VT = dyn_cast<llvm::VectorType>(I))
 3079 LargestVectorWidth =
 3080 std::max(LargestVectorWidth,
 3081 VT->getPrimitiveSizeInBits().getKnownMinValue());
 3082
 3083 FunctionType *OldFnTy = OldFn->getFunctionType();
 3084 Type *RetTy = OldFnTy->getReturnType();
 3085
 3086 // Construct the new function type using the new arguments types.
 3087 FunctionType *NewFnTy =
 3088 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
 3089
 3090 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
 3091 << "' from " << *OldFn->getFunctionType() << " to "
 3092 << *NewFnTy << "\n");
 3093
 3094 // Create the new function body and insert it into the module.
 3095 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
 3096 OldFn->getAddressSpace(), "");
 3097 Functions.insert(NewFn);
 3098 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
 3099 NewFn->takeName(OldFn);
 3100 NewFn->copyAttributesFrom(OldFn);
 3101
 3102 // Patch the pointer to LLVM function in debug info descriptor.
 3103 NewFn->setSubprogram(OldFn->getSubprogram());
 3104 OldFn->setSubprogram(nullptr);
 3105
 3106 // Recompute the parameter attributes list based on the new arguments for
 3107 // the function.
 3108 LLVMContext &Ctx = OldFn->getContext();
 3109 NewFn->setAttributes(AttributeList::get(
 3110 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
 3111 NewArgumentAttributes));
 3112 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
 3113
 3114 // Remove argmem from the memory effects if we have no more pointer
 3115 // arguments, or they are readnone.
 3116 MemoryEffects ME = NewFn->getMemoryEffects();
 3117 int ArgNo = -1;
 3118 if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
 3119 ++ArgNo;
 3120 return !T->isPtrOrPtrVectorTy() ||
 3121 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
 3122 })) {
// NOTE(review): dropped line 3123 presumably applied the reduced memory
// effects to NewFn (e.g. setMemoryEffects without argMem) — confirm upstream.
 3124 }
 3125
 3126 // Since we have now created the new function, splice the body of the old
 3127 // function right into the new function, leaving the old rotting hulk of the
 3128 // function empty.
 3129 NewFn->splice(NewFn->begin(), OldFn);
 3130
 3131 // Set of all "call-like" instructions that invoke the old function mapped
 3132 // to their new replacements.
// NOTE(review): dropped line 3133 presumably declared `CallSitePairs`
// (a SmallVector of {old call, new call} pairs) — confirm upstream.
 3134
 3135 // Callback to create a new "call-like" instruction for a given one.
 3136 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
 3137 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
 3138 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
 3139
 3140 // Collect the new argument operands for the replacement call site.
 3141 SmallVector<Value *, 16> NewArgOperands;
 3142 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
 3143 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
 3144 unsigned NewFirstArgNum = NewArgOperands.size();
 3145 (void)NewFirstArgNum; // only used inside assert.
 3146 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
 3147 ARIs[OldArgNum]) {
 3148 if (ARI->ACSRepairCB)
 3149 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
 3150 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
 3151 NewArgOperands.size() &&
 3152 "ACS repair callback did not provide as many operand as new "
 3153 "types were registered!");
 3154 // TODO: Expose the attribute set to the ACS repair callback
 3155 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
 3156 AttributeSet());
 3157 } else {
 3158 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
 3159 NewArgOperandAttributes.push_back(
 3160 OldCallAttributeList.getParamAttrs(OldArgNum));
 3161 }
 3162 }
 3163
 3164 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
 3165 "Mismatch # argument operands vs. # argument operand attributes!");
 3166 assert(NewArgOperands.size() == NewFn->arg_size() &&
 3167 "Mismatch # argument operands vs. # function arguments!");
 3168
 3169 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
 3170 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
 3171
 3172 // Create a new call or invoke instruction to replace the old one.
 3173 CallBase *NewCB;
 3174 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
 3175 NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
 3176 II->getUnwindDest(), NewArgOperands,
 3177 OperandBundleDefs, "", OldCB->getIterator());
 3178 } else {
 3179 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
 3180 "", OldCB->getIterator());
 3181 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
 3182 NewCB = NewCI;
 3183 }
 3184
 3185 // Copy over various properties and the new attributes.
 3186 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
 3187 NewCB->setCallingConv(OldCB->getCallingConv());
 3188 NewCB->takeName(OldCB);
 3189 NewCB->setAttributes(AttributeList::get(
 3190 Ctx, OldCallAttributeList.getFnAttrs(),
 3191 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
 3192
 3193 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
 3194 LargestVectorWidth);
 3195
// Defer erasing the old call until all call sites have been visited.
 3196 CallSitePairs.push_back({OldCB, NewCB});
 3197 return true;
 3198 };
 3199
 3200 // Use the CallSiteReplacementCreator to create replacement call sites.
 3201 bool UsedAssumedInformation = false;
 3202 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
 3203 true, nullptr, UsedAssumedInformation,
 3204 /* CheckPotentiallyDead */ true);
 3205 (void)Success;
 3206 assert(Success && "Assumed call site replacement to succeed!");
 3207
 3208 // Rewire the arguments.
 3209 Argument *OldFnArgIt = OldFn->arg_begin();
 3210 Argument *NewFnArgIt = NewFn->arg_begin();
 3211 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
 3212 ++OldArgNum, ++OldFnArgIt) {
 3213 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
 3214 ARIs[OldArgNum]) {
 3215 if (ARI->CalleeRepairCB)
 3216 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
// An argument replaced by zero new arguments has no successor value; its
// remaining uses are poisoned.
 3217 if (ARI->ReplacementTypes.empty())
 3218 OldFnArgIt->replaceAllUsesWith(
 3219 PoisonValue::get(OldFnArgIt->getType()));
 3220 NewFnArgIt += ARI->ReplacementTypes.size();
 3221 } else {
 3222 NewFnArgIt->takeName(&*OldFnArgIt);
 3223 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
 3224 ++NewFnArgIt;
 3225 }
 3226 }
 3227
 3228 // Eliminate the instructions *after* we visited all of them.
 3229 for (auto &CallSitePair : CallSitePairs) {
 3230 CallBase &OldCB = *CallSitePair.first;
 3231 CallBase &NewCB = *CallSitePair.second;
 3232 assert(OldCB.getType() == NewCB.getType() &&
 3233 "Cannot handle call sites with different types!");
 3234 ModifiedFns.insert(OldCB.getFunction());
 3235 OldCB.replaceAllUsesWith(&NewCB);
 3236 OldCB.eraseFromParent();
 3237 }
 3238
 3239 // Replace the function in the call graph (if any).
 3240 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
 3241
 3242 // If the old function was modified and needed to be reanalyzed, the new one
 3243 // does now.
 3244 if (ModifiedFns.remove(OldFn))
 3245 ModifiedFns.insert(NewFn);
 3246
// NOTE(review): dropped line 3247 presumably set
// `Changed = ChangeStatus::CHANGED;` — confirm upstream.
 3248 }
 3249
 3250 return Changed;
 3251}
3252
3253void InformationCache::initializeInformationCache(const Function &CF,
3254 FunctionInfo &FI) {
3255 // As we do not modify the function here we can remove the const
3256 // withouth breaking implicit assumptions. At the end of the day, we could
3257 // initialize the cache eagerly which would look the same to the users.
3258 Function &F = const_cast<Function &>(CF);
3259
3260 FI.IsKernel = F.hasFnAttribute("kernel");
3261
3262 // Walk all instructions to find interesting instructions that might be
3263 // queried by abstract attributes during their initialization or update.
3264 // This has to happen before we create attributes.
3265
3266 DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3267
3268 // Add \p V to the assume uses map which track the number of uses outside of
3269 // "visited" assumes. If no outside uses are left the value is added to the
3270 // assume only use vector.
3271 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3272 SmallVector<const Instruction *> Worklist;
3273 if (auto *I = dyn_cast<Instruction>(&V))
3274 Worklist.push_back(I);
3275 while (!Worklist.empty()) {
3276 const Instruction *I = Worklist.pop_back_val();
3277 std::optional<short> &NumUses = AssumeUsesMap[I];
3278 if (!NumUses)
3279 NumUses = I->getNumUses();
3280 NumUses = *NumUses - /* this assume */ 1;
3281 if (*NumUses != 0)
3282 continue;
3283 AssumeOnlyValues.insert(I);
3284 for (const Value *Op : I->operands())
3285 if (auto *OpI = dyn_cast<Instruction>(Op))
3286 Worklist.push_back(OpI);
3287 }
3288 };
3289
3290 for (Instruction &I : instructions(&F)) {
3291 bool IsInterestingOpcode = false;
3292
3293 // To allow easy access to all instructions in a function with a given
3294 // opcode we store them in the InfoCache. As not all opcodes are interesting
3295 // to concrete attributes we only cache the ones that are as identified in
3296 // the following switch.
3297 // Note: There are no concrete attributes now so this is initially empty.
3298 switch (I.getOpcode()) {
3299 default:
3300 assert(!isa<CallBase>(&I) &&
3301 "New call base instruction type needs to be known in the "
3302 "Attributor.");
3303 break;
3304 case Instruction::Call:
3305 // Calls are interesting on their own, additionally:
3306 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3307 // For `must-tail` calls we remember the caller and callee.
3308 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3309 AssumeOnlyValues.insert(Assume);
3310 fillMapFromAssume(*Assume, KnowledgeMap);
3311 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3312 } else if (cast<CallInst>(I).isMustTailCall()) {
3313 FI.ContainsMustTailCall = true;
3314 if (auto *Callee = dyn_cast_if_present<Function>(
3315 cast<CallInst>(I).getCalledOperand()))
3316 getFunctionInfo(*Callee).CalledViaMustTail = true;
3317 }
3318 [[fallthrough]];
3319 case Instruction::CallBr:
3320 case Instruction::Invoke:
3321 case Instruction::CleanupRet:
3322 case Instruction::CatchSwitch:
3323 case Instruction::AtomicRMW:
3324 case Instruction::AtomicCmpXchg:
3325 case Instruction::UncondBr:
3326 case Instruction::CondBr:
3327 case Instruction::Resume:
3328 case Instruction::Ret:
3329 case Instruction::Load:
3330 // The alignment of a pointer is interesting for loads.
3331 case Instruction::Store:
3332 // The alignment of a pointer is interesting for stores.
3333 case Instruction::Alloca:
3334 case Instruction::AddrSpaceCast:
3335 IsInterestingOpcode = true;
3336 }
3337 if (IsInterestingOpcode) {
3338 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3339 if (!Insts)
3340 Insts = new (Allocator) InstructionVectorTy();
3341 Insts->push_back(&I);
3342 }
3343 if (I.mayReadOrWriteMemory())
3344 FI.RWInsts.push_back(&I);
3345 }
3346
3347 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3348 isInlineViable(F).isSuccess())
3349 InlineableFunctions.insert(&F);
3350}
3351
3352InformationCache::FunctionInfo::~FunctionInfo() {
3353 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3354 // manually destroy them.
3355 for (auto &It : OpcodeInstMap)
3356 It.getSecond()->~InstructionVectorTy();
3357}
3358
// Body of a getter returning the cached set of functions that may be reached
// through indirect calls; only meaningful under the closed-world assumption.
// NOTE(review): the rendering dropped the signature lines (3359-3360,
// presumably `InformationCache::getIndirectlyCallableFunctions(Attributor &A)
// const`) — verify against upstream.
 3361 assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
 3362 return IndirectlyCallableFunctions;
 3363}
3364
3365std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
3366 if (IsTargetGPU())
3367 return 0;
3368 return std::nullopt;
3369}
3370
// Attributor::recordDependence — remembers that \p FromAA's state influenced
// \p ToAA during the current update, so ToAA is revisited when FromAA changes.
// NOTE(review): the rendering dropped line 3371 with the signature start
// (presumably `void Attributor::recordDependence(const AbstractAttribute
// &FromAA,`) — verify against upstream.
 3372 const AbstractAttribute &ToAA,
 3373 DepClassTy DepClass) {
 3374 if (DepClass == DepClassTy::NONE)
 3375 return;
 3376 // If we are outside of an update, thus before the actual fixpoint iteration
 3377 // started (= when we create AAs), we do not track dependences because we will
 3378 // put all AAs into the initial worklist anyway.
 3379 if (DependenceStack.empty())
 3380 return;
// A fixpointed source cannot change anymore, so no dependence is needed.
 3381 if (FromAA.getState().isAtFixpoint())
 3382 return;
 3383 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
 3384}
3385
3386void Attributor::rememberDependences() {
3387 assert(!DependenceStack.empty() && "No dependences to remember!");
3388
3389 for (DepInfo &DI : *DependenceStack.back()) {
3390 assert((DI.DepClass == DepClassTy::REQUIRED ||
3391 DI.DepClass == DepClassTy::OPTIONAL) &&
3392 "Expected required or optional dependence (1 bit)!");
3393 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3394 DepAAs.insert(AbstractAttribute::DepTy(
3395 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3396 }
3397}
3398
3399template <Attribute::AttrKind AK, typename AAType>
3400void Attributor::checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs,
3401 bool SkipHasAttrCheck) {
3402 bool IsKnown;
3403 if (SkipHasAttrCheck || !Attrs.hasAttribute(AK))
3404 if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3405 if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3406 IsKnown))
3407 getOrCreateAAFor<AAType>(IRP);
3408}
3409
// Body of Attributor::identifyDefaultAbstractAttributes(Function &F): seeds
// the default set of abstract attributes for \p F — function-level,
// return-position, argument, call-site, load/store, and alloca positions.
// NOTE(review): the rendering dropped many hyperlink-only lines here (the
// signature line 3410 and numerous getOrCreateAAFor<...>/IRPosition
// declarations, visible as gaps in the embedded line numbers); the remaining
// comments describe what each dropped call seeded. Verify against upstream.
 3411 assert(!F.isDeclaration());
 3412
// Each function is seeded at most once.
 3413 if (!VisitedFunctions.insert(&F).second)
 3414 return;
 3415
 3416 // In non-module runs we need to look at the call sites of a function to
 3417 // determine if it is part of a must-tail call edge. This will influence what
 3418 // attributes we can derive.
 3419 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
 3420 if (!isModulePass() && !FI.CalledViaMustTail) {
 3421 for (const Use &U : F.uses())
 3422 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
 3423 if (CB->isCallee(&U) && CB->isMustTailCall())
 3424 FI.CalledViaMustTail = true;
 3425 }
 3426
// NOTE(review): dropped line 3427 presumably declared
// `IRPosition FPos = IRPosition::function(F);` used throughout below.
 3428 bool IsIPOAmendable = isFunctionIPOAmendable(F);
 3429 auto Attrs = F.getAttributes();
 3430 auto FnAttrs = Attrs.getFnAttrs();
 3431
 3432 // Check for dead BasicBlocks in every function.
 3433 // We need dead instruction detection because we do not want to deal with
 3434 // broken IR in which SSA rules do not apply.
 3436
 3437 // Every function might contain instructions that cause "undefined
 3438 // behavior".
 3440
 3441 // Every function might be applicable for Heap-To-Stack conversion.
 3444
 3445 // Every function might be "must-progress".
 3446 checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
 3447
 3448 // Every function might be "no-free".
 3449 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
 3450
 3451 // Every function might be "will-return".
 3452 checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
 3453
 3454 // Every function might be marked "nosync"
 3455 checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
 3456
 3457 // Everything that is visible from the outside (=function, argument, return
 3458 // positions), cannot be changed if the function is not IPO amendable. We can
 3459 // however analyse the code inside.
 3460 if (IsIPOAmendable) {
 3461
 3462 // Every function can be nounwind.
 3463 checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
 3464
 3465 // Every function might be "no-return".
 3466 checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
 3467
 3468 // Every function might be "no-recurse".
 3469 checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
 3470
 3471 // Every function can be "non-convergent".
 3472 if (Attrs.hasFnAttr(Attribute::Convergent))
 3474
 3475 // Every function might be "readnone/readonly/writeonly/...".
 3477
 3478 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
 3480
 3481 // Every function can track active assumptions.
 3483
 3484 // If we're not using a dynamic mode for float, there's nothing worthwhile
 3485 // to infer. This misses the edge case denormal-fp-math="dynamic" and
 3486 // denormal-fp-math-f32=something, but that likely has no real world use.
 3487 DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
 3488 if (Mode.Input == DenormalMode::Dynamic ||
 3489 Mode.Output == DenormalMode::Dynamic)
 3491
 3492 // Return attributes are only appropriate if the return type is non void.
 3493 Type *ReturnType = F.getReturnType();
 3494 if (!ReturnType->isVoidTy()) {
 3496 AttributeSet RetAttrs = Attrs.getRetAttrs();
 3497
 3498 // Every returned value might be dead.
 3500
 3501 // Every function might be simplified.
 3502 bool UsedAssumedInformation = false;
 3503 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
 3505
 3506 // Every returned value might be marked noundef.
 3507 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
 3508
 3509 if (ReturnType->isPointerTy()) {
 3510
 3511 // Every function with pointer return type might be marked align.
 3513
 3514 // Every function with pointer return type might be marked nonnull.
 3515 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
 3516
 3517 // Every function with pointer return type might be marked noalias.
 3518 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
 3519
 3520 // Every function with pointer return type might be marked
 3521 // dereferenceable.
 3523 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
 3525 }
 3526 }
 3527 }
 3528
// Seed argument positions.
 3529 for (Argument &Arg : F.args()) {
 3530 IRPosition ArgPos = IRPosition::argument(Arg);
 3531 auto ArgNo = Arg.getArgNo();
 3532 AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
 3533
 3534 if (!IsIPOAmendable) {
 3535 if (Arg.getType()->isPointerTy())
 3536 // Every argument with pointer type might be marked nofree.
 3537 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
 3538 continue;
 3539 }
 3540
 3541 // Every argument might be simplified. We have to go through the
 3542 // Attributor interface though as outside AAs can register custom
 3543 // simplification callbacks.
 3544 bool UsedAssumedInformation = false;
 3545 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
 3547
 3548 // Every argument might be dead.
 3550
 3551 // Every argument might be marked noundef.
 3552 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
 3553
 3554 if (Arg.getType()->isPointerTy()) {
 3555 // Every argument with pointer type might be marked nonnull.
 3556 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
 3557
 3558 // Every argument with pointer type might be marked noalias.
 3559 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
 3560
 3561 // Every argument with pointer type might be marked dereferenceable.
 3563
 3564 // Every argument with pointer type might be marked align.
 3566
 3567 // Every argument with pointer type might be marked nocapture.
 3568 checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
 3569 ArgPos, ArgAttrs, /*SkipHasAttrCheck=*/true);
 3570
 3571 // Every argument with pointer type might be marked
 3572 // "readnone/readonly/writeonly/..."
 3574
 3575 // Every argument with pointer type might be marked nofree.
 3576 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
 3577
 3578 // Every argument with pointer type might be privatizable (or
 3579 // promotable)
 3581 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
 3583 }
 3584 }
 3585
// Seed call-site positions for every call-like instruction in F.
 3586 auto CallSitePred = [&](Instruction &I) -> bool {
 3587 auto &CB = cast<CallBase>(I);
 3588 IRPosition CBInstPos = IRPosition::inst(CB);
 3590
 3591 // Call sites might be dead if they do not have side effects and no live
 3592 // users. The return value might be dead if there are no live users.
 3593 getOrCreateAAFor<AAIsDead>(CBInstPos);
 3594
 3595 Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
 3596 // TODO: Even if the callee is not known now we might be able to simplify
 3597 // the call/callee.
 3598 if (!Callee) {
 3600 return true;
 3601 }
 3602
 3603 // Every call site can track active assumptions.
 3605
 3606 // Skip declarations except if annotations on their call sites were
 3607 // explicitly requested.
 3608 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
 3609 !Callee->hasMetadata(LLVMContext::MD_callback))
 3610 return true;
 3611
 3612 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
 3614 bool UsedAssumedInformation = false;
 3615 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
 3617
 3618 if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
 3620 }
 3621
 3622 const AttributeList &CBAttrs = CBFnPos.getAttrList();
 3623 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
 3624
 3626 AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
 3627
 3628 // Every call site argument might be dead.
 3630
 3631 // Call site argument might be simplified. We have to go through the
 3632 // Attributor interface though as outside AAs can register custom
 3633 // simplification callbacks.
 3634 bool UsedAssumedInformation = false;
 3635 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
 3637
 3638 // Every call site argument might be marked "noundef".
 3639 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
 3640
 3641 Type *ArgTy = CB.getArgOperand(I)->getType();
 3642
 3643 if (!ArgTy->isPointerTy()) {
 3644 if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
 3646
 3647 continue;
 3648 }
 3649
 3650 // Call site argument attribute "non-null".
 3651 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
 3652
 3653 // Call site argument attribute "captures(none)".
 3654 checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
 3655 CBArgPos, CBArgAttrs, /*SkipHasAttrCheck=*/true);
 3656
 3657 // Call site argument attribute "no-alias".
 3658 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
 3659
 3660 // Call site argument attribute "dereferenceable".
 3662
 3663 // Call site argument attribute "align".
 3664 getOrCreateAAFor<AAAlign>(CBArgPos);
 3665
 3666 // Call site argument attribute
 3667 // "readnone/readonly/writeonly/..."
 3668 if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
 3670
 3671 // Call site argument attribute "nofree".
 3672 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
 3673 }
 3674 return true;
 3675 };
 3676
 3677 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
 3678 [[maybe_unused]] bool Success;
 3679 bool UsedAssumedInformation = false;
 3681 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
 3682 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
 3683 (unsigned)Instruction::Call},
 3684 UsedAssumedInformation);
 3685 assert(Success && "Expected the check call to be successful!");
 3686
// Seed alignment/address-space AAs for load/store pointer operands and
// simplification of loaded/stored values.
 3687 auto LoadStorePred = [&](Instruction &I) -> bool {
 3688 if (auto *LI = dyn_cast<LoadInst>(&I)) {
 3689 getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
 3690 if (SimplifyAllLoads)
 3692 UsedAssumedInformation, AA::Intraprocedural);
 3694 IRPosition::value(*LI->getPointerOperand()));
 3696 IRPosition::value(*LI->getPointerOperand()));
 3697 } else {
 3698 auto &SI = cast<StoreInst>(I);
 3700 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
 3701 UsedAssumedInformation, AA::Intraprocedural);
 3702 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
 3704 IRPosition::value(*SI.getPointerOperand()));
 3705 }
 3706 return true;
 3707 };
 3709 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
 3710 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
 3711 UsedAssumedInformation);
 3712 assert(Success && "Expected the check call to be successful!");
 3713
 3714 // AllocaInstPredicate
 3715 auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
 3717 return true;
 3718 };
 3719
 3721 nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
 3722 {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
 3723 assert(Success && "Expected the check call to be successful!");
 3724}
3725
// Body of Attributor::isClosedWorldModule(): the command line flag, when
// given, overrides the configuration; otherwise closed-world only holds for
// module passes configured as such.
// NOTE(review): the rendering dropped the signature line 3726 — verify.
 3727 if (CloseWorldAssumption.getNumOccurrences())
 3728 return CloseWorldAssumption;
 3729 return isModulePass() && Configuration.IsClosedWorldModule;
 3730}
3731
3732/// Helpers to ease debugging through output streams and print calls.
3733///
3734///{
// Body of the ChangeStatus stream operator; prints "changed"/"unchanged".
// NOTE(review): signature line 3735 dropped by the rendering.
 3736 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
 3737}
3738
// Body of the position-kind stream operator, mapping each IRPosition kind to
// a short mnemonic.
// NOTE(review): the rendering dropped the signature line (3739) and every
// `case IRP_...:` label line between the returns — verify against upstream.
 3740 switch (AP) {
 3742 return OS << "inv";
 3744 return OS << "flt";
 3746 return OS << "fn_ret";
 3748 return OS << "cs_ret";
 3750 return OS << "fn";
 3752 return OS << "cs";
 3754 return OS << "arg";
 3756 return OS << "cs_arg";
 3757 }
 3758 llvm_unreachable("Unknown attribute position!");
 3759}
3760
// Body of the IRPosition stream operator: prints
// "{kind:value [anchor@argno]}" plus the call-base context if present.
// NOTE(review): signature line 3761 dropped by the rendering.
 3762 const Value &AV = Pos.getAssociatedValue();
 3763 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
 3764 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
 3765
 3766 if (Pos.hasCallBaseContext())
 3767 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
 3768 return OS << "}";
 3769}
3770
// Body of the IntegerRangeState stream operator: prints
// "range-state(width)<known / assumed>" followed by the base state.
// NOTE(review): signature line 3771 dropped by the rendering.
 3772 OS << "range-state(" << S.getBitWidth() << ")<";
 3773 S.getKnown().print(OS);
 3774 OS << " / ";
 3775 S.getAssumed().print(OS);
 3776 OS << ">";
 3777
 3778 return OS << static_cast<const AbstractState &>(S);
3780
// Body of the AbstractState stream operator: "top" for invalid states,
// "fix" once a fixpoint is reached, empty otherwise.
// NOTE(review): signature line 3781 dropped by the rendering.
 3782 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
 3783}
3784
// Body of the AbstractAttribute stream operator: delegates to AA::print.
// NOTE(review): signature line 3785 dropped by the rendering.
 3786 AA.print(OS);
 3787 return OS;
 3788}
3789
// Body of the PotentialConstantIntValuesState stream operator: prints either
// "full-set" (invalid state) or the assumed constant set, with "undef" when
// undef is contained.
// NOTE(review): signature lines 3790-3791 dropped by the rendering.
 3792 OS << "set-state(< {";
 3793 if (!S.isValidState())
 3794 OS << "full-set";
 3795 else {
 3796 for (const auto &It : S.getAssumedSet())
 3797 OS << It << ", ";
 3798 if (S.undefIsContained())
 3799 OS << "undef ";
 3800 }
 3801 OS << "} >)";
 3802
 3803 return OS;
 3804}
3805
// PotentialLLVMValuesState stream operator: like the constant-int variant but
// values are {value, scope} pairs; functions print as "@name[scope]".
// NOTE(review): the first signature line (3806) was dropped by the rendering.
 3807 const PotentialLLVMValuesState &S) {
 3808 OS << "set-state(< {";
 3809 if (!S.isValidState())
 3810 OS << "full-set";
 3811 else {
 3812 for (const auto &It : S.getAssumedSet()) {
 3813 if (auto *F = dyn_cast<Function>(It.first.getValue()))
 3814 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
 3815 else
 3816 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
 3817 }
 3818 if (S.undefIsContained())
 3819 OS << "undef ";
 3820 }
 3821 OS << "} >)";
 3822
 3823 return OS;
 3824}
3825
// Body of AbstractAttribute::print: "[Name] for CtxI '<inst>' at position
// <pos> with state <state>".
// NOTE(review): signature line 3826 dropped by the rendering.
 3827 OS << "[";
 3828 OS << getName();
 3829 OS << "] for CtxI ";
 3830
 3831 if (auto *I = getCtxI()) {
 3832 OS << "'";
 3833 I->print(OS);
 3834 OS << "'";
 3835 } else
 3836 OS << "<<null inst>>";
 3837
 3838 OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
 3839 << '\n';
 3840}
3841
// Body of AbstractAttribute::printWithDeps: prints this AA followed by every
// AA it updates (its recorded dependences).
// NOTE(review): signature line 3842 dropped by the rendering.
 3843 print(OS);
 3844
 3845 for (const auto &DepAA : Deps) {
 3846 auto *AA = DepAA.getPointer();
 3847 OS << " updates ";
 3848 AA->print(OS);
 3849 }
 3850
 3851 OS << '\n';
 3852}
3853
// AAPointerInfo::Access stream operator: "[kind] remote-inst [via local-inst]
// [content | <unknown>]".
// NOTE(review): the first signature line (3854) was dropped by the rendering.
 3855 const AAPointerInfo::Access &Acc) {
 3856 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
 3857 if (Acc.getLocalInst() != Acc.getRemoteInst())
 3858 OS << " via " << *Acc.getLocalInst();
 3859 if (Acc.getContent()) {
// A present-but-null content optional means "written value unknown".
 3860 if (*Acc.getContent())
 3861 OS << " [" << **Acc.getContent() << "]";
 3862 else
 3863 OS << " [ <unknown> ]";
 3864 }
 3865 return OS;
 3866}
3867///}
3868
3869/// ----------------------------------------------------------------------------
3870/// Pass (Manager) Boilerplate
3871/// ----------------------------------------------------------------------------
3872
// runAttributorOnFunctions — driver used by the Attributor passes: builds the
// configuration, optionally wraps/internalizes functions, seeds default
// abstract attributes for eligible functions, and runs the fixpoint
// iteration. Returns whether the module changed.
// NOTE(review): the rendering dropped the signature line (3873) and several
// statement lines (3895, 3916, 3919, 3931, 3976) — marked inline; verify
// against upstream Attributor.cpp.
 3874 SetVector<Function *> &Functions,
 3875 AnalysisGetter &AG,
 3876 CallGraphUpdater &CGUpdater,
 3877 bool DeleteFns, bool IsModulePass) {
 3878 if (Functions.empty())
 3879 return false;
 3880
 3881 LLVM_DEBUG({
 3882 dbgs() << "[Attributor] Run on module with " << Functions.size()
 3883 << " functions:\n";
 3884 for (Function *Fn : Functions)
 3885 dbgs() << " - " << Fn->getName() << "\n";
 3886 });
 3887
 3888 // Create an Attributor and initially empty information cache that is filled
 3889 // while we identify default attribute opportunities.
 3890 AttributorConfig AC(CGUpdater);
 3891 AC.IsModulePass = IsModulePass;
 3892 AC.DeleteFns = DeleteFns;
 3893
 3894 /// Tracking callback for specialization of indirect calls.
// NOTE(review): dropped line 3895 presumably declared the map type
// (DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>).
 3896 IndirectCalleeTrackingMap;
 3897 if (MaxSpecializationPerCB.getNumOccurrences()) {
 3898 AC.IndirectCalleeSpecializationCallback =
 3899 [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
 3900 Function &Callee, unsigned) {
 3901 if (MaxSpecializationPerCB == 0)
 3902 return false;
// Once the per-call-site budget is exhausted, only already-recorded
// callees may still be specialized.
 3903 auto &Set = IndirectCalleeTrackingMap[&CB];
 3904 if (!Set)
 3905 Set = std::make_unique<SmallPtrSet<Function *, 8>>();
 3906 if (Set->size() >= MaxSpecializationPerCB)
 3907 return Set->contains(&Callee);
 3908 Set->insert(&Callee);
 3909 return true;
 3910 };
 3911 }
 3912
 3913 Attributor A(Functions, InfoCache, AC);
 3914
 3915 // Create shallow wrappers for all functions that are not IPO amendable
 3917 for (Function *F : Functions)
 3918 if (!A.isFunctionIPOAmendable(*F))
// NOTE(review): dropped line 3919 presumably called
// Attributor::createShallowWrapper(*F) — confirm upstream.
 3920
 3921 // Internalize non-exact functions
 3922 // TODO: for now we eagerly internalize functions without calculating the
 3923 // cost, we need a cost interface to determine whether internalizing
 3924 // a function is "beneficial"
 3925 if (AllowDeepWrapper) {
 3926 unsigned FunSize = Functions.size();
 3927 for (unsigned u = 0; u < FunSize; u++) {
 3928 Function *F = Functions[u];
 3929 if (!F->isDeclaration() && !F->isDefinitionExact() && !F->use_empty() &&
 3930 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
// NOTE(review): dropped line 3931 presumably declared `Function *NewF`
// initialized from Attributor::internalizeFunction(*F) — confirm upstream.
 3932 assert(NewF && "Could not internalize function.");
 3933 Functions.insert(NewF);
 3934
 3935 // Update call graph
 3936 CGUpdater.replaceFunctionWith(*F, *NewF);
 3937 for (const Use &U : NewF->uses())
 3938 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
 3939 auto *CallerF = CB->getCaller();
 3940 CGUpdater.reanalyzeFunction(*CallerF);
 3941 }
 3942 }
 3943 }
 3944 }
 3945
 3946 for (Function *F : Functions) {
 3947 if (F->isDeclaration())
 3948 continue;
 3949
 3950 if (F->hasExactDefinition())
 3951 NumFnWithExactDefinition++;
 3952 else
 3953 NumFnWithoutExactDefinition++;
 3954
 3955 // We look at internal functions only on-demand but if any use is not a
 3956 // direct call or outside the current set of analyzed functions, we have
 3957 // to do it eagerly.
 3958 if (F->hasLocalLinkage()) {
 3959 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
 3960 const auto *CB = dyn_cast<CallBase>(U.getUser());
 3961 return CB && CB->isCallee(&U) &&
 3962 Functions.count(const_cast<Function *>(CB->getCaller()));
 3963 }))
 3964 continue;
 3965 }
 3966
 3967 // Populate the Attributor with abstract attribute opportunities in the
 3968 // function and the information cache with IR information.
 3969 A.identifyDefaultAbstractAttributes(*F);
 3970 }
 3971
 3972 ChangeStatus Changed = A.run();
 3973
 3974 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
 3975 << " functions, result: " << Changed << ".\n");
// NOTE(review): dropped line 3976 presumably was
// `return Changed == ChangeStatus::CHANGED;` — confirm upstream.
 3977 }
3978
3980 SetVector<Function *> &Functions,
3981 AnalysisGetter &AG,
3982 CallGraphUpdater &CGUpdater,
3984 bool IsModulePass) {
3985 if (Functions.empty())
3986 return false;
3987
3988 LLVM_DEBUG({
3989 dbgs() << "[AttributorLight] Run on module with " << Functions.size()
3990 << " functions:\n";
3991 for (Function *Fn : Functions)
3992 dbgs() << " - " << Fn->getName() << "\n";
3993 });
3994
3995 // Create an Attributor and initially empty information cache that is filled
3996 // while we identify default attribute opportunities.
3997 AttributorConfig AC(CGUpdater);
3998 AC.IsModulePass = IsModulePass;
3999 AC.DeleteFns = false;
4000 DenseSet<const char *> Allowed(
4007 AC.Allowed = &Allowed;
4008 AC.UseLiveness = false;
4009
4010 Attributor A(Functions, InfoCache, AC);
4011
4012 for (Function *F : Functions) {
4013 if (F->isDeclaration())
4014 continue;
4015
4016 if (F->hasExactDefinition())
4017 NumFnWithExactDefinition++;
4018 else
4019 NumFnWithoutExactDefinition++;
4020
4021 // We look at internal functions only on-demand but if any use is not a
4022 // direct call or outside the current set of analyzed functions, we have
4023 // to do it eagerly.
4024 if (AC.UseLiveness && F->hasLocalLinkage()) {
4025 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
4026 const auto *CB = dyn_cast<CallBase>(U.getUser());
4027 return CB && CB->isCallee(&U) &&
4028 Functions.count(const_cast<Function *>(CB->getCaller()));
4029 }))
4030 continue;
4031 }
4032
4033 // Populate the Attributor with abstract attribute opportunities in the
4034 // function and the information cache with IR information.
4035 A.identifyDefaultAbstractAttributes(*F);
4036 }
4037
4038 ChangeStatus Changed = A.run();
4039
4041 // Invalidate analyses for modified functions so that we don't have to
4042 // invalidate all analyses for all functions in this SCC.
4043 PreservedAnalyses FuncPA;
4044 // We haven't changed the CFG for modified functions.
4045 FuncPA.preserveSet<CFGAnalyses>();
4046 for (Function *Changed : A.getModifiedFunctions()) {
4047 FAM.invalidate(*Changed, FuncPA);
4048 // Also invalidate any direct callers of changed functions since analyses
4049 // may care about attributes of direct callees. For example, MemorySSA
4050 // cares about whether or not a call's callee modifies memory and queries
4051 // that through function attributes.
4052 for (auto *U : Changed->users()) {
4053 if (auto *Call = dyn_cast<CallBase>(U)) {
4054 if (Call->getCalledFunction() == Changed)
4055 FAM.invalidate(*Call->getFunction(), FuncPA);
4056 }
4057 }
4058 }
4059 }
4060 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
4061 << " functions, result: " << Changed << ".\n");
4063}
4064
// Debug aid: hand the abstract-attribute dependency graph to llvm::ViewGraph,
// which renders it (via the GraphTraits/DOTGraphTraits specializations below)
// and opens it in an external viewer.
4065void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
4066
// NOTE(review): the function header (original line 4067, presumably
// `void AADepGraph::dumpGraph() {`) was elided by the extraction.
// Writes the dependency graph to "<prefix>_<N>.dot", where N counts calls so
// repeated dumps do not overwrite each other.
// CallTimes is std::atomic, so concurrent dumps get distinct file indices.
4068  static std::atomic<int> CallTimes;
4069  std::string Prefix;
4070
4071  if (!DepGraphDotFileNamePrefix.empty())
// NOTE(review): the then-branch (original line 4072, presumably
// `Prefix = DepGraphDotFileNamePrefix;`) was elided — TODO confirm.
4073  else
4074    Prefix = "dep_graph";
4075  std::string Filename =
4076      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
4077
4078  outs() << "Dependency graph dump to " << Filename << ".\n";
4079
4080  std::error_code EC;
4081
// NOTE(review): the stream construction (original line 4082, presumably a
// `raw_fd_ostream File(Filename, EC, ...);`) was elided — the `File` used
// below and the EC check refer to it. TODO confirm.
4083  if (!EC)
4084    llvm::WriteGraph(File, this);
4085
4086  CallTimes++;
4087}
4088
// NOTE(review): the function header (original line 4089, presumably
// `void AADepGraph::print() {`) was elided by the extraction.
// Prints every abstract attribute hanging off the synthetic root node,
// together with its dependencies, to stdout.
4090  for (auto DepAA : SyntheticRoot.Deps)
4091    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
4092}
4093
// NOTE(review): the function header (original lines 4094-4096, presumably
// `PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM)`
// plus the FunctionAnalysisManager proxy lookup that defines `FAM`) was elided
// by the extraction — TODO confirm.
// Module-pass driver: runs the full Attributor over every function in the
// module, with function deletion enabled.
4097  AnalysisGetter AG(FAM);
4098
4099  SetVector<Function *> Functions;
4100  for (Function &F : M)
4101    Functions.insert(&F);
4102
4103  CallGraphUpdater CGUpdater;
4104  BumpPtrAllocator Allocator;
4105  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4106  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4107                               /* DeleteFns */ true, /* IsModulePass */ true)) {
4108    // FIXME: Think about passes we will preserve and add them here.
4109    return PreservedAnalyses::none();
4110  }
4111  return PreservedAnalyses::all();
4112}
4113
// NOTE(review): the beginning of the signature (original lines 4114-4115,
// presumably `PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
// CGSCCAnalysisManager &AM, ...)`) was elided by the extraction.
// CGSCC driver: runs the full Attributor on one SCC; deletion is disabled
// because removing functions from inside a CGSCC pass is not allowed here.
4116                                          LazyCallGraph &CG,
4117                                          CGSCCUpdateResult &UR) {
// NOTE(review): original line 4118 (presumably the `FunctionAnalysisManager
// &FAM =` half of this statement) was elided — TODO confirm.
4119      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4120  AnalysisGetter AG(FAM);
4121
4122  SetVector<Function *> Functions;
4123  for (LazyCallGraph::Node &N : C)
4124    Functions.insert(&N.getFunction());
4125
4126  if (Functions.empty())
4127    return PreservedAnalyses::all();
4128
  // All SCC members live in the same module; any element works as the anchor.
4129  Module &M = *Functions.back()->getParent();
4130  CallGraphUpdater CGUpdater;
4131  CGUpdater.initialize(CG, C, AM, UR);
4132  BumpPtrAllocator Allocator;
4133  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4134  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4135                               /* DeleteFns */ false,
4136                               /* IsModulePass */ false)) {
4137    // FIXME: Think about passes we will preserve and add them here.
// NOTE(review): original lines 4138-4139 (presumably the construction of `PA`
// and a preserve of the FunctionAnalysisManagerCGSCCProxy) were elided —
// TODO confirm.
4140    return PA;
4141  }
4142  return PreservedAnalyses::all();
4143}
4144
// NOTE(review): the function header (original lines 4145-4148, presumably
// `PreservedAnalyses AttributorLightPass::run(Module &M,
// ModuleAnalysisManager &AM)` plus the proxy lookup defining `FAM`) was
// elided by the extraction — TODO confirm.
// Module driver for the lightweight Attributor; uses cached-only analyses so
// no new function analyses are computed on its behalf.
4149  AnalysisGetter AG(FAM, /* CachedOnly */ true);
4150
4151  SetVector<Function *> Functions;
4152  for (Function &F : M)
4153    Functions.insert(&F);
4154
4155  CallGraphUpdater CGUpdater;
4156  BumpPtrAllocator Allocator;
4157  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4158  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4159                                    /* IsModulePass */ true)) {
// NOTE(review): original lines 4160/4162/4164 (presumably the construction of
// `PA` and the preserve calls matching these two comments) were elided —
// TODO confirm.
4161    // We have not added or removed functions.
4163    // We already invalidated all relevant function analyses above.
4165    return PA;
4166  }
4167  return PreservedAnalyses::all();
4168}
4169
// NOTE(review): the beginning of the signature (original lines 4170-4171,
// presumably `PreservedAnalyses AttributorLightCGSCCPass::run(
// LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, ...)`) was elided by the
// extraction.
// CGSCC driver for the lightweight Attributor (restricted AA set, no
// deletion, no liveness) — see runAttributorLightOnFunctions.
4172                                               LazyCallGraph &CG,
4173                                               CGSCCUpdateResult &UR) {
// NOTE(review): original line 4174 (presumably `FunctionAnalysisManager &FAM =`)
// was elided — TODO confirm.
4175      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4176  AnalysisGetter AG(FAM);
4177
4178  SetVector<Function *> Functions;
4179  for (LazyCallGraph::Node &N : C)
4180    Functions.insert(&N.getFunction());
4181
4182  if (Functions.empty())
4183    return PreservedAnalyses::all();
4184
  // All SCC members live in the same module; any element works as the anchor.
4185  Module &M = *Functions.back()->getParent();
4186  CallGraphUpdater CGUpdater;
4187  CGUpdater.initialize(CG, C, AM, UR);
4188  BumpPtrAllocator Allocator;
4189  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4190  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4191                                    /* IsModulePass */ false)) {
// NOTE(review): original lines 4192/4194/4196 (presumably the construction of
// `PA` and the preserve calls matching these two comments) were elided —
// TODO confirm.
4193    // We have not added or removed functions.
4195    // We already invalidated all relevant function analyses above.
4197    return PA;
4198  }
4199  return PreservedAnalyses::all();
4200}
// Graph-traits glue so llvm::ViewGraph / llvm::WriteGraph (used by
// AADepGraph::viewGraph / dumpGraph above) can walk and label the AA
// dependency graph.
4201namespace llvm {
4202
// NOTE(review): an entire block (original lines 4203-4218, presumably the
// `template <> struct GraphTraits<AADepGraphNode *>` specialization with its
// NodeRef / child-iterator typedefs) was elided by the extraction — TODO
// confirm against the real source.
4219
// NOTE(review): original line 4221 (presumably `struct GraphTraits<AADepGraph
// *> : public GraphTraits<AADepGraphNode *> {`) and lines 4224-4225 (the
// ChildIteratorType / nodes_iterator typedefs) were elided — TODO confirm.
4220template <>
4222  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
4223
4226
4227  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
4228
4229  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
4230};
4231
// NOTE(review): original line 4233 (presumably the `DOTGraphTraits(bool
// IsSimple = false)` forwarding constructor) was elided — TODO confirm.
4232template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
4234
  // Label each node with the abstract attribute's own textual representation.
4235  static std::string getNodeLabel(const AADepGraphNode *Node,
4236                                  const AADepGraph *DG) {
4237    std::string AAString;
4238    raw_string_ostream O(AAString);
4239    Node->print(O);
4240    return AAString;
4241  }
4242};
4243
4244} // end namespace llvm
aarch64 falkor hwpf fix Falkor HW Prefetch Fix Late Phase
static unsigned getIntrinsicID(const SDNode *N)
@ Generic
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static cl::opt< bool > AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden, cl::desc("Allow the Attributor to create shallow " "wrappers for non-exact definitions."), cl::init(false))
bool canMarkAsVisited(const User *Usr)
#define VERBOSE_DEBUG_TYPE
static cl::opt< bool > EnableHeapToStack("enable-heap-to-stack-conversion", cl::init(true), cl::Hidden)
static cl::list< std::string > SeedAllowList("attributor-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of attribute names that are " "allowed to be seeded."), cl::CommaSeparated)
static bool runAttributorOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, bool DeleteFns, bool IsModulePass)
}
static bool getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I, SmallSetVector< Value *, 4 > &PotentialCopies, SmallSetVector< Instruction *, 4 > *PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact)
static bool runAttributorLightOnFunctions(InformationCache &InfoCache, SetVector< Function * > &Functions, AnalysisGetter &AG, CallGraphUpdater &CGUpdater, FunctionAnalysisManager &FAM, bool IsModulePass)
static cl::opt< unsigned, true > MaxInitializationChainLengthX("attributor-max-initialization-chain-length", cl::Hidden, cl::desc("Maximal number of chained initializations (to avoid stack overflows)"), cl::location(MaxInitializationChainLength), cl::init(1024))
static cl::opt< unsigned > MaxSpecializationPerCB("attributor-max-specializations-per-call-base", cl::Hidden, cl::desc("Maximal number of callees specialized for " "a call base"), cl::init(UINT32_MAX))
static cl::opt< bool > SimplifyAllLoads("attributor-simplify-all-loads", cl::Hidden, cl::desc("Try to simplify all loads."), cl::init(true))
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, AttributeSet AttrSet, bool ForceReplace, AttrBuilder &AB)
Return true if the information provided by Attr was added to the attribute set AttrSet.
static cl::opt< bool > ViewDepGraph("attributor-view-dep-graph", cl::Hidden, cl::desc("View the dependency graph."), cl::init(false))
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old)
Return true if New is equal or worse than Old.
static cl::opt< bool > AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden, cl::desc("Allow the Attributor to use IP information " "derived from non-exact functions via cloning"), cl::init(false))
static cl::opt< bool > DumpDepGraph("attributor-dump-dep-graph", cl::Hidden, cl::desc("Dump the dependency graph to dot files."), cl::init(false))
static cl::opt< bool > PrintCallGraph("attributor-print-call-graph", cl::Hidden, cl::desc("Print Attributor's internal call graph"), cl::init(false))
static bool checkForAllInstructionsImpl(Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, function_ref< bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
static cl::opt< bool > PrintDependencies("attributor-print-dep", cl::Hidden, cl::desc("Print attribute dependencies"), cl::init(false))
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool RequireReadNone, bool &IsKnown)
static cl::opt< std::string > DepGraphDotFileNamePrefix("attributor-depgraph-dot-filename-prefix", cl::Hidden, cl::desc("The prefix used for the CallGraph dot file names."))
static cl::opt< bool > AnnotateDeclarationCallSites("attributor-annotate-decl-cs", cl::Hidden, cl::desc("Annotate call sites of function declarations."), cl::init(false))
static cl::opt< unsigned > SetFixpointIterations("attributor-max-iterations", cl::Hidden, cl::desc("Maximal number of fixpoint iterations."), cl::init(32))
static cl::list< std::string > FunctionSeedAllowList("attributor-function-seed-allow-list", cl::Hidden, cl::desc("Comma separated list of function names that are " "allowed to be seeded."), cl::CommaSeparated)
static cl::opt< bool > EnableCallSiteSpecific("attributor-enable-call-site-specific-deduction", cl::Hidden, cl::desc("Allow the Attributor to do call site specific analysis"), cl::init(false))
static cl::opt< bool > CloseWorldAssumption("attributor-assume-closed-world", cl::Hidden, cl::desc("Should a closed world be assumed, or not. Default if not set."))
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
Contains a collection of routines for determining if a given instruction is guaranteed to execute if ...
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static constexpr StringLiteral Filename
FunctionAnalysisManager FAM
This file defines the PointerIntPair class.
static StringRef getName(Value *V)
Remove Loads Into Fake Uses
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition Debug.h:72
void print(OutputBuffer &OB) const
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
Class for arbitrary precision integers.
Definition APInt.h:78
CallBase * getInstruction() const
Return the underlying instruction.
bool isCallbackCall() const
Return true if this ACS represents a callback call.
const Use & getCalleeUseForCallback() const
Return the use of the callee value in the underlying instruction.
static LLVM_ABI void getCallbackUses(const CallBase &CB, SmallVectorImpl< const Use * > &CallbackUses)
Add operand uses of CB that represent callback uses into CallbackUses.
bool isCallee(Value::const_user_iterator UI) const
Return true if UI is the use that defines the callee of this ACS.
Value * getCallArgOperand(Argument &Arg) const
Return the operand of the underlying instruction associated with Arg.
int getCallArgOperandNo(Argument &Arg) const
Return the operand index of the underlying instruction associated with Arg.
unsigned getNumArgOperands() const
Return the number of parameters of the callee.
Function * getCalledFunction() const
Return the function being called if this is a direct call, otherwise return null (if it's an indirect...
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition Analysis.h:50
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
const Function * getParent() const
Definition Argument.h:44
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition Argument.h:50
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
Get the array size.
Definition ArrayRef.h:141
bool empty() const
Check if the array is empty.
Definition ArrayRef.h:136
This class stores enough information to efficiently remove some attributes from an existing AttrBuild...
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
LLVM_ABI MemoryEffects getMemoryEffects() const
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI Attribute getAttribute(Attribute::AttrKind Kind) const
Return the attribute object.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI bool isStringAttribute() const
Return true if the attribute is a string (target-dependent) attribute.
LLVM_ABI bool isEnumAttribute() const
Return true if the attribute is an Attribute::AttrKind type.
LLVM_ABI bool isIntAttribute() const
Return true if the attribute is an integer attribute.
LLVM_ABI uint64_t getValueAsInt() const
Return the attribute's value as an integer.
LLVM_ABI bool isConstantRangeAttribute() const
Return true if the attribute is a ConstantRange attribute.
LLVM_ABI StringRef getKindAsString() const
Return the attribute's kind as a string.
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
LLVM_ABI Attribute::AttrKind getKindAsEnum() const
Return the attribute's kind as an enum (Attribute::AttrKind).
LLVM_ABI MemoryEffects getMemoryEffects() const
Returns memory effects.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition Attributes.h:124
@ None
No attributes have been set.
Definition Attributes.h:126
LLVM Basic Block Representation.
Definition BasicBlock.h:62
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
const Instruction & front() const
Definition BasicBlock.h:484
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
CallingConv::ID getCallingConv() const
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph.
LLVM_ABI void replaceFunctionWith(Function &OldFn, Function &NewFn)
Replace OldFn in the call graph (and SCC) with NewFn.
LLVM_ABI void reanalyzeFunction(Function &Fn)
After an CGSCC pass changes a function in ways that affect the call graph, this method can be called ...
void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR)
Initializers for usage outside of a CGSCC pass, inside a CGSCC pass in the old and new pass manager (...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
A constant value that is initialized with an expression using other constant values.
Definition Constants.h:1297
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static bool shouldExecute(CounterInfo &Counter)
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
Definition DenseMap.h:205
bool empty() const
Definition DenseMap.h:109
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void splice(Function::iterator ToIt, Function *FromF)
Transfer all blocks from FromF to this function at ToIt.
Definition Function.h:761
const BasicBlock & getEntryBlock() const
Definition Function.h:809
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
iterator_range< arg_iterator > args()
Definition Function.h:892
DISubprogram * getSubprogram() const
Get the attached subprogram.
MemoryEffects getMemoryEffects() const
Definition Function.cpp:859
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
Definition Function.cpp:740
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
iterator begin()
Definition Function.h:853
arg_iterator arg_begin()
Definition Function.h:868
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
Definition Function.h:357
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
size_t arg_size() const
Definition Function.h:901
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
void setMemoryEffects(MemoryEffects ME)
Definition Function.cpp:862
Argument * getArg(unsigned i) const
Definition Function.h:886
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
void copyAttributesFrom(const Function *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a Function) from the ...
Definition Function.cpp:843
bool hasMetadata() const
Return true if this GlobalObject has any metadata attached to it.
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition Globals.cpp:337
LinkageTypes getLinkage() const
bool hasLocalLinkage() const
void setLinkage(LinkageTypes LT)
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
PointerType * getType() const
Global values are always pointers.
@ DefaultVisibility
The GV is visible.
Definition GlobalValue.h:68
void setVisibility(VisibilityTypes V)
static bool isInterposableLinkage(LinkageTypes Linkage)
Whether the definition of this global may be replaced by something non-equivalent at link time.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
An instruction for reading from memory.
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Definition ModRef.h:143
bool doesAccessArgPointees() const
Whether this function may access argument memory.
Definition ModRef.h:260
static LLVM_ABI MemoryLocation getForSource(const MemTransferInst *MTI)
Return a location representing the source of a memory transfer.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static LLVM_ABI std::optional< MemoryLocation > getOrNone(const Instruction *Inst)
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
const FunctionListType & getFunctionList() const
Get the Module's list of functions (constant).
Definition Module.h:598
PointerIntPair - This class implements a pair of a pointer and small integer.
void * getOpaqueValue() const
PointerTy getPointer() const
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
A vector that has set insertion semantics.
Definition SetVector.h:57
ArrayRef< value_type > getArrayRef() const
Definition SetVector.h:91
bool remove(const value_type &X)
Remove an item from the set vector.
Definition SetVector.h:181
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:103
void insert_range(Range &&R)
Definition SetVector.h:176
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
Definition SetVector.h:262
typename vector_type::const_iterator iterator
Definition SetVector.h:72
void clear()
Completely clear the SetVector.
Definition SetVector.h:267
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:106
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
A visitor class for IR positions.
LLVM_ABI SubsumingPositionIterator(const IRPosition &IRP)
Provides information about what library functions are available for the current target.
The TimeTraceScope is a helper class to call the begin and end functions of the time trace profiler.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:258
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition Value.cpp:709
iterator_range< use_iterator > uses()
Definition Value.h:380
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
iterator insert(iterator where, pointer New)
Definition ilist.h:165
A raw_ostream that writes to a file descriptor.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
A raw_ostream that writes to an std::string.
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
Definition Attributor.h:165
LLVM_ABI bool isAssumedReadNone(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readnone.
LLVM_ABI bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP, const AbstractAttribute &QueryingAA, bool &IsKnown)
Return true if IRP is readonly.
LLVM_ABI std::optional< Value * > combineOptionalValuesInAAValueLatice(const std::optional< Value * > &A, const std::optional< Value * > &B, Type *Ty)
Return the combination of A and B such that the result is a possible value of both.
LLVM_ABI bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache)
Return true if the value of VAC is valid at the position of VAC, that is a constant,...
LLVM_ABI bool isAssumedThreadLocalObject(Attributor &A, Value &Obj, const AbstractAttribute &QueryingAA)
Return true if Obj is assumed to be a thread local object.
LLVM_ABI bool isGPUConstantAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU constant address space for the target triple...
LLVM_ABI bool isGPUGenericAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU generic address space for the target triple ...
LLVM_ABI bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA, const Value &V, bool ForAnalysisOnly=true)
Return true if V is dynamically unique, that is, there are no two "instances" of V at runtime with di...
LLVM_ABI bool getPotentialCopiesOfStoredValue(Attributor &A, StoreInst &SI, SmallSetVector< Value *, 4 > &PotentialCopies, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values of the one stored by SI into PotentialCopies.
LLVM_ABI bool isGPUSharedAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU shared address space for the target triple i...
LLVM_ABI bool isGPULocalAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU local/private address space for the target t...
LLVM_ABI bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is potentially affected by a barrier.
SmallPtrSet< Instruction *, 4 > InstExclusionSetTy
Definition Attributor.h:166
LLVM_ABI bool isGPU(const Module &M)
Return true iff M targets a GPU (and we can use GPU AS reasoning).
LLVM_ABI Constant * getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA, Value &Obj, Type &Ty, const TargetLibraryInfo *TLI, const DataLayout &DL, RangeTy *RangePtr=nullptr)
Return the initial value of Obj with type Ty if that is a constant.
ValueScope
Flags to distinguish intra-procedural queries from potentially inter-procedural queries.
Definition Attributor.h:194
@ Intraprocedural
Definition Attributor.h:195
@ Interprocedural
Definition Attributor.h:196
LLVM_ABI bool isValidInScope(const Value &V, const Function *Scope)
Return true if V is a valid value in Scope, that is a constant or an instruction/argument of Scope.
LLVM_ABI bool isGPUGlobalAddressSpace(const Module &M, unsigned AS)
Check if the given address space AS corresponds to a GPU global address space for the target triple i...
LLVM_ABI bool isPotentiallyReachable(Attributor &A, const Instruction &FromI, const Instruction &ToI, const AbstractAttribute &QueryingAA, const AA::InstExclusionSetTy *ExclusionSet=nullptr, std::function< bool(const Function &F)> GoBackwardsCB=nullptr)
Return true if ToI is potentially reachable from FromI without running into any instruction in Exclus...
LLVM_ABI bool isNoSyncInst(Attributor &A, const Instruction &I, const AbstractAttribute &QueryingAA)
Return true if I is a nosync instruction.
bool hasAssumedIRAttr(Attributor &A, const AbstractAttribute *QueryingAA, const IRPosition &IRP, DepClassTy DepClass, bool &IsKnown, bool IgnoreSubsumingPositions=false, const AAType **AAPtr=nullptr)
Helper to avoid creating an AA for IR Attributes that might already be set.
LLVM_ABI bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI, SmallSetVector< Value *, 4 > &PotentialValues, SmallSetVector< Instruction *, 4 > &PotentialValueOrigins, const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation, bool OnlyExact=false)
Collect all potential values LI could read into PotentialValues.
LLVM_ABI Value * getWithType(Value &V, Type &Ty)
Try to convert V to type Ty without introducing new instructions.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ Entry
Definition COFF.h:862
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
Definition BasicBlock.h:73
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage return + linefeed '\r\n'.
Definition FileSystem.h:786
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value,...
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:535
LLVM_ABI unsigned MaxInitializationChainLength
The value passed to the line option that defines the maximal initialization chain length.
LLVM_ABI bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions=false, const TargetLibraryInfo *TLI=nullptr, DomTreeUpdater *DTU=nullptr)
If a terminator instruction is predicated on a constant value, convert it into an unconditional branc...
Definition Local.cpp:134
APInt operator&(APInt a, const APInt &b)
Definition APInt.h:2152
LLVM_ABI void detachDeadBlocks(ArrayRef< BasicBlock * > BBs, SmallVectorImpl< DominatorTree::UpdateType > *Updates, bool KeepOneInputPHIs=false)
Replace contents of every block in BBs with single unreachable instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
@ Done
Definition Threading.h:60
DenseMap< AssumeInst *, MinMax > Assume2KnowledgeMap
A mapping from intrinsics (=llvm.assume calls) to a value range (=knowledge) that is encoded in them.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
LLVM_ABI CallInst * changeToCall(InvokeInst *II, DomTreeUpdater *DTU=nullptr)
This function converts the specified invoke into a normal call.
Definition Local.cpp:2594
LLVM_ABI raw_fd_ostream & outs()
This returns a reference to a raw_fd_ostream for standard output.
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
Definition Casting.h:732
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
MemoryEffectsBase< IRMemLocation > MemoryEffects
Summary of how a function affects memory in the program.
Definition ModRef.h:356
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
bool isa_and_nonnull(const Y &Val)
Definition Casting.h:676
AnalysisManager< LazyCallGraph::SCC, LazyCallGraph & > CGSCCAnalysisManager
The CGSCC analysis manager.
LLVM_ABI InlineResult isInlineViable(Function &Callee)
Check if it is mechanically possible to inline the function Callee, based on the contents of the func...
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
LLVM_ABI Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
PotentialValuesState< std::pair< AA::ValueAndContext, AA::ValueScope > > PotentialLLVMValuesState
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI bool AreStatisticsEnabled()
Check if statistics are enabled.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
@ Success
The lock was released successfully.
LLVM_ABI unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition Local.cpp:2528
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
LLVM_ABI BasicBlock * SplitBlockPredecessors(BasicBlock *BB, ArrayRef< BasicBlock * > Preds, const char *Suffix, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, bool PreserveLCSSA=false)
This method introduces at least one new basic block into the function and moves some of the predecess...
PotentialValuesState< APInt > PotentialConstantIntValuesState
bool operator&=(SparseBitVector< ElementSize > *LHS, const SparseBitVector< ElementSize > &RHS)
LLVM_ABI bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr, const CycleInfo *CI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition CFG.cpp:335
DWARFExpression::Operation Op
void ViewGraph(const GraphType &G, const Twine &Name, bool ShortNames=false, const Twine &Title="", GraphProgram::Name Program=GraphProgram::DOT)
ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file, then cleanup.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
ValueMap< const Value *, WeakTrackingVH > ValueToValueMapTy
LLVM_ABI void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2191
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
Definition iterator.h:368
LLVM_ABI bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1946
ChangeStatus
{
Definition Attributor.h:508
LLVM_ABI void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result)
Insert into the map all the information contained in the operand bundles of the llvm....
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
LLVM_ABI Constant * ConstantFoldCastInstruction(unsigned opcode, Constant *V, Type *DestTy)
@ OPTIONAL
The target may be valid if the source is not.
Definition Attributor.h:520
@ NONE
Do not track a dependence between source and target.
Definition Attributor.h:521
@ REQUIRED
The target cannot be valid if the source is not.
Definition Attributor.h:519
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
APInt operator|(APInt a, const APInt &b)
Definition APInt.h:2172
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
#define N
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
DepSetTy Deps
Set of dependency graph nodes which should be updated if this one is updated.
Definition Attributor.h:535
PointerIntPair< AADepGraphNode *, 1 > DepTy
Definition Attributor.h:529
The data structure for the dependency graph.
Definition Attributor.h:569
iterator begin()
Definition Attributor.h:584
LLVM_ABI void viewGraph()
AADepGraphNode SyntheticRoot
There is no root node for the dependency graph.
Definition Attributor.h:581
LLVM_ABI void print()
Print dependency graph.
iterator end()
Definition Attributor.h:585
LLVM_ABI void dumpGraph()
Dump graph to file.
AADepGraphNode * GetEntryNode()
Definition Attributor.h:582
An abstract interface to track if a value leaves its defining function instance.
bool isAssumedUniqueForAnalysis() const
Return true if we assume that the underlying value is unique in its scope wrt.
An abstract Attribute for computing reachability between functions.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An abstract interface to determine reachability of point A to B.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An abstract interface for liveness abstract attribute.
virtual bool isKnownDead() const =0
Returns true if the underlying value is known dead.
virtual bool isAssumedDead() const =0
The query functions are protected such that other attributes need to go through the Attributor interf...
virtual bool isRemovableStore() const
Return true if the underlying value is a store that is known to be removable.
static bool mayCatchAsynchronousExceptions(const Function &F)
Determine if F might catch asynchronous exceptions.
An abstract interface for memory access kind related attributes (readnone/readonly/writeonly).
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An abstract interface for all memory location attributes (readnone/argmemonly/inaccessiblememonly/ina...
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI bool isNonRelaxedAtomic(const Instruction *I)
Helper function used to determine whether an instruction is non-relaxed atomic.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
An access description.
bool isWrittenValueUnknown() const
Return true if the value written cannot be determined at all.
std::optional< Value * > getContent() const
Return the written value which can be llvm::null if it is not yet determined.
bool isWriteOrAssumption() const
Return true if this is a write access.
bool isRead() const
Return true if this is a read access.
Value * getWrittenValue() const
Return the value written, if any.
Instruction * getLocalInst() const
Return the instruction that causes the access with respect to the local scope of the associated attri...
Instruction * getRemoteInst() const
Return the actual instruction that causes the access.
bool isWrittenValueYetUndetermined() const
Return true if the value written is not known yet.
AccessKind getKind() const
Return the access kind.
An abstract interface for struct information.
static LLVM_ABI Value * getSingleValue(Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP, SmallVectorImpl< AA::ValueAndContext > &Values)
Extract the single value in Values if any.
An abstract attribute for getting all assumption underlying objects.
static LLVM_ABI const char ID
Unique ID (due to the unique address)
static LLVM_ABI const char ID
Unique ID (due to the unique address)
Helper to represent an access offset and size, with logic to deal with uncertainty and check for over...
Definition Attributor.h:253
bool offsetOrSizeAreUnknown() const
Return true if offset or size are unknown.
Definition Attributor.h:262
Value * getValue() const
Definition Attributor.h:206
const Instruction * getCtxI() const
Definition Attributor.h:207
Base struct for all "concrete attribute" deductions.
ChangeStatus update(Attributor &A)
Hook for the Attributor to trigger an update of the internal state.
friend struct Attributor
}
virtual void printWithDeps(raw_ostream &OS) const
void print(raw_ostream &OS) const
Helper functions, for debug purposes only.
virtual StateType & getState()=0
Return the internal abstract state for inspection.
virtual const std::string getAsStr(Attributor *A) const =0
This function should return the "summarized" assumed state as string.
virtual ChangeStatus updateImpl(Attributor &A)=0
The actual update/transfer function which has to be implemented by the derived classes.
const IRPosition & getIRPosition() const
Return an IR position, see struct IRPosition.
An interface to query the internal state of an abstract attribute.
virtual ChangeStatus indicatePessimisticFixpoint()=0
Indicate that the abstract state should converge to the pessimistic state.
virtual bool isAtFixpoint() const =0
Return if this abstract state is fixed, thus does not need to be updated if information changes as it...
virtual bool isValidState() const =0
Return if this abstract state is in a valid state.
Wrapper for FunctionAnalysisManager.
LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
void populateAll() const
Force populate the entire call graph.
Configuration for the Attributor.
std::optional< unsigned > MaxFixpointIterations
Maximum number of iterations to run until fixpoint.
LLVM_ABI PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Helper struct used in the communication between an abstract attribute (AA) that wants to change the s...
std::function< void( const ArgumentReplacementInfo &, Function &, Function::arg_iterator)> CalleeRepairCBTy
Callee repair callback type.
std::function< void(const ArgumentReplacementInfo &, AbstractCallSite, SmallVectorImpl< Value * > &)> ACSRepairCBTy
Abstract call site (ACS) repair callback type.
The fixpoint analysis framework that orchestrates the attribute deduction.
LLVM_ABI bool registerFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes, ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB)
Register a rewrite for a function signature.
LLVM_ABI ~Attributor()
LLVM_ABI bool checkForAllCallees(function_ref< bool(ArrayRef< const Function * > Callees)> Pred, const AbstractAttribute &QueryingAA, const CallBase &CB)
Check Pred on all potential Callees of CB.
bool isModulePass() const
Return true if this is a module pass, false otherwise.
LLVM_ABI bool isValidFunctionSignatureRewrite(Argument &Arg, ArrayRef< Type * > ReplacementTypes)
Check if we can rewrite a function signature.
static LLVM_ABI bool isInternalizable(Function &F)
Returns true if the function F can be internalized.
LLVM_ABI ChangeStatus removeAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AttrKinds)
Remove all AttrKinds attached to IRP.
void emitRemark(Instruction *I, StringRef RemarkName, RemarkCallBack &&RemarkCB) const
Emit a remark generically.
bool isRunOn(Function &Fn) const
Return true if we derive attributes for Fn.
LLVM_ABI bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, DepClassTy DepClass=DepClassTy::OPTIONAL)
Return true if AA (or its context instruction) is assumed dead.
LLVM_ABI bool checkForAllInstructions(function_ref< bool(Instruction &)> Pred, const Function *Fn, const AbstractAttribute *QueryingAA, ArrayRef< unsigned > Opcodes, bool &UsedAssumedInformation, bool CheckBBLivenessOnly=false, bool CheckPotentiallyDead=false)
Check Pred on all instructions in Fn with an opcode present in Opcodes.
LLVM_ABI void recordDependence(const AbstractAttribute &FromAA, const AbstractAttribute &ToAA, DepClassTy DepClass)
Explicitly record a dependence from FromAA to ToAA, that is if FromAA changes ToAA should be updated ...
static LLVM_ABI void createShallowWrapper(Function &F)
Create a shallow wrapper for F such that F has internal linkage afterwards.
const AAType * getAAFor(const AbstractAttribute &QueryingAA, const IRPosition &IRP, DepClassTy DepClass)
Lookup an abstract attribute of type AAType at position IRP.
std::optional< Value * > getAssumedSimplified(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation, AA::ValueScope S)
If V is assumed simplified, return it, if it is unclear yet, return std::nullopt, otherwise return nu...
static LLVM_ABI Function * internalizeFunction(Function &F, bool Force=false)
Make another copy of the function F such that the copied version has internal linkage afterwards and ...
bool isFunctionIPOAmendable(const Function &F)
Determine whether the function F is IPO amendable.
const AAType * getOrCreateAAFor(IRPosition IRP, const AbstractAttribute *QueryingAA, DepClassTy DepClass, bool ForceUpdate=false, bool UpdateAfterInit=true)
The version of getAAFor that allows to omit a querying abstract attribute.
LLVM_ABI bool checkForAllReadWriteInstructions(function_ref< bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, bool &UsedAssumedInformation)
Check Pred on all Read/Write instructions.
LLVM_ABI bool checkForAllReturnedValues(function_ref< bool(Value &)> Pred, const AbstractAttribute &QueryingAA, AA::ValueScope S=AA::ValueScope::Intraprocedural, bool RecurseForSelectAndPHI=true)
Check Pred on all values potentially returned by the function associated with QueryingAA.
LLVM_ABI bool isClosedWorldModule() const
Return true if the module contains the whole world, thus, no outside functions exist.
LLVM_ABI std::optional< Constant * > getAssumedConstant(const IRPosition &IRP, const AbstractAttribute &AA, bool &UsedAssumedInformation)
If IRP is assumed to be a constant, return it, if it is unclear yet, return std::nullopt,...
LLVM_ABI Attributor(SetVector< Function * > &Functions, InformationCache &InfoCache, AttributorConfig Configuration)
Constructor.
LLVM_ABI void getAttrs(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, SmallVectorImpl< Attribute > &Attrs, bool IgnoreSubsumingPositions=false)
Return the attributes of any kind in AKs existing in the IR at a position that will affect this one.
InformationCache & getInfoCache()
Return the internal information cache.
LLVM_ABI std::optional< Value * > translateArgumentToCallSiteContent(std::optional< Value * > V, CallBase &CB, const AbstractAttribute &AA, bool &UsedAssumedInformation)
Translate V from the callee context into the call site context.
LLVM_ABI bool checkForAllUses(function_ref< bool(const Use &, bool &)> Pred, const AbstractAttribute &QueryingAA, const Value &V, bool CheckBBLivenessOnly=false, DepClassTy LivenessDepClass=DepClassTy::OPTIONAL, bool IgnoreDroppableUses=true, function_ref< bool(const Use &OldU, const Use &NewU)> EquivalentUseCB=nullptr)
Check Pred on all (transitive) uses of V.
LLVM_ABI ChangeStatus manifestAttrs(const IRPosition &IRP, ArrayRef< Attribute > DeducedAttrs, bool ForceReplace=false)
Attach DeducedAttrs to IRP, if ForceReplace is set we do this even if the same attribute kind was alr...
LLVM_ABI bool hasAttr(const IRPosition &IRP, ArrayRef< Attribute::AttrKind > AKs, bool IgnoreSubsumingPositions=false, Attribute::AttrKind ImpliedAttributeKind=Attribute::None)
Return true if any kind in AKs existing in the IR at a position that will affect this one.
LLVM_ABI void registerForUpdate(AbstractAttribute &AA)
Allows a query AA to request an update if a new query was received.
std::function< bool(Attributor &, const AbstractAttribute *)> VirtualUseCallbackTy
LLVM_ABI void identifyDefaultAbstractAttributes(Function &F)
Determine opportunities to derive 'default' attributes in F and create abstract attribute objects for...
LLVM_ABI bool getAssumedSimplifiedValues(const IRPosition &IRP, const AbstractAttribute *AA, SmallVectorImpl< AA::ValueAndContext > &Values, AA::ValueScope S, bool &UsedAssumedInformation, bool RecurseForSelectAndPHI=true)
Try to simplify IRP and in the scope S.
BumpPtrAllocator & Allocator
The allocator used to allocate memory, e.g. for AbstractAttributes.
LLVM_ABI ChangeStatus run()
Run the analyses until a fixpoint is reached or enforced (timeout).
static LLVM_ABI bool internalizeFunctions(SmallPtrSetImpl< Function * > &FnSet, DenseMap< Function *, Function * > &FnMap)
Make copies of each function in the set FnSet such that the copied version has internal linkage after...
LLVM_ABI bool checkForAllCallSites(function_ref< bool(AbstractCallSite)> Pred, const AbstractAttribute &QueryingAA, bool RequireAllCallSites, bool &UsedAssumedInformation)
Check Pred on all function call sites.
LLVM_ABI bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK, SmallVectorImpl< Attribute > &Attrs)
Return the attributes of kind AK existing in the IR as operand bundles of an llvm....
bool isKnown(base_t BitsEncoding=BestState) const
Return true if the bits set in BitsEncoding are "known bits".
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
static std::string getNodeLabel(const AADepGraphNode *Node, const AADepGraph *DG)
DefaultDOTGraphTraits(bool simple=false)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ Dynamic
Denormals have unknown treatment.
An information struct used to provide DenseMap with the various necessary components for a given valu...
static NodeRef DepGetVal(const DepTy &DT)
PointerIntPair< AADepGraphNode *, 1 > DepTy
static ChildIteratorType child_end(NodeRef N)
static NodeRef getEntryNode(AADepGraphNode *DGN)
mapped_iterator< AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)> ChildIteratorType
PointerIntPair< AADepGraphNode *, 1 > EdgeRef
static ChildIteratorType child_begin(NodeRef N)
AADepGraphNode::DepSetTy::iterator ChildEdgeIteratorType
static NodeRef getEntryNode(AADepGraph *DG)
mapped_iterator< AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)> nodes_iterator
static nodes_iterator nodes_begin(AADepGraph *DG)
static nodes_iterator nodes_end(AADepGraph *DG)
typename AADepGraph *::UnknownGraphTypeError NodeRef
Definition GraphTraits.h:95
Helper to describe and deal with positions in the LLVM-IR.
Definition Attributor.h:605
Function * getAssociatedFunction() const
Return the associated function, if any.
Definition Attributor.h:736
void setAttrList(const AttributeList &AttrList) const
Update the attributes associated with this function or call site scope.
Definition Attributor.h:872
unsigned getAttrIdx() const
Return the index in the attribute list for this position.
Definition Attributor.h:837
bool hasCallBaseContext() const
Check if the position has any call base context.
Definition Attributor.h:954
static const IRPosition callsite_returned(const CallBase &CB)
Create a position describing the returned value of CB.
Definition Attributor.h:673
static const IRPosition returned(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the returned value of F.
Definition Attributor.h:655
LLVM_ABI Argument * getAssociatedArgument() const
Return the associated argument, if any.
static const IRPosition value(const Value &V, const CallBaseContext *CBContext=nullptr)
Create a position describing the value of V.
Definition Attributor.h:629
CallBase CallBaseContext
Definition Attributor.h:608
AttributeList getAttrList() const
Return the attributes associated with this function or call site scope.
Definition Attributor.h:865
static const IRPosition inst(const Instruction &I, const CallBaseContext *CBContext=nullptr)
Create a position describing the instruction I.
Definition Attributor.h:641
static const IRPosition callsite_argument(const CallBase &CB, unsigned ArgNo)
Create a position describing the argument of CB at position ArgNo.
Definition Attributor.h:678
static LLVM_ABI const IRPosition TombstoneKey
Definition Attributor.h:960
Kind
The positions we distinguish in the IR.
Definition Attributor.h:611
@ IRP_ARGUMENT
An attribute for a function argument.
Definition Attributor.h:619
@ IRP_RETURNED
An attribute for the function return value.
Definition Attributor.h:615
@ IRP_CALL_SITE
An attribute for a call site (function scope).
Definition Attributor.h:618
@ IRP_CALL_SITE_RETURNED
An attribute for a call site return value.
Definition Attributor.h:616
@ IRP_FUNCTION
An attribute for a function (scope).
Definition Attributor.h:617
@ IRP_FLOAT
A position that is not associated with a spot suitable for attributes.
Definition Attributor.h:613
@ IRP_CALL_SITE_ARGUMENT
An attribute for a call site argument.
Definition Attributor.h:620
@ IRP_INVALID
An invalid position.
Definition Attributor.h:612
Instruction * getCtxI() const
Return the context instruction, if any.
Definition Attributor.h:789
static const IRPosition argument(const Argument &Arg, const CallBaseContext *CBContext=nullptr)
Create a position describing the argument Arg.
Definition Attributor.h:662
static LLVM_ABI const IRPosition EmptyKey
Special DenseMap key values.
Definition Attributor.h:959
static const IRPosition function(const Function &F, const CallBaseContext *CBContext=nullptr)
Create a position describing the function scope of F.
Definition Attributor.h:648
const CallBaseContext * getCallBaseContext() const
Get the call base context from the position.
Definition Attributor.h:951
Value & getAssociatedValue() const
Return the value this abstract attribute is associated with.
Definition Attributor.h:803
Value & getAnchorValue() const
Return the value this abstract attribute is anchored with.
Definition Attributor.h:722
Value * getAttrListAnchor() const
Return the value attributes are attached to.
Definition Attributor.h:858
int getCallSiteArgNo() const
Return the call site argument number of the associated value if it is an argument or call site argument, otherwise a negative value.
Definition Attributor.h:832
Kind getPositionKind() const
Return the associated position kind.
Definition Attributor.h:901
static const IRPosition callsite_function(const CallBase &CB)
Create a position describing the function scope of CB.
Definition Attributor.h:668
Function * getAnchorScope() const
Return the Function surrounding the anchor value.
Definition Attributor.h:777
Data structure to hold cached (LLVM-IR) information.
bool IsTargetGPU() const
Return true if the target is a GPU.
friend struct Attributor
Give the Attributor access to the members so Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
bool stackIsAccessibleByOtherThreads()
Return true if the stack (llvm::Alloca) can be accessed by other threads.
MustBeExecutedContextExplorer * getMustBeExecutedContextExplorer()
Return MustBeExecutedContextExplorer.
TargetLibraryInfo * getTargetLibraryInfoForFunction(const Function &F)
Return TargetLibraryInfo for function F.
LLVM_ABI std::optional< unsigned > getFlatAddressSpace() const
Return the flat address space, if the associated target has one.
DenseMap< unsigned, InstructionVectorTy * > OpcodeInstMapTy
A map type from opcodes to instructions with that opcode.
const RetainedKnowledgeMap & getKnowledgeMap() const
Return the map containing all the knowledge we have from llvm.assumes.
LLVM_ABI ArrayRef< Function * > getIndirectlyCallableFunctions(Attributor &A) const
Return all functions that might be called indirectly, only valid for closed world modules (see isClosedWorldModule).
SmallVector< Instruction *, 8 > InstructionVectorTy
A vector type to hold instructions.
AP::Result * getAnalysisResultForFunction(const Function &F, bool CachedOnly=false)
Return the analysis result from a pass AP for function F.
State for an integer range.
ConstantRange getKnown() const
Return the known state encoding.
ConstantRange getAssumed() const
Return the assumed state encoding.
uint32_t getBitWidth() const
Return associated values' bit width.
A "must be executed context" for a given program point PP is the set of instructions,...
iterator & end()
Return a universal end iterator.
bool findInContextOf(const Instruction *I, const Instruction *PP)
Helper to look for I in the context of PP.
iterator & begin(const Instruction *PP)
Return an iterator to explore the context around PP.
bool undefIsContained() const
Returns whether this state contains an undef value or not.
bool isValidState() const override
See AbstractState::isValidState(...)
const SetTy & getAssumedSet() const
Return this set.