AMDGPUTargetMachine.cpp
1//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// The AMDGPU target machine contains all of the hardware specific
11/// information needed to emit code for SI+ GPUs.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPUTargetMachine.h"
16#include "AMDGPU.h"
17#include "AMDGPUAliasAnalysis.h"
21#include "AMDGPUIGroupLP.h"
22#include "AMDGPUISelDAGToDAG.h"
23#include "AMDGPUMacroFusion.h"
24#include "AMDGPURegBankSelect.h"
25#include "AMDGPUSplitModule.h"
30#include "GCNSchedStrategy.h"
31#include "GCNVOPDUtils.h"
32#include "R600.h"
34#include "R600TargetMachine.h"
36#include "SIMachineScheduler.h"
48#include "llvm/CodeGen/Passes.h"
51#include "llvm/IR/IntrinsicsAMDGPU.h"
52#include "llvm/IR/PassManager.h"
58#include "llvm/Transforms/IPO.h"
69#include <optional>
70
71using namespace llvm;
72using namespace llvm::PatternMatch;
73
74namespace {
75class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
76public:
77 SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
78 : RegisterRegAllocBase(N, D, C) {}
79};
80
81class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
82public:
83 VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
84 : RegisterRegAllocBase(N, D, C) {}
85};
86
87static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
88 const MachineRegisterInfo &MRI,
89 const Register Reg) {
90 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
91 return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
92}
93
94static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
95 const MachineRegisterInfo &MRI,
96 const Register Reg) {
97 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
98 return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(RC);
99}
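These two predicates partition the virtual registers exactly, so each of the split allocator runs below sees only its own subset. A minimal sketch of that partition (illustrative only, not part of this file; MRI and TRI would come from the MachineFunction being allocated, and countFilteredVRegs is a hypothetical helper):

  // Count how many virtual registers each filtered allocation run would see.
  static void countFilteredVRegs(const MachineRegisterInfo &MRI,
                                 const TargetRegisterInfo &TRI) {
    unsigned NumSGPR = 0, NumVGPR = 0;
    for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
      Register Reg = Register::index2VirtReg(I);
      if (onlyAllocateSGPRs(TRI, MRI, Reg))
        ++NumSGPR; // handled by the SGPR allocation pass
      else
        ++NumVGPR; // onlyAllocateVGPRs accepts exactly the complement
    }
  }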
100
101/// -{sgpr|vgpr}-regalloc=... command line option.
102static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }
103
104/// A dummy default pass factory that indicates whether the register
105/// allocator is overridden on the command line.
106static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
107static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;
108
109static SGPRRegisterRegAlloc
110defaultSGPRRegAlloc("default",
111 "pick SGPR register allocator based on -O option",
112 useDefaultRegisterAllocator);
113
114static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
115 RegisterPassParser<SGPRRegisterRegAlloc>>
116SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
117 cl::desc("Register allocator to use for SGPRs"));
118
119static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
120 RegisterPassParser<VGPRRegisterRegAlloc>>
121VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
122 cl::desc("Register allocator to use for VGPRs"));
123
124
125static void initializeDefaultSGPRRegisterAllocatorOnce() {
126 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
127
128 if (!Ctor) {
129 Ctor = SGPRRegAlloc;
130 SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
131 }
132}
133
134static void initializeDefaultVGPRRegisterAllocatorOnce() {
135 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
136
137 if (!Ctor) {
138 Ctor = VGPRRegAlloc;
139 VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
140 }
141}
142
143static FunctionPass *createBasicSGPRRegisterAllocator() {
144 return createBasicRegisterAllocator(onlyAllocateSGPRs);
145}
146
147static FunctionPass *createGreedySGPRRegisterAllocator() {
148 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
149}
150
151static FunctionPass *createFastSGPRRegisterAllocator() {
152 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
153}
154
155static FunctionPass *createBasicVGPRRegisterAllocator() {
156 return createBasicRegisterAllocator(onlyAllocateVGPRs);
157}
158
159static FunctionPass *createGreedyVGPRRegisterAllocator() {
160 return createGreedyRegisterAllocator(onlyAllocateVGPRs);
161}
162
163static FunctionPass *createFastVGPRRegisterAllocator() {
164 return createFastRegisterAllocator(onlyAllocateVGPRs, true);
165}
166
167static SGPRRegisterRegAlloc basicRegAllocSGPR(
168 "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
169static SGPRRegisterRegAlloc greedyRegAllocSGPR(
170 "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);
171
172static SGPRRegisterRegAlloc fastRegAllocSGPR(
173 "fast", "fast register allocator", createFastSGPRRegisterAllocator);
174
175
176static VGPRRegisterRegAlloc basicRegAllocVGPR(
177 "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
178static VGPRRegisterRegAlloc greedyRegAllocVGPR(
179 "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);
180
181static VGPRRegisterRegAlloc fastRegAllocVGPR(
182 "fast", "fast register allocator", createFastVGPRRegisterAllocator);
183} // anonymous namespace
184
185static cl::opt<bool>
186EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
187 cl::desc("Run early if-conversion"),
188 cl::init(false));
189
190static cl::opt<bool>
191OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
192 cl::desc("Run pre-RA exec mask optimizations"),
193 cl::init(true));
194
195static cl::opt<bool>
196 LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
197 cl::desc("Lower GPU ctor / dtors to globals on the device."),
198 cl::init(true), cl::Hidden);
199
200// Option to disable vectorizer for tests.
201static cl::opt<bool> EnableLoadStoreVectorizer(
202 "amdgpu-load-store-vectorizer",
203 cl::desc("Enable load store vectorizer"),
204 cl::init(true),
205 cl::Hidden);
206
207// Option to control global loads scalarization
208static cl::opt<bool> ScalarizeGlobal(
209 "amdgpu-scalarize-global-loads",
210 cl::desc("Enable global load scalarization"),
211 cl::init(true),
212 cl::Hidden);
213
214// Option to run internalize pass.
215static cl::opt<bool> InternalizeSymbols(
216 "amdgpu-internalize-symbols",
217 cl::desc("Enable elimination of non-kernel functions and unused globals"),
218 cl::init(false),
219 cl::Hidden);
220
221// Option to inline all early.
222static cl::opt<bool> EarlyInlineAll(
223 "amdgpu-early-inline-all",
224 cl::desc("Inline all functions early"),
225 cl::init(false),
226 cl::Hidden);
227
228static cl::opt<bool> RemoveIncompatibleFunctions(
229 "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
230 cl::desc("Enable removal of functions when they "
231 "use features not supported by the target GPU"),
232 cl::init(true));
233
234static cl::opt<bool> EnableSDWAPeephole(
235 "amdgpu-sdwa-peephole",
236 cl::desc("Enable SDWA peepholer"),
237 cl::init(true));
238
239static cl::opt<bool> EnableDPPCombine(
240 "amdgpu-dpp-combine",
241 cl::desc("Enable DPP combiner"),
242 cl::init(true));
243
244// Enable address space based alias analysis
245static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
246 cl::desc("Enable AMDGPU Alias Analysis"),
247 cl::init(true));
248
249// Option to run late CFG structurizer
250static cl::opt<bool, true> LateCFGStructurize(
251 "amdgpu-late-structurize",
252 cl::desc("Enable late CFG structurization"),
253 cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
254 cl::Hidden);
255
256// Disable structurizer-based control-flow lowering in order to test convergence
257// control tokens. This should eventually be replaced by the wave-transform.
258static cl::opt<bool, true> DisableStructurizer(
259 "amdgpu-disable-structurizer",
260 cl::desc("Disable structurizer for experiments; produces unusable code"),
261 cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);
262
263// Enable lib calls simplifications
264static cl::opt<bool> EnableLibCallSimplify(
265 "amdgpu-simplify-libcall",
266 cl::desc("Enable amdgpu library simplifications"),
267 cl::init(true),
268 cl::Hidden);
269
270static cl::opt<bool> EnableLowerKernelArguments(
271 "amdgpu-ir-lower-kernel-arguments",
272 cl::desc("Lower kernel argument loads in IR pass"),
273 cl::init(true),
274 cl::Hidden);
275
276static cl::opt<bool> EnableRegReassign(
277 "amdgpu-reassign-regs",
278 cl::desc("Enable register reassign optimizations on gfx10+"),
279 cl::init(true),
280 cl::Hidden);
281
282static cl::opt<bool> OptVGPRLiveRange(
283 "amdgpu-opt-vgpr-liverange",
284 cl::desc("Enable VGPR liverange optimizations for if-else structure"),
285 cl::init(true), cl::Hidden);
286
287static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
288 "amdgpu-atomic-optimizer-strategy",
289 cl::desc("Select DPP or Iterative strategy for scan"),
290 cl::init(ScanOptions::Iterative),
291 cl::values(
292 clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
293 clEnumValN(ScanOptions::Iterative, "Iterative",
294 "Use Iterative approach for scan"),
295 clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));
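For intuition about the DPP/Iterative choice above: the atomic optimizer replaces N per-lane atomic read-modify-write operations with a single atomic of the wave-wide total plus a prefix scan that gives each lane its own offset. A host-side model of the exclusive scan that the "Iterative" strategy computes one lane at a time (illustrative C++ only, assuming a 32-lane wave; DPP computes the same scan with wave-level data-parallel primitives):

  #include <array>
  #include <cstdint>

  // Each active lane contributes Val[Lane]; Prefix[Lane] is the sum over all
  // earlier lanes, so lane N's atomic result is WaveBase + Prefix[N].
  static std::array<uint32_t, 32>
  exclusiveScan(const std::array<uint32_t, 32> &Val) {
    std::array<uint32_t, 32> Prefix{};
    uint32_t Running = 0;
    for (int Lane = 0; Lane < 32; ++Lane) {
      Prefix[Lane] = Running;
      Running += Val[Lane]; // Running ends as the wave-wide total
    }
    return Prefix;
  }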
296
297// Enable Mode register optimization
298static cl::opt<bool> EnableSIModeRegisterPass(
299 "amdgpu-mode-register",
300 cl::desc("Enable mode register pass"),
301 cl::init(true),
302 cl::Hidden);
303
304// Enable GFX11.5+ s_singleuse_vdst insertion
305static cl::opt<bool>
306 EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
307 cl::desc("Enable s_singleuse_vdst insertion"),
308 cl::init(false), cl::Hidden);
309
310// Enable GFX11+ s_delay_alu insertion
311static cl::opt<bool>
312 EnableInsertDelayAlu("amdgpu-enable-delay-alu",
313 cl::desc("Enable s_delay_alu insertion"),
314 cl::init(true), cl::Hidden);
315
316// Enable GFX11+ VOPD
317static cl::opt<bool>
318 EnableVOPD("amdgpu-enable-vopd",
319 cl::desc("Enable VOPD, dual issue of VALU in wave32"),
320 cl::init(true), cl::Hidden);
321
322// Used in lit tests to prevent dead-coding of the patterns under inspection.
323static cl::opt<bool>
324EnableDCEInRA("amdgpu-dce-in-ra",
325 cl::init(true), cl::Hidden,
326 cl::desc("Enable machine DCE inside regalloc"));
327
328static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
329 cl::desc("Adjust wave priority"),
330 cl::init(false), cl::Hidden);
331
332static cl::opt<bool> EnableScalarIRPasses(
333 "amdgpu-scalar-ir-passes",
334 cl::desc("Enable scalar IR passes"),
335 cl::init(true),
336 cl::Hidden);
337
338static cl::opt<bool> EnableStructurizerWorkarounds(
339 "amdgpu-enable-structurizer-workarounds",
340 cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
341 cl::Hidden);
342
343static cl::opt<bool, true> EnableLowerModuleLDS(
344 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
345 cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
346 cl::Hidden);
347
348static cl::opt<bool> EnablePreRAOptimizations(
349 "amdgpu-enable-pre-ra-optimizations",
350 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
351 cl::Hidden);
352
353static cl::opt<bool> EnablePromoteKernelArguments(
354 "amdgpu-enable-promote-kernel-arguments",
355 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
356 cl::Hidden, cl::init(true));
357
358static cl::opt<bool> EnableImageIntrinsicOptimizer(
359 "amdgpu-enable-image-intrinsic-optimizer",
360 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
361 cl::Hidden);
362
363static cl::opt<bool>
364 EnableLoopPrefetch("amdgpu-loop-prefetch",
365 cl::desc("Enable loop data prefetch on AMDGPU"),
366 cl::Hidden, cl::init(false));
367
368static cl::opt<bool> EnableMaxIlpSchedStrategy(
369 "amdgpu-enable-max-ilp-scheduling-strategy",
370 cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
371 cl::Hidden, cl::init(false));
372
373static cl::opt<bool> EnableRewritePartialRegUses(
374 "amdgpu-enable-rewrite-partial-reg-uses",
375 cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
376 cl::Hidden);
377
378static cl::opt<bool> EnableHipStdPar(
379 "amdgpu-enable-hipstdpar",
380 cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
381 cl::Hidden);
382
383extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
384 // Register the target
385 RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
386 RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());
387
462}
463
464static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
465 return std::make_unique<AMDGPUTargetObjectFile>();
466}
467
468static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
469 return new SIScheduleDAGMI(C);
470}
471
472static ScheduleDAGInstrs *
473createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
474 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
475 ScheduleDAGMILive *DAG =
476 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
477 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
478 if (ST.shouldClusterStores())
479 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
480 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
481 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
482 DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
483 return DAG;
484}
485
486static ScheduleDAGInstrs *
487createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
488 ScheduleDAGMILive *DAG =
489 new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
490 DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
491 return DAG;
492}
493
494static ScheduleDAGInstrs *
495createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
496 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
497 auto DAG = new GCNIterativeScheduler(C,
498 GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
499 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
500 if (ST.shouldClusterStores())
501 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
502 return DAG;
503}
504
505static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
506 return new GCNIterativeScheduler(C,
507 GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
508}
509
510static ScheduleDAGInstrs *
511createIterativeILPMachineScheduler(MachineSchedContext *C) {
512 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
513 auto DAG = new GCNIterativeScheduler(C,
514 GCNIterativeScheduler::SCHEDULE_ILP);
515 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
516 if (ST.shouldClusterStores())
517 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
518 DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
519 return DAG;
520}
521
522static MachineSchedRegistry
523SISchedRegistry("si", "Run SI's custom scheduler",
524 createSIMachineScheduler);
525
526static MachineSchedRegistry
527GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
528 "Run GCN scheduler to maximize occupancy",
529 createGCNMaxOccupancyMachineScheduler);
530
531static MachineSchedRegistry
532 GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
533 createGCNMaxILPMachineScheduler);
534
535static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
536 "gcn-iterative-max-occupancy-experimental",
537 "Run GCN scheduler to maximize occupancy (experimental)",
538 createIterativeGCNMaxOccupancyMachineScheduler);
539
540static MachineSchedRegistry GCNMinRegSchedRegistry(
541 "gcn-iterative-minreg",
542 "Run GCN iterative scheduler for minimal register usage (experimental)",
543 createMinRegScheduler);
544
545static MachineSchedRegistry GCNILPSchedRegistry(
546 "gcn-iterative-ilp",
547 "Run GCN iterative scheduler for ILP scheduling (experimental)",
548 createIterativeILPMachineScheduler);
549
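Each MachineSchedRegistry entry above registers its factory under a name that can be selected at run time through llc's -misched option; for example "llc -mtriple=amdgcn -misched=gcn-max-ilp" picks createGCNMaxILPMachineScheduler. This is a usage sketch based on how the generic MachineScheduler pass consumes MachineSchedRegistry, not something spelled out in this file.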
550static StringRef computeDataLayout(const Triple &TT) {
551 if (TT.getArch() == Triple::r600) {
552 // 32-bit pointers.
553 return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
554 "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
555 }
556
557 // 32-bit private, local, and region pointers. 64-bit global, constant and
558 // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
559 // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
560 // (address space 7), and 128-bit non-integral buffer resources (address
561 // space 8) which cannot be non-trivially accessed by LLVM memory operations
562 // like getelementptr.
563 return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
564 "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
565 "v32:32-v48:64-v96:"
566 "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
567 "G1-ni:7:8:9";
568}
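The string returned above drives every llvm::DataLayout query for the module. A small sketch of how the per-address-space pointer sizes encoded in it can be read back (illustrative only; the layout string is abbreviated to just the components being queried):

  #include "llvm/IR/DataLayout.h"
  using namespace llvm;

  void queryAMDGPUPointerSizes() {
    // p:64:64 -> 64-bit flat (address space 0); p5:32:32 -> 32-bit private.
    DataLayout DL("e-p:64:64-p5:32:32-A5");
    unsigned FlatBits = DL.getPointerSizeInBits(/*AS=*/0);    // 64
    unsigned ScratchBits = DL.getPointerSizeInBits(/*AS=*/5); // 32
    unsigned AllocaAS = DL.getAllocaAddrSpace();              // 5, from "A5"
    (void)FlatBits; (void)ScratchBits; (void)AllocaAS;
  }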
569
570LLVM_READNONE
571static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
572 if (!GPU.empty())
573 return GPU;
574
575 // Need to default to a target with flat support for HSA.
576 if (TT.getArch() == Triple::amdgcn)
577 return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";
578
579 return "r600";
580}
581
582static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
583 // The AMDGPU toolchain only supports generating shared objects, so we
584 // must always use PIC.
585 return Reloc::PIC_;
586}
587
589 StringRef CPU, StringRef FS,
590 const TargetOptions &Options,
591 std::optional<Reloc::Model> RM,
592 std::optional<CodeModel::Model> CM,
593 CodeGenOptLevel OptLevel)
594 : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
595 FS, Options, getEffectiveRelocModel(RM),
596 getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
597 TLOF(createTLOF(getTargetTriple())) {
598 initAsmInfo();
599 if (TT.getArch() == Triple::amdgcn) {
600 if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
601 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
602 else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
603 MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
604 }
605}
606
607bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
608bool AMDGPUTargetMachine::EnableFunctionCalls = false;
609bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
610bool AMDGPUTargetMachine::DisableStructurizer = false;
611
612AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;
613
614StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
615 Attribute GPUAttr = F.getFnAttribute("target-cpu");
616 return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
617}
618
620 Attribute FSAttr = F.getFnAttribute("target-features");
621
622 return FSAttr.isValid() ? FSAttr.getValueAsString()
623 : getTargetFeatureString();
624}
625
626/// Predicate for Internalize pass.
627static bool mustPreserveGV(const GlobalValue &GV) {
628 if (const Function *F = dyn_cast<Function>(&GV))
629 return F->isDeclaration() || F->getName().starts_with("__asan_") ||
630 F->getName().starts_with("__sanitizer_") ||
631 AMDGPU::isEntryFunctionCC(F->getCallingConv());
632
633 GV.removeDeadConstantUsers();
634 return !GV.use_empty();
635}
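This predicate is consumed by Internalize in the pass-builder callbacks later in this file; a minimal new-pass-manager sketch of that pairing (mirrors the registerPipelineEarlySimplificationEPCallback body below; addInternalization is a hypothetical helper):

  #include "llvm/Transforms/IPO/GlobalDCE.h"
  #include "llvm/Transforms/IPO/Internalize.h"
  using namespace llvm;

  void addInternalization(ModulePassManager &MPM) {
    // Keep kernels, declarations and sanitizer hooks; internalize the rest.
    MPM.addPass(InternalizePass(mustPreserveGV));
    // Internalizing typically makes globals dead; collect them right after.
    MPM.addPass(GlobalDCEPass());
  }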
636
637void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
638 AAM.registerFunctionAnalysis<AMDGPUAA>();
639}
640
641static Expected<ScanOptions>
642parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
643 if (Params.empty())
644 return ScanOptions::Iterative;
645 Params.consume_front("strategy=");
646 auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
647 .Case("dpp", ScanOptions::DPP)
648 .Cases("iterative", "", ScanOptions::Iterative)
649 .Case("none", ScanOptions::None)
650 .Default(std::nullopt);
651 if (Result)
652 return *Result;
653 return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
654}
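With this parser hooked into the pass registry included below, the strategy can be chosen when invoking the pass by name, e.g. opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' input.ll; per the StringSwitch above, an empty parameter list and "strategy=iterative" both select the Iterative strategy. (Usage sketch; the pass name itself comes from AMDGPUPassRegistry.def.)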
655
656Error AMDGPUTargetMachine::buildCodeGenPipeline(
657 ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
658 CodeGenFileType FileType, const CGPassBuilderOption &Opts,
659 PassInstrumentationCallbacks *PIC) {
660 AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
661 return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
662}
663
664void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
665
666#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
667#include "llvm/Passes/TargetPassRegistry.inc"
668
669 PB.registerPipelineStartEPCallback(
670 [](ModulePassManager &PM, OptimizationLevel Level) {
671 FunctionPassManager FPM;
672 PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
673 if (EnableHipStdPar)
674 PM.addPass(HipStdParAcceleratorCodeSelectionPass());
675 });
676
677 PB.registerPipelineEarlySimplificationEPCallback(
678 [](ModulePassManager &PM, OptimizationLevel Level) {
679 PM.addPass(AMDGPUPrintfRuntimeBindingPass());
680
681 if (Level == OptimizationLevel::O0)
682 return;
683
684 PM.addPass(AMDGPUUnifyMetadataPass());
685
686 if (InternalizeSymbols) {
687 PM.addPass(InternalizePass(mustPreserveGV));
688 PM.addPass(GlobalDCEPass());
689 }
690
691 if (EarlyInlineAll && !EnableFunctionCalls)
692 PM.addPass(AMDGPUAlwaysInlinePass());
693 });
694
695 PB.registerPeepholeEPCallback(
696 [](FunctionPassManager &FPM, OptimizationLevel Level) {
697 if (Level == OptimizationLevel::O0)
698 return;
699
700 FPM.addPass(AMDGPUUseNativeCallsPass());
701 if (EnableLibCallSimplify)
702 FPM.addPass(AMDGPUSimplifyLibCallsPass());
703 });
704
705 PB.registerCGSCCOptimizerLateEPCallback(
706 [this](CGSCCPassManager &PM, OptimizationLevel Level) {
707 if (Level == OptimizationLevel::O0)
708 return;
709
710 FunctionPassManager FPM;
711
712 // Add promote kernel arguments pass to the opt pipeline right before
713 // infer address spaces which is needed to do actual address space
714 // rewriting.
715 if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
716 EnablePromoteKernelArguments)
717 FPM.addPass(AMDGPUPromoteKernelArgumentsPass());
718
719 // Add infer address spaces pass to the opt pipeline after inlining
720 // but before SROA to increase SROA opportunities.
721 FPM.addPass(InferAddressSpacesPass());
722
723 // This should run after inlining to have any chance of doing
724 // anything, and before other cleanup optimizations.
725 FPM.addPass(AMDGPUCodeGenPreparePass(*this));
726
727 if (Level != OptimizationLevel::O0) {
728 // Promote alloca to vector before SROA and loop unroll. If we
729 // manage to eliminate allocas before unroll we may choose to unroll
730 // less.
731 FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
732 }
733
734 PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
735 });
736
737 // FIXME: Why is AMDGPUAttributor not in CGSCC?
738 PB.registerOptimizerLastEPCallback(
739 [this](ModulePassManager &MPM, OptimizationLevel Level) {
740 if (Level != OptimizationLevel::O0) {
741 MPM.addPass(AMDGPUAttributorPass(*this));
742 }
743 });
744
745 PB.registerFullLinkTimeOptimizationLastEPCallback(
746 [this](ModulePassManager &PM, OptimizationLevel Level) {
747 // We want to support the -lto-partitions=N option as "best effort".
748 // For that, we need to lower LDS earlier in the pipeline before the
749 // module is partitioned for codegen.
750 if (EnableLowerModuleLDS)
751 PM.addPass(AMDGPULowerModuleLDSPass(*this));
752 });
753
754 PB.registerRegClassFilterParsingCallback(
755 [](StringRef FilterName) -> RegAllocFilterFunc {
756 if (FilterName == "sgpr")
757 return onlyAllocateSGPRs;
758 if (FilterName == "vgpr")
759 return onlyAllocateVGPRs;
760 return nullptr;
761 });
762}
763
764int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
765 return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
766 AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
767 AddrSpace == AMDGPUAS::REGION_ADDRESS)
768 ? -1
769 : 0;
770}
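A worked example of the convention this encodes (sketch; the wrapper function exists only to make the snippet self-contained):

  // 0 is a valid object address in LDS/scratch/region, so "null" must be
  // encoded as all-ones there; in every other address space it stays 0.
  void nullPointerExamples() {
    int64_t LDSNull =
        AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::LOCAL_ADDRESS);  // -1
    int64_t FlatNull =
        AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::FLAT_ADDRESS);   // 0
    (void)LDSNull;
    (void)FlatNull;
  }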
771
772bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
773 unsigned DestAS) const {
774 return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
775 AMDGPU::isFlatGlobalAddrSpace(DestAS);
776}
777
778unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
779 const auto *LD = dyn_cast<LoadInst>(V);
780 if (!LD)
781 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
782
783 // It must be a load of a generic pointer.
784 assert(V->getType()->isPointerTy() &&
785 V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);
786
787 const auto *Ptr = LD->getPointerOperand();
788 if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
789 return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
790 // A generic pointer loaded from constant memory can be assumed to be a
791 // global pointer, since constant memory is only populated on the host side.
792 // As implied by the offload programming model, only global pointers could
793 // be referenced on the host side.
794 return AMDGPUAS::GLOBAL_ADDRESS;
795}
796
797std::pair<const Value *, unsigned>
798AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
799 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
800 switch (II->getIntrinsicID()) {
801 case Intrinsic::amdgcn_is_shared:
802 return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
803 case Intrinsic::amdgcn_is_private:
804 return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
805 default:
806 break;
807 }
808 return std::pair(nullptr, -1);
809 }
810 // Check the global pointer predication based on
811 // (!is_shared(p) && !is_private(p)). Note that logical 'and' is commutative
812 // and the order of 'is_shared' and 'is_private' is not significant.
813 Value *Ptr;
814 if (match(
815 const_cast<Value *>(V),
816 m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
817 m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
818 m_Deferred(Ptr))))))
819 return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);
820
821 return std::pair(nullptr, -1);
822}
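The pattern matched above is exactly what the Clang address-space builtins lower to. A hedged HIP/C++-flavored source example that produces it (__builtin_amdgcn_is_shared and __builtin_amdgcn_is_private are the Clang builtins behind these intrinsics; storeIfGlobal is a hypothetical function name):

  // Inside the guarded block the optimizer may treat P as a global pointer;
  // getPredicatedAddrSpace returns {P, GLOBAL_ADDRESS} for this condition.
  void storeIfGlobal(int *P, int V) {
    if (!__builtin_amdgcn_is_shared(P) && !__builtin_amdgcn_is_private(P))
      *P = V;
  }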
823
824unsigned
825AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
826 switch (Kind) {
827 case PseudoSourceValue::Stack:
828 case PseudoSourceValue::FixedStack:
829 return AMDGPUAS::PRIVATE_ADDRESS;
830 case PseudoSourceValue::ConstantPool:
831 case PseudoSourceValue::GOT:
832 case PseudoSourceValue::JumpTable:
833 case PseudoSourceValue::GlobalValueCallEntry:
834 case PseudoSourceValue::ExternalSymbolCallEntry:
835 return AMDGPUAS::CONSTANT_ADDRESS;
836 }
837 return AMDGPUAS::FLAT_ADDRESS;
838}
839
840bool AMDGPUTargetMachine::splitModule(
841 Module &M, unsigned NumParts,
842 function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) {
843 // FIXME(?): Would be better to use an already existing Analysis/PassManager,
844 // but all current users of this API don't have one ready and would need to
845 // create one anyway. Let's hide the boilerplate for now to keep it simple.
846
847 LoopAnalysisManager LAM;
848 FunctionAnalysisManager FAM;
849 CGSCCAnalysisManager CGAM;
850 ModuleAnalysisManager MAM;
851
852 PassBuilder PB(this);
853 PB.registerModuleAnalyses(MAM);
854 PB.registerFunctionAnalyses(FAM);
855 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
856
857 ModulePassManager MPM;
858 MPM.addPass(AMDGPUSplitModulePass(NumParts, ModuleCallback));
859 MPM.run(M, MAM);
860 return true;
861}
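A usage sketch of this entry point (M is a Module owned by the caller; splitIntoFourParts is a hypothetical helper):

  void splitIntoFourParts(GCNTargetMachine &TM, Module &M) {
    TM.splitModule(M, /*NumParts=*/4, [](std::unique_ptr<Module> Part) {
      // Hand each partition to its own codegen job, e.g. to honor
      // -lto-partitions=N as discussed in the LTO callback above.
    });
  }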
862
863//===----------------------------------------------------------------------===//
864// GCN Target Machine (SI+)
865//===----------------------------------------------------------------------===//
866
867GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
868 StringRef CPU, StringRef FS,
869 const TargetOptions &Options,
870 std::optional<Reloc::Model> RM,
871 std::optional<CodeModel::Model> CM,
872 CodeGenOptLevel OL, bool JIT)
873 : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
874
875const TargetSubtargetInfo *
876GCNTargetMachine::getSubtargetImpl(const Function &F) const {
877 StringRef GPU = getGPUName(F);
878 StringRef FS = getFeatureString(F);
879
880 SmallString<128> SubtargetKey(GPU);
881 SubtargetKey.append(FS);
882
883 auto &I = SubtargetMap[SubtargetKey];
884 if (!I) {
885 // This needs to be done before we create a new subtarget since any
886 // creation will depend on the TM and the code generation flags on the
887 // function that reside in TargetOptions.
888 resetTargetOptions(F);
889 I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
890 }
891
892 I->setScalarizeGlobalBehavior(ScalarizeGlobal);
893
894 return I.get();
895}
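Because the cache key is the GPU name plus the feature string, one module can mix functions compiled for different subtargets. A sketch (F1 and F2 are hypothetical Functions whose "target-cpu" attributes differ):

  void perFunctionSubtargets(const GCNTargetMachine &TM,
                             const Function &F1, const Function &F2) {
    // E.g. F1 carries "target-cpu"="gfx90a" and F2 "target-cpu"="gfx1030";
    // the two lookups then return distinct cached GCNSubtarget instances.
    const auto *ST1 = TM.getSubtargetImpl(F1);
    const auto *ST2 = TM.getSubtargetImpl(F2);
    (void)ST1;
    (void)ST2;
  }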
896
897TargetTransformInfo
898GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
899 return TargetTransformInfo(GCNTTIImpl(this, F));
900}
901
902//===----------------------------------------------------------------------===//
903// AMDGPU Pass Setup
904//===----------------------------------------------------------------------===//
905
906std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
907 return getStandardCSEConfigForOpt(TM->getOptLevel());
908}
909
910namespace {
911
912class GCNPassConfig final : public AMDGPUPassConfig {
913public:
914 GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
915 : AMDGPUPassConfig(TM, PM) {
916 // It is necessary to know the register usage of the entire call graph. We
917 // allow calls without EnableAMDGPUFunctionCalls if they are marked
918 // noinline, so this is always required.
919 setRequiresCodeGenSCCOrder(true);
920 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
921 }
922
923 GCNTargetMachine &getGCNTargetMachine() const {
924 return getTM<GCNTargetMachine>();
925 }
926
927 ScheduleDAGInstrs *
928 createMachineScheduler(MachineSchedContext *C) const override;
929
930 ScheduleDAGInstrs *
931 createPostMachineScheduler(MachineSchedContext *C) const override {
932 ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
933 C, std::make_unique<PostGenericScheduler>(C),
934 /*RemoveKillFlags=*/true);
935 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
936 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
937 if (ST.shouldClusterStores())
938 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
939 DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
940 DAG->addMutation(
941 createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
942 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
943 DAG->addMutation(createVOPDPairingMutation());
944 return DAG;
945 }
946
947 bool addPreISel() override;
948 void addMachineSSAOptimization() override;
949 bool addILPOpts() override;
950 bool addInstSelector() override;
951 bool addIRTranslator() override;
952 void addPreLegalizeMachineIR() override;
953 bool addLegalizeMachineIR() override;
954 void addPreRegBankSelect() override;
955 bool addRegBankSelect() override;
956 void addPreGlobalInstructionSelect() override;
957 bool addGlobalInstructionSelect() override;
958 void addFastRegAlloc() override;
959 void addOptimizedRegAlloc() override;
960
961 FunctionPass *createSGPRAllocPass(bool Optimized);
962 FunctionPass *createVGPRAllocPass(bool Optimized);
963 FunctionPass *createRegAllocPass(bool Optimized) override;
964
965 bool addRegAssignAndRewriteFast() override;
966 bool addRegAssignAndRewriteOptimized() override;
967
968 void addPreRegAlloc() override;
969 bool addPreRewrite() override;
970 void addPostRegAlloc() override;
971 void addPreSched2() override;
972 void addPreEmitPass() override;
973};
974
975} // end anonymous namespace
976
977AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
978 : TargetPassConfig(TM, PM) {
979 // Exceptions and StackMaps are not supported, so these passes will never do
980 // anything.
981 disablePass(&StackMapLivenessID);
982 disablePass(&FuncletLayoutID);
983 // Garbage collection is not supported.
984 disablePass(&GCLoweringID);
985 disablePass(&ShadowStackGCLoweringID);
986}
987
988void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
989 if (getOptLevel() == CodeGenOptLevel::Aggressive)
990 addPass(createGVNPass());
991 else
992 addPass(createEarlyCSEPass());
993}
994
995void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
996 if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
997 addPass(createLoopDataPrefetchPass());
998 addPass(createSeparateConstOffsetFromGEPPass());
999 // ReassociateGEPs exposes more opportunities for SLSR. See
1000 // the example in reassociate-geps-and-slsr.ll.
1001 addPass(createStraightLineStrengthReducePass());
1002 // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
1003 // or EarlyCSE can reuse.
1004 addEarlyCSEOrGVNPass();
1005 // Run NaryReassociate after EarlyCSE/GVN to be more effective.
1006 addPass(createNaryReassociatePass());
1007 // NaryReassociate on GEPs creates redundant common expressions, so run
1008 // EarlyCSE after it.
1009 addPass(createEarlyCSEPass());
1010}
1011
1012void AMDGPUPassConfig::addIRPasses() {
1013 const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();
1014 Triple::ArchType Arch = TM.getTargetTriple().getArch();
1018
1019 // There is no reason to run these.
1020 disablePass(&StackMapLivenessID);
1021 disablePass(&FuncletLayoutID);
1022 disablePass(&PatchableFunctionID);
1023
1024 addPass(createAMDGPUPrintfRuntimeBinding());
1025 if (LowerCtorDtor)
1026 addPass(createAMDGPUCtorDtorLoweringLegacyPass());
1027
1028 if (isPassEnabled(EnableImageIntrinsicOptimizer))
1029 addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
1030
1031 // This can be disabled by passing ::Disable here or on the command line
1032 // with --expand-variadics-override=disable.
1033 addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
1034
1035 // Function calls are not supported, so make sure we inline everything.
1036 addPass(createAMDGPUAlwaysInlinePass());
1037 addPass(createAlwaysInlinerLegacyPass());
1038
1039 // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
1040 if (Arch == Triple::r600)
1041 addPass(createR600OpenCLImageTypeLoweringPass());
1042
1043 // Replace OpenCL enqueued block function pointers with global variables.
1045
1046 // Runs before PromoteAlloca so the latter can account for function uses
1047 if (EnableLowerModuleLDS) {
1048 addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
1049 }
1050
1051 if (TM.getOptLevel() > CodeGenOptLevel::None)
1052 addPass(createInferAddressSpacesPass());
1053
1054 // Run atomic optimizer before Atomic Expand
1055 if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
1056 (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
1057 (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
1058 addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
1059 }
1060
1061 addPass(createAtomicExpandLegacyPass());
1062
1063 if (TM.getOptLevel() > CodeGenOptLevel::None) {
1064 addPass(createAMDGPUPromoteAlloca());
1065
1066 if (isPassEnabled(EnableScalarIRPasses))
1067 addStraightLineScalarOptimizationPasses();
1068
1069 if (EnableAMDGPUAliasAnalysis) {
1070 addPass(createAMDGPUAAWrapperPass());
1071 addPass(createExternalAAWrapperPass([](Pass &P, Function &,
1072 AAResults &AAR) {
1073 if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
1074 AAR.addAAResult(WrapperPass->getResult());
1075 }));
1076 }
1077
1079 // TODO: May want to move later or split into an early and late one.
1080 addPass(createAMDGPUCodeGenPreparePass());
1081 }
1082
1083 // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
1084 // have expanded.
1085 if (TM.getOptLevel() > CodeGenOptLevel::Less)
1086 addPass(createLICMPass());
1087 }
1088
1089 TargetPassConfig::addIRPasses();
1090
1091 // EarlyCSE is not always strong enough to clean up what LSR produces. For
1092 // example, GVN can combine
1093 //
1094 // %0 = add %a, %b
1095 // %1 = add %b, %a
1096 //
1097 // and
1098 //
1099 // %0 = shl nsw %a, 2
1100 // %1 = shl %a, 2
1101 //
1102 // but EarlyCSE can do neither of them.
1103 if (isPassEnabled(EnableScalarIRPasses))
1104 addEarlyCSEOrGVNPass();
1105}
1106
1107void AMDGPUPassConfig::addCodeGenPrepare() {
1108 if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
1109 // FIXME: This pass adds 2 hacky attributes that can be replaced with an
1110 // analysis, and should be removed.
1111 addPass(createAMDGPUAnnotateKernelFeaturesPass());
1112 }
1113
1117
1118 if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
1119 // This lowering has been placed after codegenprepare to take advantage of
1120 // address mode matching (which is why it isn't put with the LDS lowerings).
1121 // It could be placed anywhere before uniformity annotations (an analysis
1122 // that it changes by splitting up fat pointers into their components)
1123 // but has been put before switch lowering and CFG flattening so that those
1124 // passes can run on the more optimized control flow this pass creates in
1125 // many cases.
1126 //
1127 // FIXME: This should ideally be put after the LoadStoreVectorizer.
1128 // However, due to some annoying facts about ResourceUsageAnalysis,
1129 // (especially as exercised in the resource-usage-dead-function test),
1131 // we need all the function passes from codegenprepare all the way through
1131 // said resource usage analysis to run on the call graph produced
1132 // before codegenprepare runs (because codegenprepare will knock some
1133 // nodes out of the graph, which leads to function-level passes not
1134 // being run on them, which causes crashes in the resource usage analysis).
1135 addPass(createAMDGPULowerBufferFatPointersPass());
1136 // In accordance with the above FIXME, manually force all the
1137 // function-level passes into a CGSCCPassManager.
1138 addPass(new DummyCGSCCPass());
1139 }
1140
1141 TargetPassConfig::addCodeGenPrepare();
1142
1143 if (isPassEnabled(EnableLoadStoreVectorizer))
1144 addPass(createLoadStoreVectorizerPass());
1145
1146 // The LowerSwitch pass may introduce unreachable blocks that can cause
1147 // unexpected behavior for subsequent passes. Placing it here seems better;
1148 // these blocks would get cleaned up by UnreachableBlockElim, which is
1149 // inserted next in the pass flow.
1150 addPass(createLowerSwitchPass());
1151}
1152
1153bool AMDGPUPassConfig::addPreISel() {
1154 if (TM->getOptLevel() > CodeGenOptLevel::None)
1155 addPass(createFlattenCFGPass());
1156 return false;
1157}
1158
1159bool AMDGPUPassConfig::addInstSelector() {
1160 addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
1161 return false;
1162}
1163
1164bool AMDGPUPassConfig::addGCPasses() {
1165 // Do nothing. GC is not supported.
1166 return false;
1167}
1168
1169ScheduleDAGInstrs *
1170AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
1171 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1172 ScheduleDAGMILive *DAG = createGenericSchedLive(C);
1173 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
1174 if (ST.shouldClusterStores())
1175 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
1176 return DAG;
1177}
1178
1179MachineFunctionInfo *R600TargetMachine::createMachineFunctionInfo(
1180 BumpPtrAllocator &Allocator, const Function &F,
1181 const TargetSubtargetInfo *STI) const {
1182 return R600MachineFunctionInfo::create<R600MachineFunctionInfo>(
1183 Allocator, F, static_cast<const R600Subtarget *>(STI));
1184}
1185
1186//===----------------------------------------------------------------------===//
1187// GCN Pass Setup
1188//===----------------------------------------------------------------------===//
1189
1190ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
1191 MachineSchedContext *C) const {
1192 const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
1193 if (ST.enableSIScheduler())
1194 return createSIMachineScheduler(C);
1195
1196 if (EnableMaxIlpSchedStrategy)
1197 return createGCNMaxILPMachineScheduler(C);
1198
1199 return createGCNMaxOccupancyMachineScheduler(C);
1200}
1201
1202bool GCNPassConfig::addPreISel() {
1203 AMDGPUPassConfig::addPreISel();
1204
1205 if (TM->getOptLevel() > CodeGenOptLevel::None)
1206 addPass(createSinkingPass());
1207
1208 if (TM->getOptLevel() > CodeGenOptLevel::None)
1210
1211 // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
1212 // regions formed by them.
1213 addPass(&AMDGPUUnifyDivergentExitNodesID);
1214 if (!LateCFGStructurize && !DisableStructurizer) {
1215 if (EnableStructurizerWorkarounds) {
1216 addPass(createFixIrreduciblePass());
1217 addPass(createUnifyLoopExitsPass());
1218 }
1219 addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
1220 }
1224 // TODO: Move this right after structurizeCFG to avoid extra divergence
1225 // analysis. This depends on stopping SIAnnotateControlFlow from making
1226 // control flow modifications.
1228 }
1229 addPass(createLCSSAPass());
1230
1231 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1232 addPass(&AMDGPUPerfHintAnalysisID);
1233
1234 return false;
1235}
1236
1237void GCNPassConfig::addMachineSSAOptimization() {
1238 TargetPassConfig::addMachineSSAOptimization();
1239
1240 // We want to fold operands after PeepholeOptimizer has run (or as part of
1241 // it), because it will eliminate extra copies making it easier to fold the
1242 // real source operand. We want to eliminate dead instructions after, so that
1243 // we see fewer uses of the copies. We then need to clean up the dead
1244 // instructions left over after the operands are folded as well.
1245 //
1246 // XXX - Can we get away without running DeadMachineInstructionElim again?
1247 addPass(&SIFoldOperandsID);
1248 if (EnableDPPCombine)
1249 addPass(&GCNDPPCombineID);
1250 addPass(&SILoadStoreOptimizerID);
1251 if (isPassEnabled(EnableSDWAPeephole)) {
1252 addPass(&SIPeepholeSDWAID);
1253 addPass(&EarlyMachineLICMID);
1254 addPass(&MachineCSEID);
1255 addPass(&SIFoldOperandsID);
1256 }
1257 addPass(&DeadMachineInstructionElimID);
1258 addPass(createSIShrinkInstructionsPass());
1259}
1260
1261bool GCNPassConfig::addILPOpts() {
1262 if (EnableEarlyIfConversion)
1263 addPass(&EarlyIfConverterID);
1264
1265 TargetPassConfig::addILPOpts();
1266 return false;
1267}
1268
1269bool GCNPassConfig::addInstSelector() {
1270 AMDGPUPassConfig::addInstSelector();
1271 addPass(&SIFixSGPRCopiesID);
1272 addPass(createSILowerI1CopiesPass());
1273 return false;
1274}
1275
1276bool GCNPassConfig::addIRTranslator() {
1277 addPass(new IRTranslator(getOptLevel()));
1278 return false;
1279}
1280
1281void GCNPassConfig::addPreLegalizeMachineIR() {
1282 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1283 addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
1284 addPass(new Localizer());
1285}
1286
1287bool GCNPassConfig::addLegalizeMachineIR() {
1288 addPass(new Legalizer());
1289 return false;
1290}
1291
1292void GCNPassConfig::addPreRegBankSelect() {
1293 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1294 addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
1295 addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
1296}
1297
1298bool GCNPassConfig::addRegBankSelect() {
1299 addPass(new AMDGPURegBankSelect());
1300 return false;
1301}
1302
1303void GCNPassConfig::addPreGlobalInstructionSelect() {
1304 bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
1305 addPass(createAMDGPURegBankCombiner(IsOptNone));
1306}
1307
1308bool GCNPassConfig::addGlobalInstructionSelect() {
1309 addPass(new InstructionSelect(getOptLevel()));
1310 return false;
1311}
1312
1313void GCNPassConfig::addPreRegAlloc() {
1314 if (LateCFGStructurize) {
1315 addPass(createAMDGPUMachineCFGStructurizerPass());
1316 }
1317}
1318
1319void GCNPassConfig::addFastRegAlloc() {
1320 // FIXME: We have to disable the verifier here because of PHIElimination +
1321 // TwoAddressInstructions disabling it.
1322
1323 // This must be run immediately after phi elimination and before
1324 // TwoAddressInstructions, otherwise the processing of the tied operand of
1325 // SI_ELSE will introduce a copy of the tied operand source after the else.
1326 insertPass(&PHIEliminationID, &SILowerControlFlowID);
1327
1328 insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
1329
1330 TargetPassConfig::addFastRegAlloc();
1331}
1332
1333void GCNPassConfig::addOptimizedRegAlloc() {
1334 // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
1335 // instructions that cause scheduling barriers.
1336 insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
1337
1338 if (OptExecMaskPreRA)
1339 insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
1340
1341 if (isPassEnabled(EnableRewritePartialRegUses))
1342 insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);
1343
1344 if (isPassEnabled(EnablePreRAOptimizations))
1345 insertPass(&MachineSchedulerID, &AMDGPUPreRAOptimizationsID);
1346
1347 // This is not an essential optimization and it has a noticeable impact on
1348 // compilation time, so we only enable it from O2.
1349 if (TM->getOptLevel() > CodeGenOptLevel::Less)
1350 insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);
1351
1352 // FIXME: when an instruction has a Killed operand, and the instruction is
1353 // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
1354 // the register in LiveVariables, this would trigger a failure in verifier,
1355 // we should fix it and enable the verifier.
1356 if (OptVGPRLiveRange)
1357 insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID);
1358 // This must be run immediately after phi elimination and before
1359 // TwoAddressInstructions, otherwise the processing of the tied operand of
1360 // SI_ELSE will introduce a copy of the tied operand source after the else.
1361 insertPass(&PHIEliminationID, &SILowerControlFlowID);
1362
1363 if (EnableDCEInRA)
1364 insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);
1365
1366 TargetPassConfig::addOptimizedRegAlloc();
1367}
1368
1369bool GCNPassConfig::addPreRewrite() {
1370 addPass(&SILowerWWMCopiesID);
1371 if (EnableRegReassign)
1372 addPass(&GCNNSAReassignID);
1373 return true;
1374}
1375
1376FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
1377 // Initialize the global default.
1378 llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
1379 initializeDefaultSGPRRegisterAllocatorOnce);
1380
1381 RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
1382 if (Ctor != useDefaultRegisterAllocator)
1383 return Ctor();
1384
1385 if (Optimized)
1386 return createGreedyRegisterAllocator(onlyAllocateSGPRs);
1387
1388 return createFastRegisterAllocator(onlyAllocateSGPRs, false);
1389}
1390
1391FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
1392 // Initialize the global default.
1393 llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
1394 initializeDefaultVGPRRegisterAllocatorOnce);
1395
1396 RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
1397 if (Ctor != useDefaultRegisterAllocator)
1398 return Ctor();
1399
1400 if (Optimized)
1401 return createGreedyVGPRRegisterAllocator();
1402
1403 return createFastVGPRRegisterAllocator();
1404}
1405
1406FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
1407 llvm_unreachable("should not be used");
1408}
1409
1410static const char RegAllocOptNotSupportedMessage[] =
1411 "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";
1412
1413bool GCNPassConfig::addRegAssignAndRewriteFast() {
1414 if (!usingDefaultRegAlloc())
1415 report_fatal_error(RegAllocOptNotSupportedMessage);
1416
1417 addPass(&GCNPreRALongBranchRegID);
1418
1419 addPass(createSGPRAllocPass(false));
1420
1421 // Equivalent of PEI for SGPRs.
1422 addPass(&SILowerSGPRSpillsID);
1423 addPass(&SIPreAllocateWWMRegsID);
1424
1425 addPass(createVGPRAllocPass(false));
1426
1427 addPass(&SILowerWWMCopiesID);
1428 return true;
1429}
1430
1431bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
1432 if (!usingDefaultRegAlloc())
1433 report_fatal_error(RegAllocOptNotSupportedMessage);
1434
1435 addPass(&GCNPreRALongBranchRegID);
1436
1437 addPass(createSGPRAllocPass(true));
1438
1439 // Commit allocated register changes. This is mostly necessary because too
1440 // many things rely on the use lists of the physical registers, such as the
1441 // verifier. This is only necessary with allocators which use LiveIntervals,
1442 // since FastRegAlloc does the replacements itself.
1443 addPass(createVirtRegRewriter(false));
1444
1445 // Equivalent of PEI for SGPRs.
1446 addPass(&SILowerSGPRSpillsID);
1447 addPass(&SIPreAllocateWWMRegsID);
1448
1449 addPass(createVGPRAllocPass(true));
1450
1451 addPreRewrite();
1452 addPass(&VirtRegRewriterID);
1453
1453
1454 addPass(&AMDGPUMarkLastScratchLoadID);
1455
1456 return true;
1457}
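Note the two-phase structure made explicit above: SGPRs are assigned and rewritten first, SGPR spills are lowered (the "PEI for SGPRs"), and only then are VGPRs allocated, so VGPR spill code can rely on the final SGPR assignment. The per-class allocators can also be mixed from the command line, e.g. "llc -mtriple=amdgcn -sgpr-regalloc=basic -vgpr-regalloc=greedy"; the flag names come from the cl::opts defined near the top of this file.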
1458
1459void GCNPassConfig::addPostRegAlloc() {
1460 addPass(&SIFixVGPRCopiesID);
1461 if (getOptLevel() > CodeGenOptLevel::None)
1462 addPass(&SIOptimizeExecMaskingID);
1463 TargetPassConfig::addPostRegAlloc();
1464}
1465
1466void GCNPassConfig::addPreSched2() {
1467 if (TM->getOptLevel() > CodeGenOptLevel::None)
1468 addPass(createSIShrinkInstructionsPass());
1469 addPass(&SIPostRABundlerID);
1470}
1471
1472void GCNPassConfig::addPreEmitPass() {
1473 if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
1474 addPass(&GCNCreateVOPDID);
1475 addPass(createSIMemoryLegalizerPass());
1476 addPass(createSIInsertWaitcntsPass());
1477
1478 addPass(createSIModeRegisterPass());
1479
1480 if (getOptLevel() > CodeGenOptLevel::None)
1481 addPass(&SIInsertHardClausesID);
1482
1483 addPass(&SILateBranchLoweringPassID);
1484 if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
1485 addPass(createAMDGPUSetWavePriorityPass());
1486 if (getOptLevel() > CodeGenOptLevel::None)
1487 addPass(&SIPreEmitPeepholeID);
1488 // The hazard recognizer that runs as part of the post-ra scheduler does not
1489 // guarantee to be able to handle all hazards correctly. This is because if
1490 // there are multiple scheduling regions in a basic block, the regions are
1491 // scheduled bottom up, so when we begin to schedule a region we don't know
1492 // what instructions were emitted directly before it.
1493 //
1494 // Here we add a stand-alone hazard recognizer pass which can handle all
1495 // cases.
1496 addPass(&PostRAHazardRecognizerID);
1497
1498 if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
1499 addPass(&AMDGPUInsertSingleUseVDSTID);
1500
1501 if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
1502 addPass(&AMDGPUInsertDelayAluID);
1503
1504 addPass(&BranchRelaxationPassID);
1505}
1506
1507TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
1508 return new GCNPassConfig(*this, PM);
1509}
1510
1511void GCNTargetMachine::registerMachineRegisterInfoCallback(
1512 MachineFunction &MF) const {
1513 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1514 MF.getRegInfo().addDelegate(MFI);
1515}
1516
1517MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
1518 BumpPtrAllocator &Allocator, const Function &F,
1519 const TargetSubtargetInfo *STI) const {
1520 return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
1521 Allocator, F, static_cast<const GCNSubtarget *>(STI));
1522}
1523
1524yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
1525 return new yaml::SIMachineFunctionInfo();
1526}
1527
1528yaml::MachineFunctionInfo *
1529GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
1530 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1531 return new yaml::SIMachineFunctionInfo(
1532 *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
1533}
1534
1535bool GCNTargetMachine::parseMachineFunctionInfo(
1536 const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
1537 SMDiagnostic &Error, SMRange &SourceRange) const {
1538 const yaml::SIMachineFunctionInfo &YamlMFI =
1539 static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
1540 MachineFunction &MF = PFS.MF;
1541 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1542 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1543
1544 if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
1545 return true;
1546
1547 if (MFI->Occupancy == 0) {
1548 // Fix up the subtarget-dependent default value.
1549 MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
1550 }
1551
1552 auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
1553 Register TempReg;
1554 if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
1555 SourceRange = RegName.SourceRange;
1556 return true;
1557 }
1558 RegVal = TempReg;
1559
1560 return false;
1561 };
1562
1563 auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
1564 Register &RegVal) {
1565 return !RegName.Value.empty() && parseRegister(RegName, RegVal);
1566 };
1567
1568 if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
1569 return true;
1570
1571 if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
1572 return true;
1573
1574 if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
1575 MFI->LongBranchReservedReg))
1576 return true;
1577
1578 auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
1579 // Create a diagnostic for the register string literal.
1580 const MemoryBuffer &Buffer =
1581 *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
1582 Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
1583 RegName.Value.size(), SourceMgr::DK_Error,
1584 "incorrect register class for field", RegName.Value,
1585 std::nullopt, std::nullopt);
1586 SourceRange = RegName.SourceRange;
1587 return true;
1588 };
1589
1590 if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
1591 parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
1592 parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
1593 return true;
1594
1595 if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
1596 !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
1597 return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
1598 }
1599
1600 if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
1601 !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
1602 return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
1603 }
1604
1605 if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
1606 !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
1607 return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
1608 }
1609
1610 for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
1611 Register ParsedReg;
1612 if (parseRegister(YamlReg, ParsedReg))
1613 return true;
1614
1615 MFI->reserveWWMRegister(ParsedReg);
1616 }
1617
1618 auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
1619 const TargetRegisterClass &RC,
1620 ArgDescriptor &Arg, unsigned UserSGPRs,
1621 unsigned SystemSGPRs) {
1622 // Skip parsing if it's not present.
1623 if (!A)
1624 return false;
1625
1626 if (A->IsRegister) {
1627 Register Reg;
1628 if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
1629 SourceRange = A->RegisterName.SourceRange;
1630 return true;
1631 }
1632 if (!RC.contains(Reg))
1633 return diagnoseRegisterClass(A->RegisterName);
1634 Arg = ArgDescriptor::createRegister(Reg);
1635 } else
1636 Arg = ArgDescriptor::createStack(A->StackOffset);
1637 // Check and apply the optional mask.
1638 if (A->Mask)
1639 Arg = ArgDescriptor::createArg(Arg, *A->Mask);
1640
1641 MFI->NumUserSGPRs += UserSGPRs;
1642 MFI->NumSystemSGPRs += SystemSGPRs;
1643 return false;
1644 };
1645
1646 if (YamlMFI.ArgInfo &&
1647 (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
1648 AMDGPU::SGPR_128RegClass,
1649 MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
1650 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
1651 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
1652 2, 0) ||
1653 parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
1654 MFI->ArgInfo.QueuePtr, 2, 0) ||
1655 parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
1656 AMDGPU::SReg_64RegClass,
1657 MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
1658 parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
1659 AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
1660 2, 0) ||
1661 parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
1662 AMDGPU::SReg_64RegClass,
1663 MFI->ArgInfo.FlatScratchInit, 2, 0) ||
1664 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
1665 AMDGPU::SGPR_32RegClass,
1666 MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
1667 parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
1668 AMDGPU::SGPR_32RegClass,
1669 MFI->ArgInfo.LDSKernelId, 0, 1) ||
1670 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
1671 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
1672 0, 1) ||
1673 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
1674 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
1675 0, 1) ||
1676 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
1677 AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
1678 0, 1) ||
1679 parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
1680 AMDGPU::SGPR_32RegClass,
1681 MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
1682 parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
1683 AMDGPU::SGPR_32RegClass,
1684 MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
1685 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
1686 AMDGPU::SReg_64RegClass,
1687 MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
1688 parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
1689 AMDGPU::SReg_64RegClass,
1690 MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
1691 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
1692 AMDGPU::VGPR_32RegClass,
1693 MFI->ArgInfo.WorkItemIDX, 0, 0) ||
1694 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
1695 AMDGPU::VGPR_32RegClass,
1696 MFI->ArgInfo.WorkItemIDY, 0, 0) ||
1697 parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
1698 AMDGPU::VGPR_32RegClass,
1699 MFI->ArgInfo.WorkItemIDZ, 0, 0)))
1700 return true;
1701
1702 if (ST.hasIEEEMode())
1703 MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
1704 if (ST.hasDX10ClampMode())
1705 MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
1706
1707 // FIXME: Move proper support for denormal-fp-math into base MachineFunction
1708 MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
1709 ? DenormalMode::IEEE
1710 : DenormalMode::PreserveSign;
1711 MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
1712 ? DenormalMode::IEEE
1713 : DenormalMode::PreserveSign;
1714
1715 MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
1716 ? DenormalMode::IEEE
1717 : DenormalMode::PreserveSign;
1718 MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
1719 ? DenormalMode::IEEE
1720 : DenormalMode::PreserveSign;
1721
1722 return false;
1723}
unsigned const MachineRegisterInfo * MRI
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool, true > DisableStructurizer("amdgpu-disable-structurizer", cl::desc("Disable structurizer for experiments; produces unusable code"), cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
static cl::opt< bool > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst", cl::desc("Enable s_singleuse_vdst insertion"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:686
#define LLVM_READNONE
Definition: Compiler.h:220
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
CGSCCAnalysisManager CGAM
ModulePassManager MPM
LoopAnalysisManager LAM
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const char LLVMTargetMachineRef TM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This header defines various interfaces for pass management in LLVM.
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
Splits the module M into N linkable partitions.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) override
Entry point for module splitting.
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:391
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:723
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Tagged union holding either a T or a Error.
Definition: Error.h:481
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:278
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML representation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:36
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:106
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:481
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:472
void crossRegisterProxies(LoopAnalysisManager &LAM, FunctionAnalysisManager &FAM, CGSCCAnalysisManager &CGAM, ModuleAnalysisManager &MAM, MachineFunctionAnalysisManager *MFAM=nullptr)
Cross register the analysis managers through their proxies.
void registerOptimizerLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:499
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:406
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:451
void registerRegClassFilterParsingCallback(const std::function< RegAllocFilterFunc(StringRef)> &C)
Register callbacks to parse the target-specific filter field if the regalloc pass needs it.
Definition: PassBuilder.h:588
void registerModuleAnalyses(ModuleAnalysisManager &MAM)
Registers all available module analysis passes.
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:517
void registerFunctionAnalyses(FunctionAnalysisManager &FAM)
Registers all available function analysis passes.
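A hedged sketch of how a target hooks one of these extension points; the GlobalDCEPass here is illustrative, not necessarily what this file registers:

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &MPM, OptimizationLevel Level) {
        if (Level != OptimizationLevel::O0) // skip at -O0
          MPM.addPass(GlobalDCEPass());
      });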
This class manages callback registration and provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same_v< PassT, PassManager > > addPass(PassT &&Pass)
Definition: PassManager.h:195
PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, ExtraArgTs... ExtraArgs)
Run all of the passes in this manager over the given unit of IR.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:575
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:576
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:620
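A small sketch of consume_front, which both tests for and strips a prefix:

  llvm::StringRef Name = "gfx1030";
  if (Name.consume_front("gfx")) {
    // Name is now "1030"; consume_front returned true.
  }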
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:96
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:375
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for stream implementations that also support a pwrite operation.
Definition: raw_ostream.h:434
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:415
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
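A sketch combining these matchers, where V is an llvm::Value* from surrounding code; m_Deferred makes the second operand reuse the X bound by the first:

  using namespace llvm::PatternMatch;
  llvm::Value *X;
  // Matches (X & ~X) with the operands in either order.
  if (match(V, m_c_And(m_Value(X), m_Not(m_Deferred(X))))) {
    // ...
  }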
@ ReallyHidden
Definition: CommandLine.h:138
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:711
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:463
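A self-contained sketch of these cl helpers; the option name and enum are hypothetical:

  enum class WaveMode { Auto, Wave32, Wave64 };
  static llvm::cl::opt<WaveMode> WaveModeOpt(
      "example-wave-mode", llvm::cl::ReallyHidden,
      llvm::cl::desc("Select wavefront mode"),
      llvm::cl::init(WaveMode::Auto),
      llvm::cl::values(
          clEnumValN(WaveMode::Auto, "auto", "Pick automatically"),
          clEnumValN(WaveMode::Wave32, "wave32", "Force wave32"),
          clEnumValN(WaveMode::Wave64, "wave64", "Force wave64")));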
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass performs register allocation as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
std::function< bool(const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, const Register Reg)> RegAllocFilterFunc
Filter function for register classes during regalloc.
FunctionPass * createAMDGPUSetWavePriorityPass()
void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &)
Pass * createLCSSAPass()
Definition: LCSSA.cpp:506
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
ModulePass * createExpandVariadicsPass(ExpandVariadicsMode)
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:848
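Sketch: the adaptor lets a function pass run under a ModulePassManager; SimplifyCFGPass is just an example pass, not tied to this file:

  ModulePassManager MPM;
  MPM.addPass(createModuleToFunctionPassAdaptor(SimplifyCFGPass()));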
void initializeAMDGPULateCodeGenPreparePass(PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUAnnotateUniformValuesPass(PassRegistry &)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:98
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
char & SILoadStoreOptimizerID
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifies whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
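A common pattern is for a pass config's getCSEConfig override to forward here, keyed on the optimization level; MyPassConfig is a hypothetical TargetPassConfig subclass:

  std::unique_ptr<CSEConfigBase> MyPassConfig::getCSEConfig() const {
    return getStandardCSEConfigForOpt(TM->getOptLevel());
  }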
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
char & AMDGPUInsertSingleUseVDSTID
Pass * createLICMPass()
Definition: LICM.cpp:379
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & AMDGPUPerfHintAnalysisID
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
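A sketch of the pattern that note describes: a createMachineScheduler override builds the generic live scheduler, then installs DAG mutations (MyPassConfig and the exact mutation set are illustrative):

  ScheduleDAGInstrs *
  MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
    return DAG;
  }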
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeSIAnnotateControlFlowPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
FunctionPass * createAMDGPUAnnotateUniformValues()
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
FunctionPass * createSILowerI1CopiesPass()
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific DAG, ready for instruction scheduling.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:165
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store c...
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3396
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
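Sketch of the call_once/once_flag pairing used for one-time initialization:

  static llvm::once_flag InitFlag;
  llvm::call_once(InitFlag, [] {
    // Runs exactly once, even with concurrent callers.
  });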
void initializeSILowerSGPRSpillsPass(PassRegistry &)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
void initializeSIFixSGPRCopiesPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
void initializeSILowerI1CopiesPass(PassRegistry &)
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3598
FunctionPass * createAMDGPULateCodeGenPreparePass()
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
char & SIFixSGPRCopiesID
FunctionPass * createSIAnnotateControlFlowPass()
Create the annotation pass.
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.