//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs.
//
//===----------------------------------------------------------------------===//

15#include "AMDGPUTargetMachine.h"
16#include "AMDGPU.h"
17#include "AMDGPUAliasAnalysis.h"
20#include "AMDGPUIGroupLP.h"
21#include "AMDGPUMacroFusion.h"
22#include "AMDGPURegBankSelect.h"
27#include "GCNSchedStrategy.h"
28#include "GCNVOPDUtils.h"
29#include "R600.h"
31#include "R600TargetMachine.h"
33#include "SIMachineScheduler.h"
44#include "llvm/CodeGen/Passes.h"
47#include "llvm/IR/IntrinsicsAMDGPU.h"
48#include "llvm/IR/PassManager.h"
53#include "llvm/Transforms/IPO.h"
63#include <optional>
64
using namespace llvm;
using namespace llvm::PatternMatch;

namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

/// -{sgpr|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);
} // end anonymous namespace

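// The machinery above splits register allocation between the scalar (SGPR)
// and vector (VGPR) register files. As an illustrative, non-normative
// example (the GPU name here is just a placeholder), the two allocators can
// be chosen independently on an llc invocation:
//
//   llc -mtriple=amdgcn -mcpu=gfx90a -sgpr-regalloc=greedy -vgpr-regalloc=fast foo.ll
//
// Each option accepts basic, greedy, and fast, mirroring -regalloc; the
// onlyAllocateSGPRs/onlyAllocateVGPRs predicates restrict each allocator run
// to a single register class.
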
static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

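// As an illustrative example, the scan strategy can be overridden on the
// command line (the GPU name here is just a placeholder):
//
//   llc -mtriple=amdgcn -mcpu=gfx1030 -amdgpu-atomic-optimizer-strategy=DPP foo.ll
//
// DPP uses cross-lane data-parallel primitives for the wavefront scan,
// Iterative walks the active lanes one at a time, and None disables the
// optimizer so atomics go through the generic atomic expansion instead.
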
// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

311 "amdgpu-scalar-ir-passes",
312 cl::desc("Enable scalar IR passes"),
313 cl::init(true),
314 cl::Hidden);
315
317 "amdgpu-enable-structurizer-workarounds",
318 cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
319 cl::Hidden);
320
322 "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
324 cl::Hidden);
325
327 "amdgpu-enable-pre-ra-optimizations",
328 cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
329 cl::Hidden);
330
332 "amdgpu-enable-promote-kernel-arguments",
333 cl::desc("Enable promotion of flat kernel pointer arguments to global"),
334 cl::Hidden, cl::init(true));
335
337 "amdgpu-enable-image-intrinsic-optimizer",
338 cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
339 cl::Hidden);
340
342 "amdgpu-enable-max-ilp-scheduling-strategy",
343 cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
344 cl::Hidden, cl::init(false));
345
347 "amdgpu-enable-rewrite-partial-reg-uses",
348 cl::desc("Enable rewrite partial reg uses pass"), cl::init(false),
349 cl::Hidden);
350
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  // ... (the long list of initialize*Pass(*PR) calls) ...
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation());
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);

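// These registries plug the strategies into the generic MachineScheduler
// pass. As an illustrative example, a specific strategy can be selected with
// llc's -misched option:
//
//   llc -mtriple=amdgcn -misched=gcn-max-ilp foo.ll
//
// When -misched is not given, the default is chosen by
// GCNPassConfig::createMachineScheduler below.
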
static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:"
         "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
         "G1-ni:7:8";
}
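
// Decoding a few components of the GCN string above, using the generic
// DataLayout syntax: "p5:32:32" declares address space 5 (private) pointers
// as 32 bits wide with 32-bit alignment, "p7:160:256:256:32" gives the fat
// buffer pointers a 160-bit size with a 32-bit index width, "A5" makes
// allocas default to address space 5, "G1" makes globals default to address
// space 1, and "ni:7:8" marks address spaces 7 and 8 as non-integral.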

static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT,
                                               StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().startswith("__asan_") ||
           F->getName().startswith("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
  PB.registerPipelineParsingCallback(
      [this](StringRef PassName, ModulePassManager &PM,
             ArrayRef<PassBuilder::PipelineElement>) {
        if (PassName == "amdgpu-unify-metadata") {
          PM.addPass(AMDGPUUnifyMetadataPass());
          return true;
        }
        if (PassName == "amdgpu-printf-runtime-binding") {
          PM.addPass(AMDGPUPrintfRuntimeBindingPass());
          return true;
        }
        if (PassName == "amdgpu-always-inline") {
          PM.addPass(AMDGPUAlwaysInlinePass());
          return true;
        }
        if (PassName == "amdgpu-lower-module-lds") {
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
          return true;
        }
        if (PassName == "amdgpu-lower-ctor-dtor") {
          PM.addPass(AMDGPUCtorDtorLoweringPass());
          return true;
        }
        return false;
      });
  PB.registerPipelineParsingCallback(
      [this](StringRef PassName, FunctionPassManager &PM,
             ArrayRef<PassBuilder::PipelineElement>) {
        if (PassName == "amdgpu-simplifylib") {
          PM.addPass(AMDGPUSimplifyLibCallsPass());
          return true;
        }
        if (PassName == "amdgpu-image-intrinsic-opt") {
          PM.addPass(AMDGPUImageIntrinsicOptimizerPass(*this));
          return true;
        }
        if (PassName == "amdgpu-usenative") {
          PM.addPass(AMDGPUUseNativeCallsPass());
          return true;
        }
        if (PassName == "amdgpu-promote-alloca") {
          PM.addPass(AMDGPUPromoteAllocaPass(*this));
          return true;
        }
        if (PassName == "amdgpu-promote-alloca-to-vector") {
          PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
          return true;
        }
        if (PassName == "amdgpu-lower-kernel-attributes") {
          PM.addPass(AMDGPULowerKernelAttributesPass());
          return true;
        }
        if (PassName == "amdgpu-promote-kernel-arguments") {
          PM.addPass(AMDGPUPromoteKernelArgumentsPass());
          return true;
        }
        if (PassName == "amdgpu-unify-divergent-exit-nodes") {
          PM.addPass(AMDGPUUnifyDivergentExitNodesPass());
          return true;
        }
        if (PassName == "amdgpu-atomic-optimizer") {
          PM.addPass(
              AMDGPUAtomicOptimizerPass(*this, AMDGPUAtomicOptimizerStrategy));
          return true;
        }
        if (PassName == "amdgpu-codegenprepare") {
          PM.addPass(AMDGPUCodeGenPreparePass(*this));
          return true;
        }
        if (PassName == "amdgpu-lower-kernel-arguments") {
          PM.addPass(AMDGPULowerKernelArgumentsPass(*this));
          return true;
        }
        if (PassName == "amdgpu-rewrite-undef-for-phi") {
          PM.addPass(AMDGPURewriteUndefForPHIPass());
          return true;
        }
        return false;
      });

  PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
    FAM.registerPass([&] { return AMDGPUAA(); });
  });

  PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) {
    if (AAName == "amdgpu-aa") {
      AAM.registerFunctionAnalysis<AMDGPUAA>();
      return true;
    }
    return false;
  });

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        FunctionPassManager FPM;
        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify && Level != OptimizationLevel::O0)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
        PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());

        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });
}
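
// As an illustrative example, the pipeline-parsing callbacks above let the
// new-pass-manager opt driver run these AMDGPU passes by name:
//
//   opt -mtriple=amdgcn -passes=amdgpu-promote-alloca,amdgpu-codegenprepare foo.ll
//
// and -aa-pipeline=amdgpu-aa pulls in the AMDGPU alias analysis registered
// through registerParseAACallback.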

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}
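
// Illustration: in the 32-bit private, local, and region address spaces an
// offset of 0 is a valid address, so the null-pointer sentinel is all-ones
// (-1, i.e. 0xFFFFFFFF) instead; for global, constant, and flat pointers
// null stays 0. An IR construct such as
//
//   addrspacecast ptr null to ptr addrspace(5)
//
// therefore lowers to the -1 sentinel rather than 0.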

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}
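
// For example, a cast between global (addrspace 1) and flat (addrspace 0)
// pointers is a no-op here: both are 64 bits wide and a global address is
// valid as a flat address unchanged. Casts involving the 32-bit private or
// local spaces require real address computation and do not qualify.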

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->isPointerTy() &&
         V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}
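
// Illustrative IR for the case handled above: a flat pointer loaded from
// constant memory is assumed to point to global memory.
//
//   %p = load ptr, ptr addrspace(4) %arg   ; assumed to be ptr addrspace(1)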

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative
  // and the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}
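
// Illustrative IR that matches the predicate pattern above:
//
//   %s   = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %ns  = xor i1 %s, true
//   %pr  = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %npr = xor i1 %pr, true
//   %g   = and i1 %ns, %npr   ; when %g holds, %p is a global pointer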

unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
        C, std::make_unique<PostGenericScheduler>(C),
        /*RemoveKillFlags=*/true);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.shouldClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    DAG->addMutation(createIGroupLPDAGMutation());
    if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
      DAG->addMutation(createVOPDPairingMutation());
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&GCLoweringID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  Triple::ArchType Arch = TM.getTargetTriple().getArch();
  if (RemoveIncompatibleFunctions && Arch == Triple::amdgcn)
    addPass(createAMDGPURemoveIncompatibleFunctionsPass(&TM));

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (Arch == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  // AMDGPUAttributor infers lack of llvm.amdgcn.lds.kernel.id calls, so run
  // after their introduction
  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPUAttributorLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createInferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // The LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here lets the
  // UnreachableBlockElim pass, inserted next in the pass flow, clean them up.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

MachineFunctionInfo *R600TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return R600MachineFunctionInfo::create<R600MachineFunctionInfo>(
      Allocator, F, static_cast<const R600Subtarget *>(STI));
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  if (EnableMaxIlpSchedStrategy)
    return createGCNMaxILPMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPreparePass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
    // TODO: Move this right after structurizeCFG to avoid extra divergence
    // analysis. This depends on stopping SIAnnotateControlFlow from making
    // control flow modifications.
    addPass(createAMDGPURewriteUndefForPHILegacyPass());
  }
  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&SILoadStoreOptimizerID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new AMDGPURegBankSelect());
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  if (isPassEnabled(EnableRewritePartialRegUses))
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
  // the register in LiveVariables, this would trigger a failure in verifier,
  // we should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID);
  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  addPass(&SILowerWWMCopiesID);
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);

  addPass(createVGPRAllocPass(false));

  addPass(&SILowerWWMCopiesID);
  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);

  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsPass());
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee that it can handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         std::nullopt, std::nullopt);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };
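
  // For illustration, this parses machineFunctionInfo blocks like the
  // following from MIR input (the register choices here are hypothetical):
  //
  //   machineFunctionInfo:
  //     argumentInfo:
  //       privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
  //       kernargSegmentPtr:    { reg: '$sgpr4_sgpr5' }
  //       workGroupIDX:         { reg: '$sgpr6' }
  //       workItemIDX:          { reg: '$vgpr0' }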

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;

  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  return false;
}
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
static cl::opt< bool > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:680
#define LLVM_READNONE
Definition: Compiler.h:201
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
FunctionAnalysisManager FAM
const char LLVMTargetMachineRef TM
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This header defines various interfaces for pass management in LLVM.
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
static const char PassName[]
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
Lower llvm.global_ctors and llvm.global_dtors to special kernels.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, TargetOptions Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
void registerPassBuilderCallbacks(PassBuilder &PB) override
Allow the target to modify the pass pipeline.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:620
bool registerPass(PassBuilderT &&PassBuilder)
Register an analysis pass with the manager.
Definition: PassManager.h:836
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:318
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:184
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:708
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:247
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, TargetOptions Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML reprsentation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:34
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:103
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:457
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:448
void registerParseAACallback(const std::function< bool(StringRef Name, AAManager &AA)> &C)
Register a callback for parsing an AliasAnalysis Name to populate the given AAManager AA.
Definition: PassBuilder.h:500
void registerAnalysisRegistrationCallback(const std::function< void(CGSCCAnalysisManager &)> &C)
Register callbacks for analysis registration with this PassBuilder instance.
Definition: PassBuilder.h:508
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:427
void registerPipelineParsingCallback(const std::function< bool(StringRef Name, CGSCCPassManager &, ArrayRef< PipelineElement >)> &C)
Register pipeline parsing callbacks with this pass builder instance.
Definition: PassBuilder.h:530
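A sketch of making a custom pass name parseable from -passes= through this hook, using the module-level overload ("my-module-pass" and MyModulePass are hypothetical):
PB.registerPipelineParsingCallback(
    [](StringRef Name, ModulePassManager &PM,
       ArrayRef<PassBuilder::PipelineElement>) {
      if (Name == "my-module-pass") { // hypothetical pass name
        PM.addPass(MyModulePass());   // hypothetical new-PM module pass
        return true;
      }
      return false;
    });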
LLVM_ATTRIBUTE_MINSIZE std::enable_if_t<!std::is_same< PassT, PassManager >::value > addPass(PassT &&Pass)
Definition: PassManager.h:544
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
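This is how scheduler factories typically bolt target mutations onto a generic scheduler (a sketch built from symbols documented elsewhere on this page; the factory name is hypothetical):
static ScheduleDAGInstrs *createMyScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  // Cluster adjacent loads/stores while the DAG is being built.
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}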
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:557
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:558
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
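For example, the main buffer's identifier can be recovered from a SourceMgr like so (a sketch assuming a SourceMgr SM):
const MemoryBuffer *Buf = SM.getMemoryBuffer(SM.getMainFileID());
StringRef BufferName = Buf->getBufferIdentifier(); // typically the filename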
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:97
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
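A typical override pattern for these hooks (MyPassConfig is a hypothetical TargetPassConfig subclass; createAtomicExpandPass is documented below):
void MyPassConfig::addIRPasses() {
  // Target-specific IR lowering first...
  addPass(createAtomicExpandPass());
  // ...then the common target-configurable IR passes.
  TargetPassConfig::addIRPasses();
}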
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:355
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
Definition: AMDGPU.h:392
@ LOCAL_ADDRESS
Address space for local memory.
Definition: AMDGPU.h:395
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
Definition: AMDGPU.h:394
@ UNKNOWN_ADDRESS_SPACE
Definition: AMDGPU.h:438
@ FLAT_ADDRESS
Address space for flat memory.
Definition: AMDGPU.h:390
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
Definition: AMDGPU.h:391
@ PRIVATE_ADDRESS
Address space for private memory.
Definition: AMDGPU.h:396
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:445
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:798
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:76
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
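Combined, these matchers express commutative patterns compactly, e.g. recognizing "X & ~X" with the operands in either order (a sketch, assuming a Value *V to test):
Value *X;
bool AndOfNotSelf =
    match(V, m_c_And(m_Value(X), m_Not(m_Deferred(X))));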
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:705
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:465
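These helpers configure command-line options; a sketch of both storage styles (the flag names are hypothetical):
static cl::opt<bool> EnableMyOpt("enable-my-opt", cl::Hidden,
                                 cl::init(false),
                                 cl::desc("Enable a hypothetical optimization"));
static bool MyFlagStorage;
static cl::opt<bool, true> MyFlag("my-flag", cl::location(MyFlagStorage),
                                  cl::init(false),
                                  cl::desc("Hypothetical externally stored flag"));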
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass performs register allocation as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
FunctionPass * createAMDGPUSetWavePriorityPass()
Pass * createLCSSAPass()
Definition: LCSSA.cpp:489
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUDAGToDAGISelPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:1218
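A sketch of the adaptor in use (MyFunctionPass is a hypothetical new-PM function pass):
ModulePassManager MPM;
MPM.addPass(createModuleToFunctionPassAdaptor(MyFunctionPass()));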
void initializeAMDGPULateCodeGenPreparePass(PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
void initializeAMDGPUAttributorPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUAnnotateUniformValuesPass(PassRegistry &)
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
FunctionPass * createAtomicExpandPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
char & SILoadStoreOptimizerID
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true, the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
Pass * createLICMPass()
Definition: LICM.cpp:370
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & AMDGPUPerfHintAnalysisID
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
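Targets typically forward the user's choice together with a target default, roughly:
CodeModel::Model CM =
    getEffectiveCodeModel(MaybeCM, CodeModel::Small); // MaybeCM: the std::optional passed to the TargetMachine constructor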
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:276
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation()
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
void initializeSIWholeQuadModePass(PassRegistry &)
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeSIAnnotateControlFlowPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
FunctionPass * createAMDGPUAnnotateUniformValues()
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
FunctionPass * createSILowerI1CopiesPass()
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific DAG, ready for instruction scheduling.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:166
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3335
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
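A minimal usage sketch (the flag and lambda body are illustrative):
static llvm::once_flag InitFlag;
llvm::call_once(InitFlag, [] {
  // One-time initialization, e.g. installing a default register allocator.
});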
void initializeSILowerSGPRSpillsPass(PassRegistry &)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
void initializeSIFixSGPRCopiesPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
Pass * createAMDGPUAttributorPass()
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
void initializeSILowerI1CopiesPass(PassRegistry &)
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
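This is roughly how an external alias analysis gets wired into a legacy pass pipeline (a sketch assuming a legacy PassManagerBase &PM; the AMDGPU AA wrapper is documented elsewhere on this page):
PM.add(createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
  if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
}));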
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createSIMemoryLegalizerPass()
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3606
FunctionPass * createAMDGPULateCodeGenPreparePass()
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
char & SIFixSGPRCopiesID
FunctionPass * createSIAnnotateControlFlowPass()
Create the annotation pass.
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
#define N
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
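Putting the mode fields together, a hypothetical configuration that flushes f32 denormals but keeps f64/f16 IEEE-compliant might look like this (SIModeRegisterDefaults is assumed as the struct these fields belong to; the values are illustrative):
SIModeRegisterDefaults Mode;
Mode.IEEE = true;
Mode.DX10Clamp = true;
Mode.FP32Denormals = DenormalMode::getPreserveSign(); // flush, keep sign of zero
Mode.FP64FP16Denormals = DenormalMode::getIEEE();     // preserve denormals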
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.